11f4d4ed6SAlexander Lobakin // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 232a47e72SYuval Mintz /* QLogic qed NIC Driver 3e8f1cb50SMintz, Yuval * Copyright (c) 2015-2017 QLogic Corporation 4663eacd8SAlexander Lobakin * Copyright (c) 2019-2020 Marvell International Ltd. 532a47e72SYuval Mintz */ 632a47e72SYuval Mintz 7dacd88d6SYuval Mintz #include <linux/etherdevice.h> 836558c3dSYuval Mintz #include <linux/crc32.h> 9f29ffdb6SMintz, Yuval #include <linux/vmalloc.h> 106da95b52SAlok Prasad #include <linux/crash_dump.h> 110b55e27dSYuval Mintz #include <linux/qed/qed_iov_if.h> 121408cc1fSYuval Mintz #include "qed_cxt.h" 131408cc1fSYuval Mintz #include "qed_hsi.h" 1432a47e72SYuval Mintz #include "qed_hw.h" 151408cc1fSYuval Mintz #include "qed_init_ops.h" 1632a47e72SYuval Mintz #include "qed_int.h" 171408cc1fSYuval Mintz #include "qed_mcp.h" 1832a47e72SYuval Mintz #include "qed_reg_addr.h" 191408cc1fSYuval Mintz #include "qed_sp.h" 2032a47e72SYuval Mintz #include "qed_sriov.h" 2132a47e72SYuval Mintz #include "qed_vf.h" 226c9e80eaSMichal Kalderon static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, 236c9e80eaSMichal Kalderon u8 opcode, 246c9e80eaSMichal Kalderon __le16 echo, 256c9e80eaSMichal Kalderon union event_ring_data *data, u8 fw_return_code); 267425d822SShahed Shaikh static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); 2732a47e72SYuval Mintz 283b19f478SMintz, Yuval static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) 293b19f478SMintz, Yuval { 3008bc8f15SMintz, Yuval u8 legacy = 0; 313b19f478SMintz, Yuval 323b19f478SMintz, Yuval if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 333b19f478SMintz, Yuval ETH_HSI_VER_NO_PKT_LEN_TUNN) 343b19f478SMintz, Yuval legacy |= QED_QCID_LEGACY_VF_RX_PROD; 353b19f478SMintz, Yuval 3608bc8f15SMintz, Yuval if (!(p_vf->acquire.vfdev_info.capabilities & 3708bc8f15SMintz, Yuval VFPF_ACQUIRE_CAP_QUEUE_QIDS)) 3808bc8f15SMintz, Yuval legacy |= QED_QCID_LEGACY_VF_CID; 3908bc8f15SMintz, Yuval 
403b19f478SMintz, Yuval return legacy; 413b19f478SMintz, Yuval } 423b19f478SMintz, Yuval 431408cc1fSYuval Mintz /* IOV ramrods */ 441fe614d1SYuval Mintz static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) 451408cc1fSYuval Mintz { 461408cc1fSYuval Mintz struct vf_start_ramrod_data *p_ramrod = NULL; 471408cc1fSYuval Mintz struct qed_spq_entry *p_ent = NULL; 481408cc1fSYuval Mintz struct qed_sp_init_data init_data; 491408cc1fSYuval Mintz int rc = -EINVAL; 501fe614d1SYuval Mintz u8 fp_minor; 511408cc1fSYuval Mintz 521408cc1fSYuval Mintz /* Get SPQ entry */ 531408cc1fSYuval Mintz memset(&init_data, 0, sizeof(init_data)); 541408cc1fSYuval Mintz init_data.cid = qed_spq_get_cid(p_hwfn); 551fe614d1SYuval Mintz init_data.opaque_fid = p_vf->opaque_fid; 561408cc1fSYuval Mintz init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 571408cc1fSYuval Mintz 581408cc1fSYuval Mintz rc = qed_sp_init_request(p_hwfn, &p_ent, 591408cc1fSYuval Mintz COMMON_RAMROD_VF_START, 601408cc1fSYuval Mintz PROTOCOLID_COMMON, &init_data); 611408cc1fSYuval Mintz if (rc) 621408cc1fSYuval Mintz return rc; 631408cc1fSYuval Mintz 641408cc1fSYuval Mintz p_ramrod = &p_ent->ramrod.vf_start; 651408cc1fSYuval Mintz 661fe614d1SYuval Mintz p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); 671fe614d1SYuval Mintz p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); 681408cc1fSYuval Mintz 691fe614d1SYuval Mintz switch (p_hwfn->hw_info.personality) { 701fe614d1SYuval Mintz case QED_PCI_ETH: 711408cc1fSYuval Mintz p_ramrod->personality = PERSONALITY_ETH; 721fe614d1SYuval Mintz break; 731fe614d1SYuval Mintz case QED_PCI_ETH_ROCE: 741fe614d1SYuval Mintz p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; 751fe614d1SYuval Mintz break; 761fe614d1SYuval Mintz default: 771fe614d1SYuval Mintz DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", 781fe614d1SYuval Mintz p_hwfn->hw_info.personality); 79fb5e7438SDenis Bolotin qed_sp_destroy_request(p_hwfn, p_ent); 801fe614d1SYuval Mintz return 
-EINVAL; 811fe614d1SYuval Mintz } 821fe614d1SYuval Mintz 831fe614d1SYuval Mintz fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; 84a044df83SYuval Mintz if (fp_minor > ETH_HSI_VER_MINOR && 85a044df83SYuval Mintz fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { 861fe614d1SYuval Mintz DP_VERBOSE(p_hwfn, 871fe614d1SYuval Mintz QED_MSG_IOV, 881fe614d1SYuval Mintz "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", 891fe614d1SYuval Mintz p_vf->abs_vf_id, 901fe614d1SYuval Mintz ETH_HSI_VER_MAJOR, 911fe614d1SYuval Mintz fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 921fe614d1SYuval Mintz fp_minor = ETH_HSI_VER_MINOR; 931fe614d1SYuval Mintz } 941fe614d1SYuval Mintz 95351a4dedSYuval Mintz p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; 961fe614d1SYuval Mintz p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; 971fe614d1SYuval Mintz 981fe614d1SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 991fe614d1SYuval Mintz "VF[%d] - Starting using HSI %02x.%02x\n", 1001fe614d1SYuval Mintz p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); 1011408cc1fSYuval Mintz 1021408cc1fSYuval Mintz return qed_spq_post(p_hwfn, p_ent, NULL); 1031408cc1fSYuval Mintz } 1041408cc1fSYuval Mintz 1050b55e27dSYuval Mintz static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, 1060b55e27dSYuval Mintz u32 concrete_vfid, u16 opaque_vfid) 1070b55e27dSYuval Mintz { 1080b55e27dSYuval Mintz struct vf_stop_ramrod_data *p_ramrod = NULL; 1090b55e27dSYuval Mintz struct qed_spq_entry *p_ent = NULL; 1100b55e27dSYuval Mintz struct qed_sp_init_data init_data; 1110b55e27dSYuval Mintz int rc = -EINVAL; 1120b55e27dSYuval Mintz 1130b55e27dSYuval Mintz /* Get SPQ entry */ 1140b55e27dSYuval Mintz memset(&init_data, 0, sizeof(init_data)); 1150b55e27dSYuval Mintz init_data.cid = qed_spq_get_cid(p_hwfn); 1160b55e27dSYuval Mintz init_data.opaque_fid = opaque_vfid; 1170b55e27dSYuval Mintz init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1180b55e27dSYuval Mintz 
1190b55e27dSYuval Mintz rc = qed_sp_init_request(p_hwfn, &p_ent, 1200b55e27dSYuval Mintz COMMON_RAMROD_VF_STOP, 1210b55e27dSYuval Mintz PROTOCOLID_COMMON, &init_data); 1220b55e27dSYuval Mintz if (rc) 1230b55e27dSYuval Mintz return rc; 1240b55e27dSYuval Mintz 1250b55e27dSYuval Mintz p_ramrod = &p_ent->ramrod.vf_stop; 1260b55e27dSYuval Mintz 1270b55e27dSYuval Mintz p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); 1280b55e27dSYuval Mintz 1290b55e27dSYuval Mintz return qed_spq_post(p_hwfn, p_ent, NULL); 1300b55e27dSYuval Mintz } 1310b55e27dSYuval Mintz 132da090917STomer Tayar bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, 1337eff82b0SYuval Mintz int rel_vf_id, 1347eff82b0SYuval Mintz bool b_enabled_only, bool b_non_malicious) 13532a47e72SYuval Mintz { 13632a47e72SYuval Mintz if (!p_hwfn->pf_iov_info) { 13732a47e72SYuval Mintz DP_NOTICE(p_hwfn->cdev, "No iov info\n"); 13832a47e72SYuval Mintz return false; 13932a47e72SYuval Mintz } 14032a47e72SYuval Mintz 14132a47e72SYuval Mintz if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || 14232a47e72SYuval Mintz (rel_vf_id < 0)) 14332a47e72SYuval Mintz return false; 14432a47e72SYuval Mintz 14532a47e72SYuval Mintz if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && 14632a47e72SYuval Mintz b_enabled_only) 14732a47e72SYuval Mintz return false; 14832a47e72SYuval Mintz 1497eff82b0SYuval Mintz if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && 1507eff82b0SYuval Mintz b_non_malicious) 1517eff82b0SYuval Mintz return false; 1527eff82b0SYuval Mintz 15332a47e72SYuval Mintz return true; 15432a47e72SYuval Mintz } 15532a47e72SYuval Mintz 15637bff2b9SYuval Mintz static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, 15737bff2b9SYuval Mintz u16 relative_vf_id, 15837bff2b9SYuval Mintz bool b_enabled_only) 15937bff2b9SYuval Mintz { 16037bff2b9SYuval Mintz struct qed_vf_info *vf = NULL; 16137bff2b9SYuval Mintz 16237bff2b9SYuval Mintz if (!p_hwfn->pf_iov_info) { 16337bff2b9SYuval Mintz 
DP_NOTICE(p_hwfn->cdev, "No iov info\n"); 16437bff2b9SYuval Mintz return NULL; 16537bff2b9SYuval Mintz } 16637bff2b9SYuval Mintz 1677eff82b0SYuval Mintz if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, 1687eff82b0SYuval Mintz b_enabled_only, false)) 16937bff2b9SYuval Mintz vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; 17037bff2b9SYuval Mintz else 17137bff2b9SYuval Mintz DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", 17237bff2b9SYuval Mintz relative_vf_id); 17337bff2b9SYuval Mintz 17437bff2b9SYuval Mintz return vf; 17537bff2b9SYuval Mintz } 17637bff2b9SYuval Mintz 177007bc371SMintz, Yuval static struct qed_queue_cid * 178007bc371SMintz, Yuval qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue) 179007bc371SMintz, Yuval { 180007bc371SMintz, Yuval int i; 181007bc371SMintz, Yuval 182007bc371SMintz, Yuval for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 183007bc371SMintz, Yuval if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx) 184007bc371SMintz, Yuval return p_queue->cids[i].p_cid; 185007bc371SMintz, Yuval } 186007bc371SMintz, Yuval 187007bc371SMintz, Yuval return NULL; 188007bc371SMintz, Yuval } 189007bc371SMintz, Yuval 190f109c240SMintz, Yuval enum qed_iov_validate_q_mode { 191f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_NA, 192f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_ENABLE, 193f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_DISABLE, 194f109c240SMintz, Yuval }; 195f109c240SMintz, Yuval 196f109c240SMintz, Yuval static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn, 197f109c240SMintz, Yuval struct qed_vf_info *p_vf, 198f109c240SMintz, Yuval u16 qid, 199f109c240SMintz, Yuval enum qed_iov_validate_q_mode mode, 200f109c240SMintz, Yuval bool b_is_tx) 20141086467SYuval Mintz { 202007bc371SMintz, Yuval int i; 203007bc371SMintz, Yuval 204f109c240SMintz, Yuval if (mode == QED_IOV_VALIDATE_Q_NA) 205f109c240SMintz, Yuval return true; 206f109c240SMintz, Yuval 207007bc371SMintz, Yuval for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 208007bc371SMintz, 
Yuval struct qed_vf_queue_cid *p_qcid; 209007bc371SMintz, Yuval 210007bc371SMintz, Yuval p_qcid = &p_vf->vf_queues[qid].cids[i]; 211007bc371SMintz, Yuval 212007bc371SMintz, Yuval if (!p_qcid->p_cid) 213007bc371SMintz, Yuval continue; 214007bc371SMintz, Yuval 215007bc371SMintz, Yuval if (p_qcid->b_is_tx != b_is_tx) 216007bc371SMintz, Yuval continue; 217007bc371SMintz, Yuval 218f109c240SMintz, Yuval return mode == QED_IOV_VALIDATE_Q_ENABLE; 219007bc371SMintz, Yuval } 220f109c240SMintz, Yuval 221f109c240SMintz, Yuval /* In case we haven't found any valid cid, then its disabled */ 222f109c240SMintz, Yuval return mode == QED_IOV_VALIDATE_Q_DISABLE; 223f109c240SMintz, Yuval } 224f109c240SMintz, Yuval 225f109c240SMintz, Yuval static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, 226f109c240SMintz, Yuval struct qed_vf_info *p_vf, 227f109c240SMintz, Yuval u16 rx_qid, 228f109c240SMintz, Yuval enum qed_iov_validate_q_mode mode) 229f109c240SMintz, Yuval { 230f109c240SMintz, Yuval if (rx_qid >= p_vf->num_rxqs) { 23141086467SYuval Mintz DP_VERBOSE(p_hwfn, 23241086467SYuval Mintz QED_MSG_IOV, 23341086467SYuval Mintz "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", 23441086467SYuval Mintz p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); 235f109c240SMintz, Yuval return false; 236f109c240SMintz, Yuval } 237f109c240SMintz, Yuval 238f109c240SMintz, Yuval return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false); 23941086467SYuval Mintz } 24041086467SYuval Mintz 24141086467SYuval Mintz static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, 242f109c240SMintz, Yuval struct qed_vf_info *p_vf, 243f109c240SMintz, Yuval u16 tx_qid, 244f109c240SMintz, Yuval enum qed_iov_validate_q_mode mode) 24541086467SYuval Mintz { 246f109c240SMintz, Yuval if (tx_qid >= p_vf->num_txqs) { 24741086467SYuval Mintz DP_VERBOSE(p_hwfn, 24841086467SYuval Mintz QED_MSG_IOV, 24941086467SYuval Mintz "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", 
25041086467SYuval Mintz p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); 251f109c240SMintz, Yuval return false; 252f109c240SMintz, Yuval } 253f109c240SMintz, Yuval 254f109c240SMintz, Yuval return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true); 25541086467SYuval Mintz } 25641086467SYuval Mintz 25741086467SYuval Mintz static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, 25841086467SYuval Mintz struct qed_vf_info *p_vf, u16 sb_idx) 25941086467SYuval Mintz { 26041086467SYuval Mintz int i; 26141086467SYuval Mintz 26241086467SYuval Mintz for (i = 0; i < p_vf->num_sbs; i++) 26341086467SYuval Mintz if (p_vf->igu_sbs[i] == sb_idx) 26441086467SYuval Mintz return true; 26541086467SYuval Mintz 26641086467SYuval Mintz DP_VERBOSE(p_hwfn, 26741086467SYuval Mintz QED_MSG_IOV, 26841086467SYuval Mintz "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", 26941086467SYuval Mintz p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); 27041086467SYuval Mintz 27141086467SYuval Mintz return false; 27241086467SYuval Mintz } 27341086467SYuval Mintz 274f109c240SMintz, Yuval static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn, 275f109c240SMintz, Yuval struct qed_vf_info *p_vf) 276f109c240SMintz, Yuval { 277f109c240SMintz, Yuval u8 i; 278f109c240SMintz, Yuval 279f109c240SMintz, Yuval for (i = 0; i < p_vf->num_rxqs; i++) 280f109c240SMintz, Yuval if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, 281f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_ENABLE, 282f109c240SMintz, Yuval false)) 283f109c240SMintz, Yuval return true; 284f109c240SMintz, Yuval 285f109c240SMintz, Yuval return false; 286f109c240SMintz, Yuval } 287f109c240SMintz, Yuval 288f109c240SMintz, Yuval static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn, 289f109c240SMintz, Yuval struct qed_vf_info *p_vf) 290f109c240SMintz, Yuval { 291f109c240SMintz, Yuval u8 i; 292f109c240SMintz, Yuval 293f109c240SMintz, Yuval for (i = 0; i < p_vf->num_txqs; i++) 294f109c240SMintz, Yuval if 
(qed_iov_validate_queue_mode(p_hwfn, p_vf, i, 295f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_ENABLE, 296f109c240SMintz, Yuval true)) 297f109c240SMintz, Yuval return true; 298f109c240SMintz, Yuval 299f109c240SMintz, Yuval return false; 300f109c240SMintz, Yuval } 301f109c240SMintz, Yuval 302ba56947aSBaoyou Xie static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, 30336558c3dSYuval Mintz int vfid, struct qed_ptt *p_ptt) 30436558c3dSYuval Mintz { 30536558c3dSYuval Mintz struct qed_bulletin_content *p_bulletin; 30636558c3dSYuval Mintz int crc_size = sizeof(p_bulletin->crc); 30736558c3dSYuval Mintz struct qed_dmae_params params; 30836558c3dSYuval Mintz struct qed_vf_info *p_vf; 30936558c3dSYuval Mintz 31036558c3dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 31136558c3dSYuval Mintz if (!p_vf) 31236558c3dSYuval Mintz return -EINVAL; 31336558c3dSYuval Mintz 31436558c3dSYuval Mintz if (!p_vf->vf_bulletin) 31536558c3dSYuval Mintz return -EINVAL; 31636558c3dSYuval Mintz 31736558c3dSYuval Mintz p_bulletin = p_vf->bulletin.p_virt; 31836558c3dSYuval Mintz 31936558c3dSYuval Mintz /* Increment bulletin board version and compute crc */ 32036558c3dSYuval Mintz p_bulletin->version++; 32136558c3dSYuval Mintz p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, 32236558c3dSYuval Mintz p_vf->bulletin.size - crc_size); 32336558c3dSYuval Mintz 32436558c3dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 32536558c3dSYuval Mintz "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", 32636558c3dSYuval Mintz p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); 32736558c3dSYuval Mintz 32836558c3dSYuval Mintz /* propagate bulletin board via dmae to vm memory */ 32936558c3dSYuval Mintz memset(¶ms, 0, sizeof(params)); 330804c5702SMichal Kalderon SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); 33136558c3dSYuval Mintz params.dst_vfid = p_vf->abs_vf_id; 33236558c3dSYuval Mintz return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, 33336558c3dSYuval Mintz 
p_vf->vf_bulletin, p_vf->bulletin.size / 4, 33436558c3dSYuval Mintz ¶ms); 33536558c3dSYuval Mintz } 33636558c3dSYuval Mintz 33732a47e72SYuval Mintz static int qed_iov_pci_cfg_info(struct qed_dev *cdev) 33832a47e72SYuval Mintz { 33932a47e72SYuval Mintz struct qed_hw_sriov_info *iov = cdev->p_iov_info; 34032a47e72SYuval Mintz int pos = iov->pos; 34132a47e72SYuval Mintz 34232a47e72SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); 34332a47e72SYuval Mintz pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 34432a47e72SYuval Mintz 34532a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 34632a47e72SYuval Mintz pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); 34732a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 34832a47e72SYuval Mintz pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); 34932a47e72SYuval Mintz 35032a47e72SYuval Mintz pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); 35132a47e72SYuval Mintz if (iov->num_vfs) { 35232a47e72SYuval Mintz DP_VERBOSE(cdev, 35332a47e72SYuval Mintz QED_MSG_IOV, 35432a47e72SYuval Mintz "Number of VFs are already set to non-zero value. 
Ignoring PCI configuration value\n"); 35532a47e72SYuval Mintz iov->num_vfs = 0; 35632a47e72SYuval Mintz } 35732a47e72SYuval Mintz 35832a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 35932a47e72SYuval Mintz pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 36032a47e72SYuval Mintz 36132a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 36232a47e72SYuval Mintz pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 36332a47e72SYuval Mintz 36432a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 36532a47e72SYuval Mintz pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); 36632a47e72SYuval Mintz 36732a47e72SYuval Mintz pci_read_config_dword(cdev->pdev, 36832a47e72SYuval Mintz pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 36932a47e72SYuval Mintz 37032a47e72SYuval Mintz pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); 37132a47e72SYuval Mintz 37232a47e72SYuval Mintz pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 37332a47e72SYuval Mintz 37432a47e72SYuval Mintz DP_VERBOSE(cdev, 37532a47e72SYuval Mintz QED_MSG_IOV, 37632a47e72SYuval Mintz "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 37732a47e72SYuval Mintz iov->nres, 37832a47e72SYuval Mintz iov->cap, 37932a47e72SYuval Mintz iov->ctrl, 38032a47e72SYuval Mintz iov->total_vfs, 38132a47e72SYuval Mintz iov->initial_vfs, 38232a47e72SYuval Mintz iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 38332a47e72SYuval Mintz 38432a47e72SYuval Mintz /* Some sanity checks */ 38532a47e72SYuval Mintz if (iov->num_vfs > NUM_OF_VFS(cdev) || 38632a47e72SYuval Mintz iov->total_vfs > NUM_OF_VFS(cdev)) { 38732a47e72SYuval Mintz /* This can happen only due to a bug. 
In this case we set 38832a47e72SYuval Mintz * num_vfs to zero to avoid memory corruption in the code that 38932a47e72SYuval Mintz * assumes max number of vfs 39032a47e72SYuval Mintz */ 39132a47e72SYuval Mintz DP_NOTICE(cdev, 39232a47e72SYuval Mintz "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", 39332a47e72SYuval Mintz iov->num_vfs); 39432a47e72SYuval Mintz 39532a47e72SYuval Mintz iov->num_vfs = 0; 39632a47e72SYuval Mintz iov->total_vfs = 0; 39732a47e72SYuval Mintz } 39832a47e72SYuval Mintz 39932a47e72SYuval Mintz return 0; 40032a47e72SYuval Mintz } 40132a47e72SYuval Mintz 40232a47e72SYuval Mintz static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) 40332a47e72SYuval Mintz { 40432a47e72SYuval Mintz struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 40532a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 40632a47e72SYuval Mintz struct qed_bulletin_content *p_bulletin_virt; 40732a47e72SYuval Mintz dma_addr_t req_p, rply_p, bulletin_p; 40832a47e72SYuval Mintz union pfvf_tlvs *p_reply_virt_addr; 40932a47e72SYuval Mintz union vfpf_tlvs *p_req_virt_addr; 41032a47e72SYuval Mintz u8 idx = 0; 41132a47e72SYuval Mintz 41232a47e72SYuval Mintz memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); 41332a47e72SYuval Mintz 41432a47e72SYuval Mintz p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; 41532a47e72SYuval Mintz req_p = p_iov_info->mbx_msg_phys_addr; 41632a47e72SYuval Mintz p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; 41732a47e72SYuval Mintz rply_p = p_iov_info->mbx_reply_phys_addr; 41832a47e72SYuval Mintz p_bulletin_virt = p_iov_info->p_bulletins; 41932a47e72SYuval Mintz bulletin_p = p_iov_info->bulletins_phys; 42032a47e72SYuval Mintz if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { 42132a47e72SYuval Mintz DP_ERR(p_hwfn, 42232a47e72SYuval Mintz "qed_iov_setup_vfdb called without allocating mem first\n"); 42332a47e72SYuval Mintz return; 42432a47e72SYuval Mintz } 42532a47e72SYuval Mintz 
42632a47e72SYuval Mintz for (idx = 0; idx < p_iov->total_vfs; idx++) { 42732a47e72SYuval Mintz struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; 42832a47e72SYuval Mintz u32 concrete; 42932a47e72SYuval Mintz 43032a47e72SYuval Mintz vf->vf_mbx.req_virt = p_req_virt_addr + idx; 43132a47e72SYuval Mintz vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); 43232a47e72SYuval Mintz vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; 43332a47e72SYuval Mintz vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); 43432a47e72SYuval Mintz 43532a47e72SYuval Mintz vf->state = VF_STOPPED; 43632a47e72SYuval Mintz vf->b_init = false; 43732a47e72SYuval Mintz 43832a47e72SYuval Mintz vf->bulletin.phys = idx * 43932a47e72SYuval Mintz sizeof(struct qed_bulletin_content) + 44032a47e72SYuval Mintz bulletin_p; 44132a47e72SYuval Mintz vf->bulletin.p_virt = p_bulletin_virt + idx; 44232a47e72SYuval Mintz vf->bulletin.size = sizeof(struct qed_bulletin_content); 44332a47e72SYuval Mintz 44432a47e72SYuval Mintz vf->relative_vf_id = idx; 44532a47e72SYuval Mintz vf->abs_vf_id = idx + p_iov->first_vf_in_pf; 44632a47e72SYuval Mintz concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); 44732a47e72SYuval Mintz vf->concrete_fid = concrete; 44832a47e72SYuval Mintz vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | 44932a47e72SYuval Mintz (vf->abs_vf_id << 8); 45032a47e72SYuval Mintz vf->vport_id = idx + 1; 4511cf2b1a9SYuval Mintz 4521cf2b1a9SYuval Mintz vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; 4531cf2b1a9SYuval Mintz vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; 45432a47e72SYuval Mintz } 45532a47e72SYuval Mintz } 45632a47e72SYuval Mintz 45732a47e72SYuval Mintz static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) 45832a47e72SYuval Mintz { 45932a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 46032a47e72SYuval Mintz void **p_v_addr; 46132a47e72SYuval Mintz u16 num_vfs = 0; 46232a47e72SYuval Mintz 46332a47e72SYuval Mintz num_vfs = 
p_hwfn->cdev->p_iov_info->total_vfs; 46432a47e72SYuval Mintz 46532a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 46632a47e72SYuval Mintz "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); 46732a47e72SYuval Mintz 46832a47e72SYuval Mintz /* Allocate PF Mailbox buffer (per-VF) */ 46932a47e72SYuval Mintz p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; 47032a47e72SYuval Mintz p_v_addr = &p_iov_info->mbx_msg_virt_addr; 47132a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 47232a47e72SYuval Mintz p_iov_info->mbx_msg_size, 47332a47e72SYuval Mintz &p_iov_info->mbx_msg_phys_addr, 47432a47e72SYuval Mintz GFP_KERNEL); 47532a47e72SYuval Mintz if (!*p_v_addr) 47632a47e72SYuval Mintz return -ENOMEM; 47732a47e72SYuval Mintz 47832a47e72SYuval Mintz /* Allocate PF Mailbox Reply buffer (per-VF) */ 47932a47e72SYuval Mintz p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; 48032a47e72SYuval Mintz p_v_addr = &p_iov_info->mbx_reply_virt_addr; 48132a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 48232a47e72SYuval Mintz p_iov_info->mbx_reply_size, 48332a47e72SYuval Mintz &p_iov_info->mbx_reply_phys_addr, 48432a47e72SYuval Mintz GFP_KERNEL); 48532a47e72SYuval Mintz if (!*p_v_addr) 48632a47e72SYuval Mintz return -ENOMEM; 48732a47e72SYuval Mintz 48832a47e72SYuval Mintz p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * 48932a47e72SYuval Mintz num_vfs; 49032a47e72SYuval Mintz p_v_addr = &p_iov_info->p_bulletins; 49132a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 49232a47e72SYuval Mintz p_iov_info->bulletins_size, 49332a47e72SYuval Mintz &p_iov_info->bulletins_phys, 49432a47e72SYuval Mintz GFP_KERNEL); 49532a47e72SYuval Mintz if (!*p_v_addr) 49632a47e72SYuval Mintz return -ENOMEM; 49732a47e72SYuval Mintz 49832a47e72SYuval Mintz DP_VERBOSE(p_hwfn, 49932a47e72SYuval Mintz QED_MSG_IOV, 50032a47e72SYuval Mintz "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox 
[%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", 50132a47e72SYuval Mintz p_iov_info->mbx_msg_virt_addr, 50232a47e72SYuval Mintz (u64) p_iov_info->mbx_msg_phys_addr, 50332a47e72SYuval Mintz p_iov_info->mbx_reply_virt_addr, 50432a47e72SYuval Mintz (u64) p_iov_info->mbx_reply_phys_addr, 50532a47e72SYuval Mintz p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); 50632a47e72SYuval Mintz 50732a47e72SYuval Mintz return 0; 50832a47e72SYuval Mintz } 50932a47e72SYuval Mintz 51032a47e72SYuval Mintz static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) 51132a47e72SYuval Mintz { 51232a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 51332a47e72SYuval Mintz 51432a47e72SYuval Mintz if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) 51532a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 51632a47e72SYuval Mintz p_iov_info->mbx_msg_size, 51732a47e72SYuval Mintz p_iov_info->mbx_msg_virt_addr, 51832a47e72SYuval Mintz p_iov_info->mbx_msg_phys_addr); 51932a47e72SYuval Mintz 52032a47e72SYuval Mintz if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) 52132a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 52232a47e72SYuval Mintz p_iov_info->mbx_reply_size, 52332a47e72SYuval Mintz p_iov_info->mbx_reply_virt_addr, 52432a47e72SYuval Mintz p_iov_info->mbx_reply_phys_addr); 52532a47e72SYuval Mintz 52632a47e72SYuval Mintz if (p_iov_info->p_bulletins) 52732a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 52832a47e72SYuval Mintz p_iov_info->bulletins_size, 52932a47e72SYuval Mintz p_iov_info->p_bulletins, 53032a47e72SYuval Mintz p_iov_info->bulletins_phys); 53132a47e72SYuval Mintz } 53232a47e72SYuval Mintz 53332a47e72SYuval Mintz int qed_iov_alloc(struct qed_hwfn *p_hwfn) 53432a47e72SYuval Mintz { 53532a47e72SYuval Mintz struct qed_pf_iov *p_sriov; 53632a47e72SYuval Mintz 53732a47e72SYuval Mintz if (!IS_PF_SRIOV(p_hwfn)) { 53832a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 53932a47e72SYuval Mintz "No SR-IOV - no need for IOV 
db\n"); 54032a47e72SYuval Mintz return 0; 54132a47e72SYuval Mintz } 54232a47e72SYuval Mintz 54332a47e72SYuval Mintz p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); 5442591c280SJoe Perches if (!p_sriov) 54532a47e72SYuval Mintz return -ENOMEM; 54632a47e72SYuval Mintz 54732a47e72SYuval Mintz p_hwfn->pf_iov_info = p_sriov; 54832a47e72SYuval Mintz 5496c9e80eaSMichal Kalderon qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, 5506c9e80eaSMichal Kalderon qed_sriov_eqe_event); 5516c9e80eaSMichal Kalderon 55232a47e72SYuval Mintz return qed_iov_allocate_vfdb(p_hwfn); 55332a47e72SYuval Mintz } 55432a47e72SYuval Mintz 5551ee240e3SMintz, Yuval void qed_iov_setup(struct qed_hwfn *p_hwfn) 55632a47e72SYuval Mintz { 55732a47e72SYuval Mintz if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) 55832a47e72SYuval Mintz return; 55932a47e72SYuval Mintz 56032a47e72SYuval Mintz qed_iov_setup_vfdb(p_hwfn); 56132a47e72SYuval Mintz } 56232a47e72SYuval Mintz 56332a47e72SYuval Mintz void qed_iov_free(struct qed_hwfn *p_hwfn) 56432a47e72SYuval Mintz { 5656c9e80eaSMichal Kalderon qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); 5666c9e80eaSMichal Kalderon 56732a47e72SYuval Mintz if (IS_PF_SRIOV_ALLOC(p_hwfn)) { 56832a47e72SYuval Mintz qed_iov_free_vfdb(p_hwfn); 56932a47e72SYuval Mintz kfree(p_hwfn->pf_iov_info); 57032a47e72SYuval Mintz } 57132a47e72SYuval Mintz } 57232a47e72SYuval Mintz 57332a47e72SYuval Mintz void qed_iov_free_hw_info(struct qed_dev *cdev) 57432a47e72SYuval Mintz { 57532a47e72SYuval Mintz kfree(cdev->p_iov_info); 57632a47e72SYuval Mintz cdev->p_iov_info = NULL; 57732a47e72SYuval Mintz } 57832a47e72SYuval Mintz 57932a47e72SYuval Mintz int qed_iov_hw_info(struct qed_hwfn *p_hwfn) 58032a47e72SYuval Mintz { 58132a47e72SYuval Mintz struct qed_dev *cdev = p_hwfn->cdev; 58232a47e72SYuval Mintz int pos; 58332a47e72SYuval Mintz int rc; 58432a47e72SYuval Mintz 5856da95b52SAlok Prasad if (is_kdump_kernel()) 5866da95b52SAlok Prasad return 0; 5876da95b52SAlok Prasad 
5881408cc1fSYuval Mintz if (IS_VF(p_hwfn->cdev)) 5891408cc1fSYuval Mintz return 0; 5901408cc1fSYuval Mintz 59132a47e72SYuval Mintz /* Learn the PCI configuration */ 59232a47e72SYuval Mintz pos = pci_find_ext_capability(p_hwfn->cdev->pdev, 59332a47e72SYuval Mintz PCI_EXT_CAP_ID_SRIOV); 59432a47e72SYuval Mintz if (!pos) { 59532a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); 59632a47e72SYuval Mintz return 0; 59732a47e72SYuval Mintz } 59832a47e72SYuval Mintz 59932a47e72SYuval Mintz /* Allocate a new struct for IOV information */ 60032a47e72SYuval Mintz cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); 6012591c280SJoe Perches if (!cdev->p_iov_info) 60232a47e72SYuval Mintz return -ENOMEM; 6032591c280SJoe Perches 60432a47e72SYuval Mintz cdev->p_iov_info->pos = pos; 60532a47e72SYuval Mintz 60632a47e72SYuval Mintz rc = qed_iov_pci_cfg_info(cdev); 60732a47e72SYuval Mintz if (rc) 60832a47e72SYuval Mintz return rc; 60932a47e72SYuval Mintz 61032a47e72SYuval Mintz /* We want PF IOV to be synonemous with the existance of p_iov_info; 61132a47e72SYuval Mintz * In case the capability is published but there are no VFs, simply 61232a47e72SYuval Mintz * de-allocate the struct. 61332a47e72SYuval Mintz */ 61432a47e72SYuval Mintz if (!cdev->p_iov_info->total_vfs) { 61532a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 61632a47e72SYuval Mintz "IOV capabilities, but no VFs are published\n"); 61732a47e72SYuval Mintz kfree(cdev->p_iov_info); 61832a47e72SYuval Mintz cdev->p_iov_info = NULL; 61932a47e72SYuval Mintz return 0; 62032a47e72SYuval Mintz } 62132a47e72SYuval Mintz 6229c79ddaaSMintz, Yuval /* First VF index based on offset is tricky: 6239c79ddaaSMintz, Yuval * - If ARI is supported [likely], offset - (16 - pf_id) would 6249c79ddaaSMintz, Yuval * provide the number for eng0. 2nd engine Vfs would begin 62532a47e72SYuval Mintz * after the first engine's VFs. 6269c79ddaaSMintz, Yuval * - If !ARI, VFs would start on next device. 
6279c79ddaaSMintz, Yuval * so offset - (256 - pf_id) would provide the number. 6289c79ddaaSMintz, Yuval * Utilize the fact that (256 - pf_id) is achieved only by later 6298ac1ed79SJoe Perches * to differentiate between the two. 63032a47e72SYuval Mintz */ 6319c79ddaaSMintz, Yuval 6329c79ddaaSMintz, Yuval if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { 6339c79ddaaSMintz, Yuval u32 first = p_hwfn->cdev->p_iov_info->offset + 63432a47e72SYuval Mintz p_hwfn->abs_pf_id - 16; 6359c79ddaaSMintz, Yuval 6369c79ddaaSMintz, Yuval cdev->p_iov_info->first_vf_in_pf = first; 6379c79ddaaSMintz, Yuval 63832a47e72SYuval Mintz if (QED_PATH_ID(p_hwfn)) 63932a47e72SYuval Mintz cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; 6409c79ddaaSMintz, Yuval } else { 6419c79ddaaSMintz, Yuval u32 first = p_hwfn->cdev->p_iov_info->offset + 6429c79ddaaSMintz, Yuval p_hwfn->abs_pf_id - 256; 6439c79ddaaSMintz, Yuval 6449c79ddaaSMintz, Yuval cdev->p_iov_info->first_vf_in_pf = first; 6459c79ddaaSMintz, Yuval } 64632a47e72SYuval Mintz 64732a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 64832a47e72SYuval Mintz "First VF in hwfn 0x%08x\n", 64932a47e72SYuval Mintz cdev->p_iov_info->first_vf_in_pf); 65032a47e72SYuval Mintz 65132a47e72SYuval Mintz return 0; 65232a47e72SYuval Mintz } 65332a47e72SYuval Mintz 654bf774d14SYueHaibing static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, 6557eff82b0SYuval Mintz int vfid, bool b_fail_malicious) 65637bff2b9SYuval Mintz { 65737bff2b9SYuval Mintz /* Check PF supports sriov */ 658b0409fa0SYuval Mintz if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || 659b0409fa0SYuval Mintz !IS_PF_SRIOV_ALLOC(p_hwfn)) 66037bff2b9SYuval Mintz return false; 66137bff2b9SYuval Mintz 66237bff2b9SYuval Mintz /* Check VF validity */ 6637eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) 66437bff2b9SYuval Mintz return false; 66537bff2b9SYuval Mintz 66637bff2b9SYuval Mintz return true; 66737bff2b9SYuval Mintz } 
66837bff2b9SYuval Mintz 669bf774d14SYueHaibing static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) 6707eff82b0SYuval Mintz { 6717eff82b0SYuval Mintz return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); 6727eff82b0SYuval Mintz } 6737eff82b0SYuval Mintz 6740b55e27dSYuval Mintz static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, 6750b55e27dSYuval Mintz u16 rel_vf_id, u8 to_disable) 6760b55e27dSYuval Mintz { 6770b55e27dSYuval Mintz struct qed_vf_info *vf; 6780b55e27dSYuval Mintz int i; 6790b55e27dSYuval Mintz 6800b55e27dSYuval Mintz for_each_hwfn(cdev, i) { 6810b55e27dSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 6820b55e27dSYuval Mintz 6830b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 6840b55e27dSYuval Mintz if (!vf) 6850b55e27dSYuval Mintz continue; 6860b55e27dSYuval Mintz 6870b55e27dSYuval Mintz vf->to_disable = to_disable; 6880b55e27dSYuval Mintz } 6890b55e27dSYuval Mintz } 6900b55e27dSYuval Mintz 691ba56947aSBaoyou Xie static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) 6920b55e27dSYuval Mintz { 6930b55e27dSYuval Mintz u16 i; 6940b55e27dSYuval Mintz 6950b55e27dSYuval Mintz if (!IS_QED_SRIOV(cdev)) 6960b55e27dSYuval Mintz return; 6970b55e27dSYuval Mintz 6980b55e27dSYuval Mintz for (i = 0; i < cdev->p_iov_info->total_vfs; i++) 6990b55e27dSYuval Mintz qed_iov_set_vf_to_disable(cdev, i, to_disable); 7000b55e27dSYuval Mintz } 7010b55e27dSYuval Mintz 7021408cc1fSYuval Mintz static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, 7031408cc1fSYuval Mintz struct qed_ptt *p_ptt, u8 abs_vfid) 7041408cc1fSYuval Mintz { 7051408cc1fSYuval Mintz qed_wr(p_hwfn, p_ptt, 7061408cc1fSYuval Mintz PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, 7071408cc1fSYuval Mintz 1 << (abs_vfid & 0x1f)); 7081408cc1fSYuval Mintz } 7091408cc1fSYuval Mintz 710dacd88d6SYuval Mintz static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, 711dacd88d6SYuval Mintz struct qed_ptt *p_ptt, struct 
qed_vf_info *vf) 712dacd88d6SYuval Mintz { 713dacd88d6SYuval Mintz int i; 714dacd88d6SYuval Mintz 715dacd88d6SYuval Mintz /* Set VF masks and configuration - pretend */ 716dacd88d6SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 717dacd88d6SYuval Mintz 718dacd88d6SYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); 719dacd88d6SYuval Mintz 720dacd88d6SYuval Mintz /* unpretend */ 721dacd88d6SYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 722dacd88d6SYuval Mintz 723dacd88d6SYuval Mintz /* iterate over all queues, clear sb consumer */ 724b2b897ebSYuval Mintz for (i = 0; i < vf->num_sbs; i++) 725b2b897ebSYuval Mintz qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 726b2b897ebSYuval Mintz vf->igu_sbs[i], 727b2b897ebSYuval Mintz vf->opaque_fid, true); 728dacd88d6SYuval Mintz } 729dacd88d6SYuval Mintz 7300b55e27dSYuval Mintz static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, 7310b55e27dSYuval Mintz struct qed_ptt *p_ptt, 7320b55e27dSYuval Mintz struct qed_vf_info *vf, bool enable) 7330b55e27dSYuval Mintz { 7340b55e27dSYuval Mintz u32 igu_vf_conf; 7350b55e27dSYuval Mintz 7360b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 7370b55e27dSYuval Mintz 7380b55e27dSYuval Mintz igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); 7390b55e27dSYuval Mintz 7400b55e27dSYuval Mintz if (enable) 7410b55e27dSYuval Mintz igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; 7420b55e27dSYuval Mintz else 7430b55e27dSYuval Mintz igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; 7440b55e27dSYuval Mintz 7450b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); 7460b55e27dSYuval Mintz 7470b55e27dSYuval Mintz /* unpretend */ 7480b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 7490b55e27dSYuval Mintz } 7500b55e27dSYuval Mintz 75188072fd4SMintz, Yuval static int 75288072fd4SMintz, Yuval qed_iov_enable_vf_access_msix(struct qed_hwfn 
*p_hwfn, 75388072fd4SMintz, Yuval struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs) 75488072fd4SMintz, Yuval { 75588072fd4SMintz, Yuval u8 current_max = 0; 75688072fd4SMintz, Yuval int i; 75788072fd4SMintz, Yuval 75888072fd4SMintz, Yuval /* For AH onward, configuration is per-PF. Find maximum of all 75988072fd4SMintz, Yuval * the currently enabled child VFs, and set the number to be that. 76088072fd4SMintz, Yuval */ 76188072fd4SMintz, Yuval if (!QED_IS_BB(p_hwfn->cdev)) { 76288072fd4SMintz, Yuval qed_for_each_vf(p_hwfn, i) { 76388072fd4SMintz, Yuval struct qed_vf_info *p_vf; 76488072fd4SMintz, Yuval 76588072fd4SMintz, Yuval p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true); 76688072fd4SMintz, Yuval if (!p_vf) 76788072fd4SMintz, Yuval continue; 76888072fd4SMintz, Yuval 76988072fd4SMintz, Yuval current_max = max_t(u8, current_max, p_vf->num_sbs); 77088072fd4SMintz, Yuval } 77188072fd4SMintz, Yuval } 77288072fd4SMintz, Yuval 77388072fd4SMintz, Yuval if (num_sbs > current_max) 77488072fd4SMintz, Yuval return qed_mcp_config_vf_msix(p_hwfn, p_ptt, 77588072fd4SMintz, Yuval abs_vf_id, num_sbs); 77688072fd4SMintz, Yuval 77788072fd4SMintz, Yuval return 0; 77888072fd4SMintz, Yuval } 77988072fd4SMintz, Yuval 7801408cc1fSYuval Mintz static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, 7811408cc1fSYuval Mintz struct qed_ptt *p_ptt, 7821408cc1fSYuval Mintz struct qed_vf_info *vf) 7831408cc1fSYuval Mintz { 7841408cc1fSYuval Mintz u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; 7851408cc1fSYuval Mintz int rc; 7861408cc1fSYuval Mintz 7874e9b2a67SMintz, Yuval /* It's possible VF was previously considered malicious - 7884e9b2a67SMintz, Yuval * clear the indication even if we're only going to disable VF. 
7894e9b2a67SMintz, Yuval */ 7904e9b2a67SMintz, Yuval vf->b_malicious = false; 7914e9b2a67SMintz, Yuval 7920b55e27dSYuval Mintz if (vf->to_disable) 7930b55e27dSYuval Mintz return 0; 7940b55e27dSYuval Mintz 7951408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 7961408cc1fSYuval Mintz QED_MSG_IOV, 7971408cc1fSYuval Mintz "Enable internal access for vf %x [abs %x]\n", 7981408cc1fSYuval Mintz vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); 7991408cc1fSYuval Mintz 8001408cc1fSYuval Mintz qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); 8011408cc1fSYuval Mintz 802b2b897ebSYuval Mintz qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 803b2b897ebSYuval Mintz 80488072fd4SMintz, Yuval rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt, 80588072fd4SMintz, Yuval vf->abs_vf_id, vf->num_sbs); 8061408cc1fSYuval Mintz if (rc) 8071408cc1fSYuval Mintz return rc; 8081408cc1fSYuval Mintz 8091408cc1fSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); 8101408cc1fSYuval Mintz 8111408cc1fSYuval Mintz SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); 8121408cc1fSYuval Mintz STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); 8131408cc1fSYuval Mintz 8141408cc1fSYuval Mintz qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, 8151408cc1fSYuval Mintz p_hwfn->hw_info.hw_mode); 8161408cc1fSYuval Mintz 8171408cc1fSYuval Mintz /* unpretend */ 8181408cc1fSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 8191408cc1fSYuval Mintz 8201408cc1fSYuval Mintz vf->state = VF_FREE; 8211408cc1fSYuval Mintz 8221408cc1fSYuval Mintz return rc; 8231408cc1fSYuval Mintz } 8241408cc1fSYuval Mintz 8250b55e27dSYuval Mintz /** 8260b55e27dSYuval Mintz * @brief qed_iov_config_perm_table - configure the permission 8270b55e27dSYuval Mintz * zone table. 8280b55e27dSYuval Mintz * In E4, queue zone permission table size is 320x9. 
There 8290b55e27dSYuval Mintz * are 320 VF queues for single engine device (256 for dual 8300b55e27dSYuval Mintz * engine device), and each entry has the following format: 8310b55e27dSYuval Mintz * {Valid, VF[7:0]} 8320b55e27dSYuval Mintz * @param p_hwfn 8330b55e27dSYuval Mintz * @param p_ptt 8340b55e27dSYuval Mintz * @param vf 8350b55e27dSYuval Mintz * @param enable 8360b55e27dSYuval Mintz */ 8370b55e27dSYuval Mintz static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, 8380b55e27dSYuval Mintz struct qed_ptt *p_ptt, 8390b55e27dSYuval Mintz struct qed_vf_info *vf, u8 enable) 8400b55e27dSYuval Mintz { 8410b55e27dSYuval Mintz u32 reg_addr, val; 8420b55e27dSYuval Mintz u16 qzone_id = 0; 8430b55e27dSYuval Mintz int qid; 8440b55e27dSYuval Mintz 8450b55e27dSYuval Mintz for (qid = 0; qid < vf->num_rxqs; qid++) { 8460b55e27dSYuval Mintz qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, 8470b55e27dSYuval Mintz &qzone_id); 8480b55e27dSYuval Mintz 8490b55e27dSYuval Mintz reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; 8501a635e48SYuval Mintz val = enable ? 
(vf->abs_vf_id | BIT(8)) : 0; 8510b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, reg_addr, val); 8520b55e27dSYuval Mintz } 8530b55e27dSYuval Mintz } 8540b55e27dSYuval Mintz 855dacd88d6SYuval Mintz static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, 856dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 857dacd88d6SYuval Mintz struct qed_vf_info *vf) 858dacd88d6SYuval Mintz { 859dacd88d6SYuval Mintz /* Reset vf in IGU - interrupts are still disabled */ 860dacd88d6SYuval Mintz qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 861dacd88d6SYuval Mintz 862dacd88d6SYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); 863dacd88d6SYuval Mintz 864dacd88d6SYuval Mintz /* Permission Table */ 865dacd88d6SYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); 866dacd88d6SYuval Mintz } 867dacd88d6SYuval Mintz 8681408cc1fSYuval Mintz static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, 8691408cc1fSYuval Mintz struct qed_ptt *p_ptt, 8701408cc1fSYuval Mintz struct qed_vf_info *vf, u16 num_rx_queues) 8711408cc1fSYuval Mintz { 87209b6b147SMintz, Yuval struct qed_igu_block *p_block; 87309b6b147SMintz, Yuval struct cau_sb_entry sb_entry; 87409b6b147SMintz, Yuval int qid = 0; 8751408cc1fSYuval Mintz u32 val = 0; 8761408cc1fSYuval Mintz 877726fdbe9SMintz, Yuval if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) 878726fdbe9SMintz, Yuval num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; 879726fdbe9SMintz, Yuval p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; 8801408cc1fSYuval Mintz 8811408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); 8821408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); 8831408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); 8841408cc1fSYuval Mintz 88509b6b147SMintz, Yuval for (qid = 0; qid < num_rx_queues; qid++) { 88609b6b147SMintz, Yuval p_block = qed_get_igu_free_sb(p_hwfn, false); 88709b6b147SMintz, Yuval vf->igu_sbs[qid] = p_block->igu_sb_id; 
88809b6b147SMintz, Yuval p_block->status &= ~QED_IGU_STATUS_FREE; 8891408cc1fSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); 8901408cc1fSYuval Mintz 8911408cc1fSYuval Mintz qed_wr(p_hwfn, p_ptt, 89209b6b147SMintz, Yuval IGU_REG_MAPPING_MEMORY + 89309b6b147SMintz, Yuval sizeof(u32) * p_block->igu_sb_id, val); 8941408cc1fSYuval Mintz 8951408cc1fSYuval Mintz /* Configure igu sb in CAU which were marked valid */ 8961408cc1fSYuval Mintz qed_init_cau_sb_entry(p_hwfn, &sb_entry, 89709b6b147SMintz, Yuval p_hwfn->rel_pf_id, vf->abs_vf_id, 1); 89883bf76e3SMichal Kalderon 8991408cc1fSYuval Mintz qed_dmae_host2grc(p_hwfn, p_ptt, 9001408cc1fSYuval Mintz (u64)(uintptr_t)&sb_entry, 9011408cc1fSYuval Mintz CAU_REG_SB_VAR_MEMORY + 90283bf76e3SMichal Kalderon p_block->igu_sb_id * sizeof(u64), 2, NULL); 9031408cc1fSYuval Mintz } 9041408cc1fSYuval Mintz 9051408cc1fSYuval Mintz vf->num_sbs = (u8) num_rx_queues; 9061408cc1fSYuval Mintz 9071408cc1fSYuval Mintz return vf->num_sbs; 9081408cc1fSYuval Mintz } 9091408cc1fSYuval Mintz 9100b55e27dSYuval Mintz static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, 9110b55e27dSYuval Mintz struct qed_ptt *p_ptt, 9120b55e27dSYuval Mintz struct qed_vf_info *vf) 9130b55e27dSYuval Mintz { 9140b55e27dSYuval Mintz struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 9150b55e27dSYuval Mintz int idx, igu_id; 9160b55e27dSYuval Mintz u32 addr, val; 9170b55e27dSYuval Mintz 9180b55e27dSYuval Mintz /* Invalidate igu CAM lines and mark them as free */ 9190b55e27dSYuval Mintz for (idx = 0; idx < vf->num_sbs; idx++) { 9200b55e27dSYuval Mintz igu_id = vf->igu_sbs[idx]; 9210b55e27dSYuval Mintz addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; 9220b55e27dSYuval Mintz 9230b55e27dSYuval Mintz val = qed_rd(p_hwfn, p_ptt, addr); 9240b55e27dSYuval Mintz SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); 9250b55e27dSYuval Mintz qed_wr(p_hwfn, p_ptt, addr, val); 9260b55e27dSYuval Mintz 927d749dd0dSMintz, Yuval p_info->entry[igu_id].status |= 
QED_IGU_STATUS_FREE; 928726fdbe9SMintz, Yuval p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; 9290b55e27dSYuval Mintz } 9300b55e27dSYuval Mintz 9310b55e27dSYuval Mintz vf->num_sbs = 0; 9320b55e27dSYuval Mintz } 9330b55e27dSYuval Mintz 93433b2fbd0SMintz, Yuval static void qed_iov_set_link(struct qed_hwfn *p_hwfn, 93533b2fbd0SMintz, Yuval u16 vfid, 93633b2fbd0SMintz, Yuval struct qed_mcp_link_params *params, 93733b2fbd0SMintz, Yuval struct qed_mcp_link_state *link, 93833b2fbd0SMintz, Yuval struct qed_mcp_link_capabilities *p_caps) 93933b2fbd0SMintz, Yuval { 94033b2fbd0SMintz, Yuval struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 94133b2fbd0SMintz, Yuval vfid, 94233b2fbd0SMintz, Yuval false); 94333b2fbd0SMintz, Yuval struct qed_bulletin_content *p_bulletin; 94433b2fbd0SMintz, Yuval 94533b2fbd0SMintz, Yuval if (!p_vf) 94633b2fbd0SMintz, Yuval return; 94733b2fbd0SMintz, Yuval 94833b2fbd0SMintz, Yuval p_bulletin = p_vf->bulletin.p_virt; 94933b2fbd0SMintz, Yuval p_bulletin->req_autoneg = params->speed.autoneg; 95033b2fbd0SMintz, Yuval p_bulletin->req_adv_speed = params->speed.advertised_speeds; 95133b2fbd0SMintz, Yuval p_bulletin->req_forced_speed = params->speed.forced_speed; 95233b2fbd0SMintz, Yuval p_bulletin->req_autoneg_pause = params->pause.autoneg; 95333b2fbd0SMintz, Yuval p_bulletin->req_forced_rx = params->pause.forced_rx; 95433b2fbd0SMintz, Yuval p_bulletin->req_forced_tx = params->pause.forced_tx; 95533b2fbd0SMintz, Yuval p_bulletin->req_loopback = params->loopback_mode; 95633b2fbd0SMintz, Yuval 95733b2fbd0SMintz, Yuval p_bulletin->link_up = link->link_up; 95833b2fbd0SMintz, Yuval p_bulletin->speed = link->speed; 95933b2fbd0SMintz, Yuval p_bulletin->full_duplex = link->full_duplex; 96033b2fbd0SMintz, Yuval p_bulletin->autoneg = link->an; 96133b2fbd0SMintz, Yuval p_bulletin->autoneg_complete = link->an_complete; 96233b2fbd0SMintz, Yuval p_bulletin->parallel_detection = link->parallel_detection; 96333b2fbd0SMintz, Yuval p_bulletin->pfc_enabled = 
link->pfc_enabled; 96433b2fbd0SMintz, Yuval p_bulletin->partner_adv_speed = link->partner_adv_speed; 96533b2fbd0SMintz, Yuval p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; 96633b2fbd0SMintz, Yuval p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; 96733b2fbd0SMintz, Yuval p_bulletin->partner_adv_pause = link->partner_adv_pause; 96833b2fbd0SMintz, Yuval p_bulletin->sfp_tx_fault = link->sfp_tx_fault; 96933b2fbd0SMintz, Yuval 97033b2fbd0SMintz, Yuval p_bulletin->capability_speed = p_caps->speed_capabilities; 97133b2fbd0SMintz, Yuval } 97233b2fbd0SMintz, Yuval 9731408cc1fSYuval Mintz static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, 9741408cc1fSYuval Mintz struct qed_ptt *p_ptt, 9753da7a37aSMintz, Yuval struct qed_iov_vf_init_params *p_params) 9761408cc1fSYuval Mintz { 97733b2fbd0SMintz, Yuval struct qed_mcp_link_capabilities link_caps; 97833b2fbd0SMintz, Yuval struct qed_mcp_link_params link_params; 97933b2fbd0SMintz, Yuval struct qed_mcp_link_state link_state; 9801408cc1fSYuval Mintz u8 num_of_vf_avaiable_chains = 0; 9811408cc1fSYuval Mintz struct qed_vf_info *vf = NULL; 9823da7a37aSMintz, Yuval u16 qid, num_irqs; 9831408cc1fSYuval Mintz int rc = 0; 9841408cc1fSYuval Mintz u32 cids; 9851408cc1fSYuval Mintz u8 i; 9861408cc1fSYuval Mintz 9873da7a37aSMintz, Yuval vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); 9881408cc1fSYuval Mintz if (!vf) { 9891408cc1fSYuval Mintz DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); 9901408cc1fSYuval Mintz return -EINVAL; 9911408cc1fSYuval Mintz } 9921408cc1fSYuval Mintz 9931408cc1fSYuval Mintz if (vf->b_init) { 9943da7a37aSMintz, Yuval DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", 9953da7a37aSMintz, Yuval p_params->rel_vf_id); 9961408cc1fSYuval Mintz return -EINVAL; 9971408cc1fSYuval Mintz } 9981408cc1fSYuval Mintz 9993da7a37aSMintz, Yuval /* Perform sanity checking on the requested queue_id */ 10003da7a37aSMintz, Yuval for (i = 0; i < p_params->num_queues; i++) 
{ 10013da7a37aSMintz, Yuval u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE); 10023da7a37aSMintz, Yuval u16 max_vf_qzone = min_vf_qzone + 10033da7a37aSMintz, Yuval FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; 10043da7a37aSMintz, Yuval 10053da7a37aSMintz, Yuval qid = p_params->req_rx_queue[i]; 10063da7a37aSMintz, Yuval if (qid < min_vf_qzone || qid > max_vf_qzone) { 10073da7a37aSMintz, Yuval DP_NOTICE(p_hwfn, 10083da7a37aSMintz, Yuval "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n", 10093da7a37aSMintz, Yuval qid, 10103da7a37aSMintz, Yuval p_params->rel_vf_id, 10113da7a37aSMintz, Yuval min_vf_qzone, max_vf_qzone); 10123da7a37aSMintz, Yuval return -EINVAL; 10133da7a37aSMintz, Yuval } 10143da7a37aSMintz, Yuval 10153da7a37aSMintz, Yuval qid = p_params->req_tx_queue[i]; 10163da7a37aSMintz, Yuval if (qid > max_vf_qzone) { 10173da7a37aSMintz, Yuval DP_NOTICE(p_hwfn, 10183da7a37aSMintz, Yuval "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n", 10193da7a37aSMintz, Yuval qid, p_params->rel_vf_id, max_vf_qzone); 10203da7a37aSMintz, Yuval return -EINVAL; 10213da7a37aSMintz, Yuval } 10223da7a37aSMintz, Yuval 10233da7a37aSMintz, Yuval /* If client *really* wants, Tx qid can be shared with PF */ 10243da7a37aSMintz, Yuval if (qid < min_vf_qzone) 10253da7a37aSMintz, Yuval DP_VERBOSE(p_hwfn, 10263da7a37aSMintz, Yuval QED_MSG_IOV, 10273da7a37aSMintz, Yuval "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n", 10283da7a37aSMintz, Yuval p_params->rel_vf_id, qid, i); 10293da7a37aSMintz, Yuval } 10303da7a37aSMintz, Yuval 10311408cc1fSYuval Mintz /* Limit number of queues according to number of CIDs */ 10321408cc1fSYuval Mintz qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); 10331408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 10341408cc1fSYuval Mintz QED_MSG_IOV, 10351408cc1fSYuval Mintz "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", 10363da7a37aSMintz, Yuval vf->relative_vf_id, p_params->num_queues, (u16)cids); 
10373da7a37aSMintz, Yuval num_irqs = min_t(u16, p_params->num_queues, ((u16)cids)); 10381408cc1fSYuval Mintz 10391408cc1fSYuval Mintz num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, 10401408cc1fSYuval Mintz p_ptt, 10413da7a37aSMintz, Yuval vf, num_irqs); 10421408cc1fSYuval Mintz if (!num_of_vf_avaiable_chains) { 10431408cc1fSYuval Mintz DP_ERR(p_hwfn, "no available igu sbs\n"); 10441408cc1fSYuval Mintz return -ENOMEM; 10451408cc1fSYuval Mintz } 10461408cc1fSYuval Mintz 10471408cc1fSYuval Mintz /* Choose queue number and index ranges */ 10481408cc1fSYuval Mintz vf->num_rxqs = num_of_vf_avaiable_chains; 10491408cc1fSYuval Mintz vf->num_txqs = num_of_vf_avaiable_chains; 10501408cc1fSYuval Mintz 10511408cc1fSYuval Mintz for (i = 0; i < vf->num_rxqs; i++) { 1052007bc371SMintz, Yuval struct qed_vf_queue *p_queue = &vf->vf_queues[i]; 10531408cc1fSYuval Mintz 10543da7a37aSMintz, Yuval p_queue->fw_rx_qid = p_params->req_rx_queue[i]; 10553da7a37aSMintz, Yuval p_queue->fw_tx_qid = p_params->req_tx_queue[i]; 10561408cc1fSYuval Mintz 10571408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1058007bc371SMintz, Yuval "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n", 1059007bc371SMintz, Yuval vf->relative_vf_id, i, vf->igu_sbs[i], 1060007bc371SMintz, Yuval p_queue->fw_rx_qid, p_queue->fw_tx_qid); 10611408cc1fSYuval Mintz } 10623da7a37aSMintz, Yuval 106333b2fbd0SMintz, Yuval /* Update the link configuration in bulletin */ 106433b2fbd0SMintz, Yuval memcpy(&link_params, qed_mcp_get_link_params(p_hwfn), 106533b2fbd0SMintz, Yuval sizeof(link_params)); 106633b2fbd0SMintz, Yuval memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state)); 106733b2fbd0SMintz, Yuval memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn), 106833b2fbd0SMintz, Yuval sizeof(link_caps)); 106933b2fbd0SMintz, Yuval qed_iov_set_link(p_hwfn, p_params->rel_vf_id, 107033b2fbd0SMintz, Yuval &link_params, &link_state, &link_caps); 107133b2fbd0SMintz, Yuval 10721408cc1fSYuval Mintz rc = 
qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); 10731408cc1fSYuval Mintz if (!rc) { 10741408cc1fSYuval Mintz vf->b_init = true; 10751408cc1fSYuval Mintz 10761408cc1fSYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 10771408cc1fSYuval Mintz p_hwfn->cdev->p_iov_info->num_vfs++; 10781408cc1fSYuval Mintz } 10791408cc1fSYuval Mintz 10801408cc1fSYuval Mintz return rc; 10811408cc1fSYuval Mintz } 10821408cc1fSYuval Mintz 10830b55e27dSYuval Mintz static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, 10840b55e27dSYuval Mintz struct qed_ptt *p_ptt, u16 rel_vf_id) 10850b55e27dSYuval Mintz { 1086079d20a6SManish Chopra struct qed_mcp_link_capabilities caps; 1087079d20a6SManish Chopra struct qed_mcp_link_params params; 1088079d20a6SManish Chopra struct qed_mcp_link_state link; 10890b55e27dSYuval Mintz struct qed_vf_info *vf = NULL; 10900b55e27dSYuval Mintz 10910b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 10920b55e27dSYuval Mintz if (!vf) { 10930b55e27dSYuval Mintz DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); 10940b55e27dSYuval Mintz return -EINVAL; 10950b55e27dSYuval Mintz } 10960b55e27dSYuval Mintz 109736558c3dSYuval Mintz if (vf->bulletin.p_virt) 109836558c3dSYuval Mintz memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); 109936558c3dSYuval Mintz 110036558c3dSYuval Mintz memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); 110136558c3dSYuval Mintz 1102079d20a6SManish Chopra /* Get the link configuration back in bulletin so 1103079d20a6SManish Chopra * that when VFs are re-enabled they get the actual 1104079d20a6SManish Chopra * link configuration. 
1105079d20a6SManish Chopra */ 1106079d20a6SManish Chopra memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params)); 1107079d20a6SManish Chopra memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); 1108079d20a6SManish Chopra memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 1109079d20a6SManish Chopra qed_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps); 1110079d20a6SManish Chopra 11111fe614d1SYuval Mintz /* Forget the VF's acquisition message */ 11121fe614d1SYuval Mintz memset(&vf->acquire, 0, sizeof(vf->acquire)); 11130b55e27dSYuval Mintz 11140b55e27dSYuval Mintz /* disablng interrupts and resetting permission table was done during 11150b55e27dSYuval Mintz * vf-close, however, we could get here without going through vf_close 11160b55e27dSYuval Mintz */ 11170b55e27dSYuval Mintz /* Disable Interrupts for VF */ 11180b55e27dSYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 11190b55e27dSYuval Mintz 11200b55e27dSYuval Mintz /* Reset Permission table */ 11210b55e27dSYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 11220b55e27dSYuval Mintz 11230b55e27dSYuval Mintz vf->num_rxqs = 0; 11240b55e27dSYuval Mintz vf->num_txqs = 0; 11250b55e27dSYuval Mintz qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); 11260b55e27dSYuval Mintz 11270b55e27dSYuval Mintz if (vf->b_init) { 11280b55e27dSYuval Mintz vf->b_init = false; 11290b55e27dSYuval Mintz 11300b55e27dSYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 11310b55e27dSYuval Mintz p_hwfn->cdev->p_iov_info->num_vfs--; 11320b55e27dSYuval Mintz } 11330b55e27dSYuval Mintz 11340b55e27dSYuval Mintz return 0; 11350b55e27dSYuval Mintz } 11360b55e27dSYuval Mintz 113737bff2b9SYuval Mintz static bool qed_iov_tlv_supported(u16 tlvtype) 113837bff2b9SYuval Mintz { 113937bff2b9SYuval Mintz return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; 114037bff2b9SYuval Mintz } 114137bff2b9SYuval Mintz 114237bff2b9SYuval Mintz /* place a given tlv on the tlv buffer, continuing current tlv list */ 114337bff2b9SYuval 
Mintz void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) 114437bff2b9SYuval Mintz { 114537bff2b9SYuval Mintz struct channel_tlv *tl = (struct channel_tlv *)*offset; 114637bff2b9SYuval Mintz 114737bff2b9SYuval Mintz tl->type = type; 114837bff2b9SYuval Mintz tl->length = length; 114937bff2b9SYuval Mintz 115037bff2b9SYuval Mintz /* Offset should keep pointing to next TLV (the end of the last) */ 115137bff2b9SYuval Mintz *offset += length; 115237bff2b9SYuval Mintz 115337bff2b9SYuval Mintz /* Return a pointer to the start of the added tlv */ 115437bff2b9SYuval Mintz return *offset - length; 115537bff2b9SYuval Mintz } 115637bff2b9SYuval Mintz 115737bff2b9SYuval Mintz /* list the types and lengths of the tlvs on the buffer */ 115837bff2b9SYuval Mintz void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) 115937bff2b9SYuval Mintz { 116037bff2b9SYuval Mintz u16 i = 1, total_length = 0; 116137bff2b9SYuval Mintz struct channel_tlv *tlv; 116237bff2b9SYuval Mintz 116337bff2b9SYuval Mintz do { 116437bff2b9SYuval Mintz tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); 116537bff2b9SYuval Mintz 116637bff2b9SYuval Mintz /* output tlv */ 116737bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 116837bff2b9SYuval Mintz "TLV number %d: type %d, length %d\n", 116937bff2b9SYuval Mintz i, tlv->type, tlv->length); 117037bff2b9SYuval Mintz 117137bff2b9SYuval Mintz if (tlv->type == CHANNEL_TLV_LIST_END) 117237bff2b9SYuval Mintz return; 117337bff2b9SYuval Mintz 117437bff2b9SYuval Mintz /* Validate entry - protect against malicious VFs */ 117537bff2b9SYuval Mintz if (!tlv->length) { 117637bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); 117737bff2b9SYuval Mintz return; 117837bff2b9SYuval Mintz } 117937bff2b9SYuval Mintz 118037bff2b9SYuval Mintz total_length += tlv->length; 118137bff2b9SYuval Mintz 118237bff2b9SYuval Mintz if (total_length >= sizeof(struct tlv_buffer_size)) { 118337bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "TLV ==> 
Buffer overflow\n"); 118437bff2b9SYuval Mintz return; 118537bff2b9SYuval Mintz } 118637bff2b9SYuval Mintz 118737bff2b9SYuval Mintz i++; 118837bff2b9SYuval Mintz } while (1); 118937bff2b9SYuval Mintz } 119037bff2b9SYuval Mintz 119137bff2b9SYuval Mintz static void qed_iov_send_response(struct qed_hwfn *p_hwfn, 119237bff2b9SYuval Mintz struct qed_ptt *p_ptt, 119337bff2b9SYuval Mintz struct qed_vf_info *p_vf, 119437bff2b9SYuval Mintz u16 length, u8 status) 119537bff2b9SYuval Mintz { 119637bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 119737bff2b9SYuval Mintz struct qed_dmae_params params; 119837bff2b9SYuval Mintz u8 eng_vf_id; 119937bff2b9SYuval Mintz 120037bff2b9SYuval Mintz mbx->reply_virt->default_resp.hdr.status = status; 120137bff2b9SYuval Mintz 120237bff2b9SYuval Mintz qed_dp_tlv_list(p_hwfn, mbx->reply_virt); 120337bff2b9SYuval Mintz 120437bff2b9SYuval Mintz eng_vf_id = p_vf->abs_vf_id; 120537bff2b9SYuval Mintz 1206804c5702SMichal Kalderon memset(¶ms, 0, sizeof(params)); 1207804c5702SMichal Kalderon SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); 120837bff2b9SYuval Mintz params.dst_vfid = eng_vf_id; 120937bff2b9SYuval Mintz 121037bff2b9SYuval Mintz qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), 121137bff2b9SYuval Mintz mbx->req_virt->first_tlv.reply_address + 121237bff2b9SYuval Mintz sizeof(u64), 121337bff2b9SYuval Mintz (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, 121437bff2b9SYuval Mintz ¶ms); 121537bff2b9SYuval Mintz 1216d9194081SMintz, Yuval /* Once PF copies the rc to the VF, the latter can continue 1217d9194081SMintz, Yuval * and send an additional message. So we have to make sure the 1218d9194081SMintz, Yuval * channel would be re-set to ready prior to that. 
1219d9194081SMintz, Yuval */ 122037bff2b9SYuval Mintz REG_WR(p_hwfn, 122137bff2b9SYuval Mintz GTT_BAR0_MAP_REG_USDM_RAM + 122237bff2b9SYuval Mintz USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); 1223d9194081SMintz, Yuval 1224d9194081SMintz, Yuval qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, 1225d9194081SMintz, Yuval mbx->req_virt->first_tlv.reply_address, 1226d9194081SMintz, Yuval sizeof(u64) / 4, ¶ms); 122737bff2b9SYuval Mintz } 122837bff2b9SYuval Mintz 1229dacd88d6SYuval Mintz static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, 1230dacd88d6SYuval Mintz enum qed_iov_vport_update_flag flag) 1231dacd88d6SYuval Mintz { 1232dacd88d6SYuval Mintz switch (flag) { 1233dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_ACTIVATE: 1234dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 123517b235c1SYuval Mintz case QED_IOV_VP_UPDATE_VLAN_STRIP: 123617b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 123717b235c1SYuval Mintz case QED_IOV_VP_UPDATE_TX_SWITCH: 123817b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 1239dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_MCAST: 1240dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_MCAST; 1241dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_ACCEPT_PARAM: 1242dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1243dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_RSS: 1244dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_RSS; 124517b235c1SYuval Mintz case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: 124617b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 124717b235c1SYuval Mintz case QED_IOV_VP_UPDATE_SGE_TPA: 124817b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 1249dacd88d6SYuval Mintz default: 1250dacd88d6SYuval Mintz return 0; 1251dacd88d6SYuval Mintz } 1252dacd88d6SYuval Mintz } 1253dacd88d6SYuval Mintz 1254dacd88d6SYuval Mintz static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, 1255dacd88d6SYuval Mintz struct qed_vf_info *p_vf, 
1256dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, 1257dacd88d6SYuval Mintz u8 status, 1258dacd88d6SYuval Mintz u16 tlvs_mask, u16 tlvs_accepted) 1259dacd88d6SYuval Mintz { 1260dacd88d6SYuval Mintz struct pfvf_def_resp_tlv *resp; 1261dacd88d6SYuval Mintz u16 size, total_len, i; 1262dacd88d6SYuval Mintz 1263dacd88d6SYuval Mintz memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); 1264dacd88d6SYuval Mintz p_mbx->offset = (u8 *)p_mbx->reply_virt; 1265dacd88d6SYuval Mintz size = sizeof(struct pfvf_def_resp_tlv); 1266dacd88d6SYuval Mintz total_len = size; 1267dacd88d6SYuval Mintz 1268dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); 1269dacd88d6SYuval Mintz 1270dacd88d6SYuval Mintz /* Prepare response for all extended tlvs if they are found by PF */ 1271dacd88d6SYuval Mintz for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { 12721a635e48SYuval Mintz if (!(tlvs_mask & BIT(i))) 1273dacd88d6SYuval Mintz continue; 1274dacd88d6SYuval Mintz 1275dacd88d6SYuval Mintz resp = qed_add_tlv(p_hwfn, &p_mbx->offset, 1276dacd88d6SYuval Mintz qed_iov_vport_to_tlv(p_hwfn, i), size); 1277dacd88d6SYuval Mintz 12781a635e48SYuval Mintz if (tlvs_accepted & BIT(i)) 1279dacd88d6SYuval Mintz resp->hdr.status = status; 1280dacd88d6SYuval Mintz else 1281dacd88d6SYuval Mintz resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; 1282dacd88d6SYuval Mintz 1283dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 1284dacd88d6SYuval Mintz QED_MSG_IOV, 1285dacd88d6SYuval Mintz "VF[%d] - vport_update response: TLV %d, status %02x\n", 1286dacd88d6SYuval Mintz p_vf->relative_vf_id, 1287dacd88d6SYuval Mintz qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); 1288dacd88d6SYuval Mintz 1289dacd88d6SYuval Mintz total_len += size; 1290dacd88d6SYuval Mintz } 1291dacd88d6SYuval Mintz 1292dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, 1293dacd88d6SYuval Mintz sizeof(struct channel_list_end_tlv)); 1294dacd88d6SYuval Mintz 1295dacd88d6SYuval Mintz return total_len; 
1296dacd88d6SYuval Mintz } 1297dacd88d6SYuval Mintz 129837bff2b9SYuval Mintz static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, 129937bff2b9SYuval Mintz struct qed_ptt *p_ptt, 130037bff2b9SYuval Mintz struct qed_vf_info *vf_info, 130137bff2b9SYuval Mintz u16 type, u16 length, u8 status) 130237bff2b9SYuval Mintz { 130337bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; 130437bff2b9SYuval Mintz 130537bff2b9SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 130637bff2b9SYuval Mintz 130737bff2b9SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, type, length); 130837bff2b9SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 130937bff2b9SYuval Mintz sizeof(struct channel_list_end_tlv)); 131037bff2b9SYuval Mintz 131137bff2b9SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); 131237bff2b9SYuval Mintz } 131337bff2b9SYuval Mintz 1314ba56947aSBaoyou Xie static struct 1315ba56947aSBaoyou Xie qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, 13160b55e27dSYuval Mintz u16 relative_vf_id, 13170b55e27dSYuval Mintz bool b_enabled_only) 13180b55e27dSYuval Mintz { 13190b55e27dSYuval Mintz struct qed_vf_info *vf = NULL; 13200b55e27dSYuval Mintz 13210b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); 13220b55e27dSYuval Mintz if (!vf) 13230b55e27dSYuval Mintz return NULL; 13240b55e27dSYuval Mintz 13250b55e27dSYuval Mintz return &vf->p_vf_info; 13260b55e27dSYuval Mintz } 13270b55e27dSYuval Mintz 1328ba56947aSBaoyou Xie static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) 13290b55e27dSYuval Mintz { 13300b55e27dSYuval Mintz struct qed_public_vf_info *vf_info; 13310b55e27dSYuval Mintz 13320b55e27dSYuval Mintz vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); 13330b55e27dSYuval Mintz 13340b55e27dSYuval Mintz if (!vf_info) 13350b55e27dSYuval Mintz return; 13360b55e27dSYuval Mintz 13370b55e27dSYuval Mintz /* Clear the VF mac */ 13380ee28e31SShyam Saini 
eth_zero_addr(vf_info->mac); 1339f990c82cSMintz, Yuval 1340f990c82cSMintz, Yuval vf_info->rx_accept_mode = 0; 1341f990c82cSMintz, Yuval vf_info->tx_accept_mode = 0; 13420b55e27dSYuval Mintz } 13430b55e27dSYuval Mintz 13440b55e27dSYuval Mintz static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, 13450b55e27dSYuval Mintz struct qed_vf_info *p_vf) 13460b55e27dSYuval Mintz { 1347007bc371SMintz, Yuval u32 i, j; 13480b55e27dSYuval Mintz 13490b55e27dSYuval Mintz p_vf->vf_bulletin = 0; 1350dacd88d6SYuval Mintz p_vf->vport_instance = 0; 135108feecd7SYuval Mintz p_vf->configured_features = 0; 13520b55e27dSYuval Mintz 13530b55e27dSYuval Mintz /* If VF previously requested less resources, go back to default */ 13540b55e27dSYuval Mintz p_vf->num_rxqs = p_vf->num_sbs; 13550b55e27dSYuval Mintz p_vf->num_txqs = p_vf->num_sbs; 13560b55e27dSYuval Mintz 1357dacd88d6SYuval Mintz p_vf->num_active_rxqs = 0; 1358dacd88d6SYuval Mintz 13593da7a37aSMintz, Yuval for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { 1360007bc371SMintz, Yuval struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; 13613da7a37aSMintz, Yuval 1362007bc371SMintz, Yuval for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) { 1363007bc371SMintz, Yuval if (!p_queue->cids[j].p_cid) 1364007bc371SMintz, Yuval continue; 13653da7a37aSMintz, Yuval 1366007bc371SMintz, Yuval qed_eth_queue_cid_release(p_hwfn, 1367007bc371SMintz, Yuval p_queue->cids[j].p_cid); 1368007bc371SMintz, Yuval p_queue->cids[j].p_cid = NULL; 13693da7a37aSMintz, Yuval } 13703da7a37aSMintz, Yuval } 13710b55e27dSYuval Mintz 137208feecd7SYuval Mintz memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); 13731fe614d1SYuval Mintz memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); 13740b55e27dSYuval Mintz qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); 13750b55e27dSYuval Mintz } 13760b55e27dSYuval Mintz 13771a850bfcSMintz, Yuval /* Returns either 0, or log(size) */ 13781a850bfcSMintz, Yuval static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn, 13791a850bfcSMintz, 
Yuval struct qed_ptt *p_ptt) 13801a850bfcSMintz, Yuval { 13811a850bfcSMintz, Yuval u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE); 13821a850bfcSMintz, Yuval 13831a850bfcSMintz, Yuval if (val) 13841a850bfcSMintz, Yuval return val + 11; 13851a850bfcSMintz, Yuval return 0; 13861a850bfcSMintz, Yuval } 13871a850bfcSMintz, Yuval 13881a850bfcSMintz, Yuval static void 13891a850bfcSMintz, Yuval qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn, 13901a850bfcSMintz, Yuval struct qed_ptt *p_ptt, 13911a850bfcSMintz, Yuval struct qed_vf_info *p_vf, 13921a850bfcSMintz, Yuval struct vf_pf_resc_request *p_req, 13931a850bfcSMintz, Yuval struct pf_vf_resc *p_resp) 13941a850bfcSMintz, Yuval { 13951a850bfcSMintz, Yuval u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; 13961a850bfcSMintz, Yuval u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) - 13971a850bfcSMintz, Yuval qed_db_addr_vf(0, DQ_DEMS_LEGACY); 13981a850bfcSMintz, Yuval u32 bar_size; 13991a850bfcSMintz, Yuval 14001a850bfcSMintz, Yuval p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons); 14011a850bfcSMintz, Yuval 14021a850bfcSMintz, Yuval /* If VF didn't bother asking for QIDs than don't bother limiting 14031a850bfcSMintz, Yuval * number of CIDs. The VF doesn't care about the number, and this 14041a850bfcSMintz, Yuval * has the likely result of causing an additional acquisition. 14051a850bfcSMintz, Yuval */ 14061a850bfcSMintz, Yuval if (!(p_vf->acquire.vfdev_info.capabilities & 14071a850bfcSMintz, Yuval VFPF_ACQUIRE_CAP_QUEUE_QIDS)) 14081a850bfcSMintz, Yuval return; 14091a850bfcSMintz, Yuval 14101a850bfcSMintz, Yuval /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount 14111a850bfcSMintz, Yuval * that would make sure doorbells for all CIDs fall within the bar. 14121a850bfcSMintz, Yuval * If it doesn't, make sure regview window is sufficient. 
14131a850bfcSMintz, Yuval */ 14141a850bfcSMintz, Yuval if (p_vf->acquire.vfdev_info.capabilities & 14151a850bfcSMintz, Yuval VFPF_ACQUIRE_CAP_PHYSICAL_BAR) { 14161a850bfcSMintz, Yuval bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); 14171a850bfcSMintz, Yuval if (bar_size) 14181a850bfcSMintz, Yuval bar_size = 1 << bar_size; 14191a850bfcSMintz, Yuval 14201a850bfcSMintz, Yuval if (p_hwfn->cdev->num_hwfns > 1) 14211a850bfcSMintz, Yuval bar_size /= 2; 14221a850bfcSMintz, Yuval } else { 14231a850bfcSMintz, Yuval bar_size = PXP_VF_BAR0_DQ_LENGTH; 14241a850bfcSMintz, Yuval } 14251a850bfcSMintz, Yuval 14261a850bfcSMintz, Yuval if (bar_size / db_size < 256) 14271a850bfcSMintz, Yuval p_resp->num_cids = min_t(u8, p_resp->num_cids, 14281a850bfcSMintz, Yuval (u8)(bar_size / db_size)); 14291a850bfcSMintz, Yuval } 14301a850bfcSMintz, Yuval 14311cf2b1a9SYuval Mintz static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, 14321cf2b1a9SYuval Mintz struct qed_ptt *p_ptt, 14331cf2b1a9SYuval Mintz struct qed_vf_info *p_vf, 14341cf2b1a9SYuval Mintz struct vf_pf_resc_request *p_req, 14351cf2b1a9SYuval Mintz struct pf_vf_resc *p_resp) 14361cf2b1a9SYuval Mintz { 1437007bc371SMintz, Yuval u8 i; 14381cf2b1a9SYuval Mintz 14391cf2b1a9SYuval Mintz /* Queue related information */ 14401cf2b1a9SYuval Mintz p_resp->num_rxqs = p_vf->num_rxqs; 14411cf2b1a9SYuval Mintz p_resp->num_txqs = p_vf->num_txqs; 14421cf2b1a9SYuval Mintz p_resp->num_sbs = p_vf->num_sbs; 14431cf2b1a9SYuval Mintz 14441cf2b1a9SYuval Mintz for (i = 0; i < p_resp->num_sbs; i++) { 14451cf2b1a9SYuval Mintz p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; 14461cf2b1a9SYuval Mintz p_resp->hw_sbs[i].sb_qid = 0; 14471cf2b1a9SYuval Mintz } 14481cf2b1a9SYuval Mintz 14491cf2b1a9SYuval Mintz /* These fields are filled for backward compatibility. 14501cf2b1a9SYuval Mintz * Unused by modern vfs. 
14511cf2b1a9SYuval Mintz */ 14521cf2b1a9SYuval Mintz for (i = 0; i < p_resp->num_rxqs; i++) { 14531cf2b1a9SYuval Mintz qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, 14541cf2b1a9SYuval Mintz (u16 *)&p_resp->hw_qid[i]); 1455007bc371SMintz, Yuval p_resp->cid[i] = i; 14561cf2b1a9SYuval Mintz } 14571cf2b1a9SYuval Mintz 14581cf2b1a9SYuval Mintz /* Filter related information */ 14591cf2b1a9SYuval Mintz p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, 14601cf2b1a9SYuval Mintz p_req->num_mac_filters); 14611cf2b1a9SYuval Mintz p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, 14621cf2b1a9SYuval Mintz p_req->num_vlan_filters); 14631cf2b1a9SYuval Mintz 14641a850bfcSMintz, Yuval qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); 146508bc8f15SMintz, Yuval 14661cf2b1a9SYuval Mintz /* This isn't really needed/enforced, but some legacy VFs might depend 14671cf2b1a9SYuval Mintz * on the correct filling of this field. 14681cf2b1a9SYuval Mintz */ 14691cf2b1a9SYuval Mintz p_resp->num_mc_filters = QED_MAX_MC_ADDRS; 14701cf2b1a9SYuval Mintz 14711cf2b1a9SYuval Mintz /* Validate sufficient resources for VF */ 14721cf2b1a9SYuval Mintz if (p_resp->num_rxqs < p_req->num_rxqs || 14731cf2b1a9SYuval Mintz p_resp->num_txqs < p_req->num_txqs || 14741cf2b1a9SYuval Mintz p_resp->num_sbs < p_req->num_sbs || 14751cf2b1a9SYuval Mintz p_resp->num_mac_filters < p_req->num_mac_filters || 14761cf2b1a9SYuval Mintz p_resp->num_vlan_filters < p_req->num_vlan_filters || 147708bc8f15SMintz, Yuval p_resp->num_mc_filters < p_req->num_mc_filters || 147808bc8f15SMintz, Yuval p_resp->num_cids < p_req->num_cids) { 14791cf2b1a9SYuval Mintz DP_VERBOSE(p_hwfn, 14801cf2b1a9SYuval Mintz QED_MSG_IOV, 148108bc8f15SMintz, Yuval "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n", 14821cf2b1a9SYuval Mintz p_vf->abs_vf_id, 14831cf2b1a9SYuval Mintz p_req->num_rxqs, 14841cf2b1a9SYuval 
Mintz p_resp->num_rxqs, 14851cf2b1a9SYuval Mintz p_req->num_rxqs, 14861cf2b1a9SYuval Mintz p_resp->num_txqs, 14871cf2b1a9SYuval Mintz p_req->num_sbs, 14881cf2b1a9SYuval Mintz p_resp->num_sbs, 14891cf2b1a9SYuval Mintz p_req->num_mac_filters, 14901cf2b1a9SYuval Mintz p_resp->num_mac_filters, 14911cf2b1a9SYuval Mintz p_req->num_vlan_filters, 14921cf2b1a9SYuval Mintz p_resp->num_vlan_filters, 149308bc8f15SMintz, Yuval p_req->num_mc_filters, 149408bc8f15SMintz, Yuval p_resp->num_mc_filters, 149508bc8f15SMintz, Yuval p_req->num_cids, p_resp->num_cids); 1496a044df83SYuval Mintz 1497a044df83SYuval Mintz /* Some legacy OSes are incapable of correctly handling this 1498a044df83SYuval Mintz * failure. 1499a044df83SYuval Mintz */ 1500a044df83SYuval Mintz if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 1501a044df83SYuval Mintz ETH_HSI_VER_NO_PKT_LEN_TUNN) && 1502a044df83SYuval Mintz (p_vf->acquire.vfdev_info.os_type == 1503a044df83SYuval Mintz VFPF_ACQUIRE_OS_WINDOWS)) 1504a044df83SYuval Mintz return PFVF_STATUS_SUCCESS; 1505a044df83SYuval Mintz 15061cf2b1a9SYuval Mintz return PFVF_STATUS_NO_RESOURCE; 15071cf2b1a9SYuval Mintz } 15081cf2b1a9SYuval Mintz 15091cf2b1a9SYuval Mintz return PFVF_STATUS_SUCCESS; 15101cf2b1a9SYuval Mintz } 15111cf2b1a9SYuval Mintz 15121cf2b1a9SYuval Mintz static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, 15131cf2b1a9SYuval Mintz struct pfvf_stats_info *p_stats) 15141cf2b1a9SYuval Mintz { 15151cf2b1a9SYuval Mintz p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + 15161cf2b1a9SYuval Mintz offsetof(struct mstorm_vf_zone, 15171cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 15181cf2b1a9SYuval Mintz p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); 15191cf2b1a9SYuval Mintz p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + 15201cf2b1a9SYuval Mintz offsetof(struct ustorm_vf_zone, 15211cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 15221cf2b1a9SYuval Mintz p_stats->ustats.len = sizeof(struct 
eth_ustorm_per_queue_stat); 15231cf2b1a9SYuval Mintz p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + 15241cf2b1a9SYuval Mintz offsetof(struct pstorm_vf_zone, 15251cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 15261cf2b1a9SYuval Mintz p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); 15271cf2b1a9SYuval Mintz p_stats->tstats.address = 0; 15281cf2b1a9SYuval Mintz p_stats->tstats.len = 0; 15291cf2b1a9SYuval Mintz } 15301cf2b1a9SYuval Mintz 15311408cc1fSYuval Mintz static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, 153237bff2b9SYuval Mintz struct qed_ptt *p_ptt, 15331408cc1fSYuval Mintz struct qed_vf_info *vf) 153437bff2b9SYuval Mintz { 15351408cc1fSYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 15361408cc1fSYuval Mintz struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; 15371408cc1fSYuval Mintz struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 15381408cc1fSYuval Mintz struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; 15391cf2b1a9SYuval Mintz u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; 15401408cc1fSYuval Mintz struct pf_vf_resc *resc = &resp->resc; 15411fe614d1SYuval Mintz int rc; 15421fe614d1SYuval Mintz 15431fe614d1SYuval Mintz memset(resp, 0, sizeof(*resp)); 15441408cc1fSYuval Mintz 154505fafbfbSYuval Mintz /* Write the PF version so that VF would know which version 154605fafbfbSYuval Mintz * is supported - might be later overriden. This guarantees that 154705fafbfbSYuval Mintz * VF could recognize legacy PF based on lack of versions in reply. 
154805fafbfbSYuval Mintz */ 154905fafbfbSYuval Mintz pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; 155005fafbfbSYuval Mintz pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; 155105fafbfbSYuval Mintz 1552a044df83SYuval Mintz if (vf->state != VF_FREE && vf->state != VF_STOPPED) { 1553a044df83SYuval Mintz DP_VERBOSE(p_hwfn, 1554a044df83SYuval Mintz QED_MSG_IOV, 1555a044df83SYuval Mintz "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", 1556a044df83SYuval Mintz vf->abs_vf_id, vf->state); 1557a044df83SYuval Mintz goto out; 1558a044df83SYuval Mintz } 1559a044df83SYuval Mintz 15601408cc1fSYuval Mintz /* Validate FW compatibility */ 15611fe614d1SYuval Mintz if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { 1562a044df83SYuval Mintz if (req->vfdev_info.capabilities & 1563a044df83SYuval Mintz VFPF_ACQUIRE_CAP_PRE_FP_HSI) { 1564a044df83SYuval Mintz struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; 1565a044df83SYuval Mintz 1566a044df83SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1567a044df83SYuval Mintz "VF[%d] is pre-fastpath HSI\n", 1568a044df83SYuval Mintz vf->abs_vf_id); 1569a044df83SYuval Mintz p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; 1570a044df83SYuval Mintz p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; 1571a044df83SYuval Mintz } else { 15721408cc1fSYuval Mintz DP_INFO(p_hwfn, 15733321b6c2SColin Ian King "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", 15741408cc1fSYuval Mintz vf->abs_vf_id, 15751fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_major, 15761fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_minor, 15771fe614d1SYuval Mintz ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 15781fe614d1SYuval Mintz 15791408cc1fSYuval Mintz goto out; 15801408cc1fSYuval Mintz } 1581a044df83SYuval Mintz } 15821408cc1fSYuval Mintz 15831408cc1fSYuval Mintz /* On 100g PFs, prevent old VFs from loading */ 15841408cc1fSYuval Mintz if ((p_hwfn->cdev->num_hwfns > 1) && 15851408cc1fSYuval Mintz 
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { 15861408cc1fSYuval Mintz DP_INFO(p_hwfn, 15871408cc1fSYuval Mintz "VF[%d] is running an old driver that doesn't support 100g\n", 15881408cc1fSYuval Mintz vf->abs_vf_id); 15891408cc1fSYuval Mintz goto out; 15901408cc1fSYuval Mintz } 15911408cc1fSYuval Mintz 15921fe614d1SYuval Mintz /* Store the acquire message */ 15931fe614d1SYuval Mintz memcpy(&vf->acquire, req, sizeof(vf->acquire)); 15941408cc1fSYuval Mintz 15951408cc1fSYuval Mintz vf->opaque_fid = req->vfdev_info.opaque_fid; 15961408cc1fSYuval Mintz 15971408cc1fSYuval Mintz vf->vf_bulletin = req->bulletin_addr; 15981408cc1fSYuval Mintz vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? 15991408cc1fSYuval Mintz vf->bulletin.size : req->bulletin_size; 16001408cc1fSYuval Mintz 16011408cc1fSYuval Mintz /* fill in pfdev info */ 16021408cc1fSYuval Mintz pfdev_info->chip_num = p_hwfn->cdev->chip_num; 16031408cc1fSYuval Mintz pfdev_info->db_size = 0; 160421dd79e8STomer Tayar pfdev_info->indices_per_sb = PIS_PER_SB_E4; 16051408cc1fSYuval Mintz 16061408cc1fSYuval Mintz pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | 16071408cc1fSYuval Mintz PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; 16081408cc1fSYuval Mintz if (p_hwfn->cdev->num_hwfns > 1) 16091408cc1fSYuval Mintz pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; 16101408cc1fSYuval Mintz 161108bc8f15SMintz, Yuval /* Share our ability to use multiple queue-ids only with VFs 161208bc8f15SMintz, Yuval * that request it. 
161308bc8f15SMintz, Yuval */ 161408bc8f15SMintz, Yuval if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) 161508bc8f15SMintz, Yuval pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; 161608bc8f15SMintz, Yuval 16171a850bfcSMintz, Yuval /* Share the sizes of the bars with VF */ 16181a850bfcSMintz, Yuval resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); 16191a850bfcSMintz, Yuval 16201cf2b1a9SYuval Mintz qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); 16211408cc1fSYuval Mintz 16221408cc1fSYuval Mintz memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); 16231408cc1fSYuval Mintz 16241408cc1fSYuval Mintz pfdev_info->fw_major = FW_MAJOR_VERSION; 16251408cc1fSYuval Mintz pfdev_info->fw_minor = FW_MINOR_VERSION; 16261408cc1fSYuval Mintz pfdev_info->fw_rev = FW_REVISION_VERSION; 16271408cc1fSYuval Mintz pfdev_info->fw_eng = FW_ENGINEERING_VERSION; 1628a044df83SYuval Mintz 1629a044df83SYuval Mintz /* Incorrect when legacy, but doesn't matter as legacy isn't reading 1630a044df83SYuval Mintz * this field. 1631a044df83SYuval Mintz */ 16321a635e48SYuval Mintz pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, 16331fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_minor); 16341408cc1fSYuval Mintz pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; 16351408cc1fSYuval Mintz qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); 16361408cc1fSYuval Mintz 16371408cc1fSYuval Mintz pfdev_info->dev_type = p_hwfn->cdev->type; 16381408cc1fSYuval Mintz pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; 16391408cc1fSYuval Mintz 16401cf2b1a9SYuval Mintz /* Fill resources available to VF; Make sure there are enough to 16411cf2b1a9SYuval Mintz * satisfy the VF's request. 
16421408cc1fSYuval Mintz */ 16431cf2b1a9SYuval Mintz vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, 16441cf2b1a9SYuval Mintz &req->resc_request, resc); 16451cf2b1a9SYuval Mintz if (vfpf_status != PFVF_STATUS_SUCCESS) 16461cf2b1a9SYuval Mintz goto out; 16471408cc1fSYuval Mintz 16481fe614d1SYuval Mintz /* Start the VF in FW */ 16491fe614d1SYuval Mintz rc = qed_sp_vf_start(p_hwfn, vf); 16501fe614d1SYuval Mintz if (rc) { 16511fe614d1SYuval Mintz DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); 16521fe614d1SYuval Mintz vfpf_status = PFVF_STATUS_FAILURE; 16531fe614d1SYuval Mintz goto out; 16541fe614d1SYuval Mintz } 16551fe614d1SYuval Mintz 16561408cc1fSYuval Mintz /* Fill agreed size of bulletin board in response */ 16571408cc1fSYuval Mintz resp->bulletin_size = vf->bulletin.size; 165836558c3dSYuval Mintz qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); 16591408cc1fSYuval Mintz 16601408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 16611408cc1fSYuval Mintz QED_MSG_IOV, 16621408cc1fSYuval Mintz "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" 16631408cc1fSYuval Mintz "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", 16641408cc1fSYuval Mintz vf->abs_vf_id, 16651408cc1fSYuval Mintz resp->pfdev_info.chip_num, 16661408cc1fSYuval Mintz resp->pfdev_info.db_size, 16671408cc1fSYuval Mintz resp->pfdev_info.indices_per_sb, 16681408cc1fSYuval Mintz resp->pfdev_info.capabilities, 16691408cc1fSYuval Mintz resc->num_rxqs, 16701408cc1fSYuval Mintz resc->num_txqs, 16711408cc1fSYuval Mintz resc->num_sbs, 16721408cc1fSYuval Mintz resc->num_mac_filters, 16731408cc1fSYuval Mintz resc->num_vlan_filters); 16741408cc1fSYuval Mintz vf->state = VF_ACQUIRED; 16751408cc1fSYuval Mintz 16761408cc1fSYuval Mintz /* Prepare Response */ 16771408cc1fSYuval Mintz out: 16781408cc1fSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, 16791408cc1fSYuval Mintz sizeof(struct 
pfvf_acquire_resp_tlv), vfpf_status); 168037bff2b9SYuval Mintz } 168137bff2b9SYuval Mintz 16826ddc7608SYuval Mintz static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, 16836ddc7608SYuval Mintz struct qed_vf_info *p_vf, bool val) 16846ddc7608SYuval Mintz { 16856ddc7608SYuval Mintz struct qed_sp_vport_update_params params; 16866ddc7608SYuval Mintz int rc; 16876ddc7608SYuval Mintz 16886ddc7608SYuval Mintz if (val == p_vf->spoof_chk) { 16896ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 16906ddc7608SYuval Mintz "Spoofchk value[%d] is already configured\n", val); 16916ddc7608SYuval Mintz return 0; 16926ddc7608SYuval Mintz } 16936ddc7608SYuval Mintz 16946ddc7608SYuval Mintz memset(¶ms, 0, sizeof(struct qed_sp_vport_update_params)); 16956ddc7608SYuval Mintz params.opaque_fid = p_vf->opaque_fid; 16966ddc7608SYuval Mintz params.vport_id = p_vf->vport_id; 16976ddc7608SYuval Mintz params.update_anti_spoofing_en_flg = 1; 16986ddc7608SYuval Mintz params.anti_spoofing_en = val; 16996ddc7608SYuval Mintz 17006ddc7608SYuval Mintz rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); 1701cb1fa088SYuval Mintz if (!rc) { 17026ddc7608SYuval Mintz p_vf->spoof_chk = val; 17036ddc7608SYuval Mintz p_vf->req_spoofchk_val = p_vf->spoof_chk; 17046ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 17056ddc7608SYuval Mintz "Spoofchk val[%d] configured\n", val); 17066ddc7608SYuval Mintz } else { 17076ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 17086ddc7608SYuval Mintz "Spoofchk configuration[val:%d] failed for VF[%d]\n", 17096ddc7608SYuval Mintz val, p_vf->relative_vf_id); 17106ddc7608SYuval Mintz } 17116ddc7608SYuval Mintz 17126ddc7608SYuval Mintz return rc; 17136ddc7608SYuval Mintz } 17146ddc7608SYuval Mintz 171508feecd7SYuval Mintz static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, 171608feecd7SYuval Mintz struct qed_vf_info *p_vf) 171708feecd7SYuval Mintz { 171808feecd7SYuval Mintz struct qed_filter_ucast filter; 171908feecd7SYuval Mintz int 
rc = 0; 172008feecd7SYuval Mintz int i; 172108feecd7SYuval Mintz 172208feecd7SYuval Mintz memset(&filter, 0, sizeof(filter)); 172308feecd7SYuval Mintz filter.is_rx_filter = 1; 172408feecd7SYuval Mintz filter.is_tx_filter = 1; 172508feecd7SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 172608feecd7SYuval Mintz filter.opcode = QED_FILTER_ADD; 172708feecd7SYuval Mintz 172808feecd7SYuval Mintz /* Reconfigure vlans */ 172908feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 173008feecd7SYuval Mintz if (!p_vf->shadow_config.vlans[i].used) 173108feecd7SYuval Mintz continue; 173208feecd7SYuval Mintz 173308feecd7SYuval Mintz filter.type = QED_FILTER_VLAN; 173408feecd7SYuval Mintz filter.vlan = p_vf->shadow_config.vlans[i].vid; 17351a635e48SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 173608feecd7SYuval Mintz "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", 173708feecd7SYuval Mintz filter.vlan, p_vf->relative_vf_id); 17381a635e48SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 17391a635e48SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 174008feecd7SYuval Mintz if (rc) { 174108feecd7SYuval Mintz DP_NOTICE(p_hwfn, 174208feecd7SYuval Mintz "Failed to configure VLAN [%04x] to VF [%04x]\n", 174308feecd7SYuval Mintz filter.vlan, p_vf->relative_vf_id); 174408feecd7SYuval Mintz break; 174508feecd7SYuval Mintz } 174608feecd7SYuval Mintz } 174708feecd7SYuval Mintz 174808feecd7SYuval Mintz return rc; 174908feecd7SYuval Mintz } 175008feecd7SYuval Mintz 175108feecd7SYuval Mintz static int 175208feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, 175308feecd7SYuval Mintz struct qed_vf_info *p_vf, u64 events) 175408feecd7SYuval Mintz { 175508feecd7SYuval Mintz int rc = 0; 175608feecd7SYuval Mintz 17571a635e48SYuval Mintz if ((events & BIT(VLAN_ADDR_FORCED)) && 175808feecd7SYuval Mintz !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) 175908feecd7SYuval Mintz rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); 
176008feecd7SYuval Mintz 176108feecd7SYuval Mintz return rc; 176208feecd7SYuval Mintz } 176308feecd7SYuval Mintz 176408feecd7SYuval Mintz static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, 176508feecd7SYuval Mintz struct qed_vf_info *p_vf, u64 events) 176608feecd7SYuval Mintz { 176708feecd7SYuval Mintz int rc = 0; 176808feecd7SYuval Mintz struct qed_filter_ucast filter; 176908feecd7SYuval Mintz 177008feecd7SYuval Mintz if (!p_vf->vport_instance) 177108feecd7SYuval Mintz return -EINVAL; 177208feecd7SYuval Mintz 17737425d822SShahed Shaikh if ((events & BIT(MAC_ADDR_FORCED)) || 17747425d822SShahed Shaikh p_vf->p_vf_info.is_trusted_configured) { 1775eff16960SYuval Mintz /* Since there's no way [currently] of removing the MAC, 1776eff16960SYuval Mintz * we can always assume this means we need to force it. 1777eff16960SYuval Mintz */ 1778eff16960SYuval Mintz memset(&filter, 0, sizeof(filter)); 1779eff16960SYuval Mintz filter.type = QED_FILTER_MAC; 1780eff16960SYuval Mintz filter.opcode = QED_FILTER_REPLACE; 1781eff16960SYuval Mintz filter.is_rx_filter = 1; 1782eff16960SYuval Mintz filter.is_tx_filter = 1; 1783eff16960SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 1784eff16960SYuval Mintz ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); 1785eff16960SYuval Mintz 1786eff16960SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 1787eff16960SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 1788eff16960SYuval Mintz if (rc) { 1789eff16960SYuval Mintz DP_NOTICE(p_hwfn, 1790eff16960SYuval Mintz "PF failed to configure MAC for VF\n"); 1791eff16960SYuval Mintz return rc; 1792eff16960SYuval Mintz } 17937425d822SShahed Shaikh if (p_vf->p_vf_info.is_trusted_configured) 17947425d822SShahed Shaikh p_vf->configured_features |= 17957425d822SShahed Shaikh BIT(VFPF_BULLETIN_MAC_ADDR); 17967425d822SShahed Shaikh else 17977425d822SShahed Shaikh p_vf->configured_features |= 17987425d822SShahed Shaikh BIT(MAC_ADDR_FORCED); 1799eff16960SYuval Mintz } 
1800eff16960SYuval Mintz 18011a635e48SYuval Mintz if (events & BIT(VLAN_ADDR_FORCED)) { 180208feecd7SYuval Mintz struct qed_sp_vport_update_params vport_update; 180308feecd7SYuval Mintz u8 removal; 180408feecd7SYuval Mintz int i; 180508feecd7SYuval Mintz 180608feecd7SYuval Mintz memset(&filter, 0, sizeof(filter)); 180708feecd7SYuval Mintz filter.type = QED_FILTER_VLAN; 180808feecd7SYuval Mintz filter.is_rx_filter = 1; 180908feecd7SYuval Mintz filter.is_tx_filter = 1; 181008feecd7SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 181108feecd7SYuval Mintz filter.vlan = p_vf->bulletin.p_virt->pvid; 181208feecd7SYuval Mintz filter.opcode = filter.vlan ? QED_FILTER_REPLACE : 181308feecd7SYuval Mintz QED_FILTER_FLUSH; 181408feecd7SYuval Mintz 181508feecd7SYuval Mintz /* Send the ramrod */ 181608feecd7SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 181708feecd7SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 181808feecd7SYuval Mintz if (rc) { 181908feecd7SYuval Mintz DP_NOTICE(p_hwfn, 182008feecd7SYuval Mintz "PF failed to configure VLAN for VF\n"); 182108feecd7SYuval Mintz return rc; 182208feecd7SYuval Mintz } 182308feecd7SYuval Mintz 182408feecd7SYuval Mintz /* Update the default-vlan & silent vlan stripping */ 182508feecd7SYuval Mintz memset(&vport_update, 0, sizeof(vport_update)); 182608feecd7SYuval Mintz vport_update.opaque_fid = p_vf->opaque_fid; 182708feecd7SYuval Mintz vport_update.vport_id = p_vf->vport_id; 182808feecd7SYuval Mintz vport_update.update_default_vlan_enable_flg = 1; 182908feecd7SYuval Mintz vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; 183008feecd7SYuval Mintz vport_update.update_default_vlan_flg = 1; 183108feecd7SYuval Mintz vport_update.default_vlan = filter.vlan; 183208feecd7SYuval Mintz 183308feecd7SYuval Mintz vport_update.update_inner_vlan_removal_flg = 1; 183408feecd7SYuval Mintz removal = filter.vlan ? 
1 183508feecd7SYuval Mintz : p_vf->shadow_config.inner_vlan_removal; 183608feecd7SYuval Mintz vport_update.inner_vlan_removal_flg = removal; 183708feecd7SYuval Mintz vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0; 183808feecd7SYuval Mintz rc = qed_sp_vport_update(p_hwfn, 183908feecd7SYuval Mintz &vport_update, 184008feecd7SYuval Mintz QED_SPQ_MODE_EBLOCK, NULL); 184108feecd7SYuval Mintz if (rc) { 184208feecd7SYuval Mintz DP_NOTICE(p_hwfn, 184308feecd7SYuval Mintz "PF failed to configure VF vport for vlan\n"); 184408feecd7SYuval Mintz return rc; 184508feecd7SYuval Mintz } 184608feecd7SYuval Mintz 184708feecd7SYuval Mintz /* Update all the Rx queues */ 184808feecd7SYuval Mintz for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { 1849007bc371SMintz, Yuval struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; 1850007bc371SMintz, Yuval struct qed_queue_cid *p_cid = NULL; 185108feecd7SYuval Mintz 1852007bc371SMintz, Yuval /* There can be at most 1 Rx queue on qzone. Find it */ 1853007bc371SMintz, Yuval p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); 18543da7a37aSMintz, Yuval if (!p_cid) 185508feecd7SYuval Mintz continue; 185608feecd7SYuval Mintz 18573da7a37aSMintz, Yuval rc = qed_sp_eth_rx_queues_update(p_hwfn, 18583da7a37aSMintz, Yuval (void **)&p_cid, 185908feecd7SYuval Mintz 1, 0, 1, 186008feecd7SYuval Mintz QED_SPQ_MODE_EBLOCK, 186108feecd7SYuval Mintz NULL); 186208feecd7SYuval Mintz if (rc) { 186308feecd7SYuval Mintz DP_NOTICE(p_hwfn, 186408feecd7SYuval Mintz "Failed to send Rx update fo queue[0x%04x]\n", 18653da7a37aSMintz, Yuval p_cid->rel.queue_id); 186608feecd7SYuval Mintz return rc; 186708feecd7SYuval Mintz } 186808feecd7SYuval Mintz } 186908feecd7SYuval Mintz 187008feecd7SYuval Mintz if (filter.vlan) 187108feecd7SYuval Mintz p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; 187208feecd7SYuval Mintz else 18731a635e48SYuval Mintz p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); 187408feecd7SYuval Mintz } 187508feecd7SYuval Mintz 187608feecd7SYuval 
Mintz /* If forced features are terminated, we need to configure the shadow 187708feecd7SYuval Mintz * configuration back again. 187808feecd7SYuval Mintz */ 187908feecd7SYuval Mintz if (events) 188008feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); 188108feecd7SYuval Mintz 188208feecd7SYuval Mintz return rc; 188308feecd7SYuval Mintz } 188408feecd7SYuval Mintz 1885dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, 1886dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1887dacd88d6SYuval Mintz struct qed_vf_info *vf) 1888dacd88d6SYuval Mintz { 1889dacd88d6SYuval Mintz struct qed_sp_vport_start_params params = { 0 }; 1890dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1891dacd88d6SYuval Mintz struct vfpf_vport_start_tlv *start; 1892dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1893dacd88d6SYuval Mintz struct qed_vf_info *vf_info; 189408feecd7SYuval Mintz u64 *p_bitmap; 1895dacd88d6SYuval Mintz int sb_id; 1896dacd88d6SYuval Mintz int rc; 1897dacd88d6SYuval Mintz 1898dacd88d6SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); 1899dacd88d6SYuval Mintz if (!vf_info) { 1900dacd88d6SYuval Mintz DP_NOTICE(p_hwfn->cdev, 1901dacd88d6SYuval Mintz "Failed to get VF info, invalid vfid [%d]\n", 1902dacd88d6SYuval Mintz vf->relative_vf_id); 1903dacd88d6SYuval Mintz return; 1904dacd88d6SYuval Mintz } 1905dacd88d6SYuval Mintz 1906dacd88d6SYuval Mintz vf->state = VF_ENABLED; 1907dacd88d6SYuval Mintz start = &mbx->req_virt->start_vport; 1908dacd88d6SYuval Mintz 1909b801b159SMintz, Yuval qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); 1910b801b159SMintz, Yuval 1911dacd88d6SYuval Mintz /* Initialize Status block in CAU */ 1912dacd88d6SYuval Mintz for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { 1913dacd88d6SYuval Mintz if (!start->sb_addr[sb_id]) { 1914dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1915dacd88d6SYuval Mintz "VF[%d] did not fill the address of SB %d\n", 
1916dacd88d6SYuval Mintz vf->relative_vf_id, sb_id); 1917dacd88d6SYuval Mintz break; 1918dacd88d6SYuval Mintz } 1919dacd88d6SYuval Mintz 1920dacd88d6SYuval Mintz qed_int_cau_conf_sb(p_hwfn, p_ptt, 1921dacd88d6SYuval Mintz start->sb_addr[sb_id], 19221a635e48SYuval Mintz vf->igu_sbs[sb_id], vf->abs_vf_id, 1); 1923dacd88d6SYuval Mintz } 1924dacd88d6SYuval Mintz 1925dacd88d6SYuval Mintz vf->mtu = start->mtu; 192608feecd7SYuval Mintz vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; 192708feecd7SYuval Mintz 192808feecd7SYuval Mintz /* Take into consideration configuration forced by hypervisor; 192908feecd7SYuval Mintz * If none is configured, use the supplied VF values [for old 193008feecd7SYuval Mintz * vfs that would still be fine, since they passed '0' as padding]. 193108feecd7SYuval Mintz */ 193208feecd7SYuval Mintz p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; 19331a635e48SYuval Mintz if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { 193408feecd7SYuval Mintz u8 vf_req = start->only_untagged; 193508feecd7SYuval Mintz 193608feecd7SYuval Mintz vf_info->bulletin.p_virt->default_only_untagged = vf_req; 193708feecd7SYuval Mintz *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; 193808feecd7SYuval Mintz } 1939dacd88d6SYuval Mintz 1940dacd88d6SYuval Mintz params.tpa_mode = start->tpa_mode; 1941dacd88d6SYuval Mintz params.remove_inner_vlan = start->inner_vlan_removal; 1942831bfb0eSYuval Mintz params.tx_switching = true; 1943dacd88d6SYuval Mintz 194408feecd7SYuval Mintz params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; 1945dacd88d6SYuval Mintz params.drop_ttl0 = false; 1946dacd88d6SYuval Mintz params.concrete_fid = vf->concrete_fid; 1947dacd88d6SYuval Mintz params.opaque_fid = vf->opaque_fid; 1948dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 1949dacd88d6SYuval Mintz params.max_buffers_per_cqe = start->max_buffers_per_cqe; 1950dacd88d6SYuval Mintz params.mtu = vf->mtu; 1951ff929696SManish Chopra 
1952ff929696SManish Chopra /* Non trusted VFs should enable control frame filtering */ 1953ff929696SManish Chopra params.check_mac = !vf->p_vf_info.is_trusted_configured; 1954dacd88d6SYuval Mintz 1955dacd88d6SYuval Mintz rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); 19561a635e48SYuval Mintz if (rc) { 1957dacd88d6SYuval Mintz DP_ERR(p_hwfn, 1958dacd88d6SYuval Mintz "qed_iov_vf_mbx_start_vport returned error %d\n", rc); 1959dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1960dacd88d6SYuval Mintz } else { 1961dacd88d6SYuval Mintz vf->vport_instance++; 196208feecd7SYuval Mintz 196308feecd7SYuval Mintz /* Force configuration if needed on the newly opened vport */ 196408feecd7SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); 19656ddc7608SYuval Mintz 19666ddc7608SYuval Mintz __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); 1967dacd88d6SYuval Mintz } 1968dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, 1969dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 1970dacd88d6SYuval Mintz } 1971dacd88d6SYuval Mintz 1972dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, 1973dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1974dacd88d6SYuval Mintz struct qed_vf_info *vf) 1975dacd88d6SYuval Mintz { 1976dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1977dacd88d6SYuval Mintz int rc; 1978dacd88d6SYuval Mintz 1979dacd88d6SYuval Mintz vf->vport_instance--; 19806ddc7608SYuval Mintz vf->spoof_chk = false; 1981dacd88d6SYuval Mintz 1982f109c240SMintz, Yuval if ((qed_iov_validate_active_rxq(p_hwfn, vf)) || 1983f109c240SMintz, Yuval (qed_iov_validate_active_txq(p_hwfn, vf))) { 1984f109c240SMintz, Yuval vf->b_malicious = true; 1985f109c240SMintz, Yuval DP_NOTICE(p_hwfn, 1986dc99da4fSColin Ian King "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", 1987f109c240SMintz, Yuval vf->abs_vf_id); 1988f109c240SMintz, Yuval status = PFVF_STATUS_MALICIOUS; 1989f109c240SMintz, Yuval 
goto out; 1990f109c240SMintz, Yuval } 1991f109c240SMintz, Yuval 1992dacd88d6SYuval Mintz rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); 19931a635e48SYuval Mintz if (rc) { 1994dacd88d6SYuval Mintz DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", 1995dacd88d6SYuval Mintz rc); 1996dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1997dacd88d6SYuval Mintz } 1998dacd88d6SYuval Mintz 199908feecd7SYuval Mintz /* Forget the configuration on the vport */ 200008feecd7SYuval Mintz vf->configured_features = 0; 200108feecd7SYuval Mintz memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); 200208feecd7SYuval Mintz 2003f109c240SMintz, Yuval out: 2004dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, 2005dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 2006dacd88d6SYuval Mintz } 2007dacd88d6SYuval Mintz 2008dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, 2009dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2010a044df83SYuval Mintz struct qed_vf_info *vf, 2011a044df83SYuval Mintz u8 status, bool b_legacy) 2012dacd88d6SYuval Mintz { 2013dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2014dacd88d6SYuval Mintz struct pfvf_start_queue_resp_tlv *p_tlv; 2015dacd88d6SYuval Mintz struct vfpf_start_rxq_tlv *req; 2016a044df83SYuval Mintz u16 length; 2017dacd88d6SYuval Mintz 2018dacd88d6SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 2019dacd88d6SYuval Mintz 2020a044df83SYuval Mintz /* Taking a bigger struct instead of adding a TLV to list was a 2021a044df83SYuval Mintz * mistake, but one which we're now stuck with, as some older 2022a044df83SYuval Mintz * clients assume the size of the previous response. 
2023a044df83SYuval Mintz */ 2024a044df83SYuval Mintz if (!b_legacy) 2025a044df83SYuval Mintz length = sizeof(*p_tlv); 2026a044df83SYuval Mintz else 2027a044df83SYuval Mintz length = sizeof(struct pfvf_def_resp_tlv); 2028a044df83SYuval Mintz 2029dacd88d6SYuval Mintz p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, 2030a044df83SYuval Mintz length); 2031dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 2032dacd88d6SYuval Mintz sizeof(struct channel_list_end_tlv)); 2033dacd88d6SYuval Mintz 2034dacd88d6SYuval Mintz /* Update the TLV with the response */ 2035a044df83SYuval Mintz if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { 2036dacd88d6SYuval Mintz req = &mbx->req_virt->start_rxq; 2037351a4dedSYuval Mintz p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + 2038351a4dedSYuval Mintz offsetof(struct mstorm_vf_zone, 2039351a4dedSYuval Mintz non_trigger.eth_rx_queue_producers) + 2040351a4dedSYuval Mintz sizeof(struct eth_rx_prod_data) * req->rx_qid; 2041dacd88d6SYuval Mintz } 2042dacd88d6SYuval Mintz 2043a044df83SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); 2044dacd88d6SYuval Mintz } 2045dacd88d6SYuval Mintz 2046bbe3f233SMintz, Yuval static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, 2047bbe3f233SMintz, Yuval struct qed_vf_info *p_vf, bool b_is_tx) 2048bbe3f233SMintz, Yuval { 204908bc8f15SMintz, Yuval struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; 205008bc8f15SMintz, Yuval struct vfpf_qid_tlv *p_qid_tlv; 205108bc8f15SMintz, Yuval 205208bc8f15SMintz, Yuval /* Search for the qid if the VF published its going to provide it */ 205308bc8f15SMintz, Yuval if (!(p_vf->acquire.vfdev_info.capabilities & 205408bc8f15SMintz, Yuval VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { 2055bbe3f233SMintz, Yuval if (b_is_tx) 2056bbe3f233SMintz, Yuval return QED_IOV_LEGACY_QID_TX; 2057bbe3f233SMintz, Yuval else 2058bbe3f233SMintz, Yuval return QED_IOV_LEGACY_QID_RX; 2059bbe3f233SMintz, Yuval } 2060bbe3f233SMintz, Yuval 206108bc8f15SMintz, 
Yuval p_qid_tlv = (struct vfpf_qid_tlv *) 206208bc8f15SMintz, Yuval qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 206308bc8f15SMintz, Yuval CHANNEL_TLV_QID); 206408bc8f15SMintz, Yuval if (!p_qid_tlv) { 206508bc8f15SMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_IOV, 206608bc8f15SMintz, Yuval "VF[%2x]: Failed to provide qid\n", 206708bc8f15SMintz, Yuval p_vf->relative_vf_id); 206808bc8f15SMintz, Yuval 206908bc8f15SMintz, Yuval return QED_IOV_QID_INVALID; 207008bc8f15SMintz, Yuval } 207108bc8f15SMintz, Yuval 207208bc8f15SMintz, Yuval if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { 207308bc8f15SMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_IOV, 207408bc8f15SMintz, Yuval "VF[%02x]: Provided qid out-of-bounds %02x\n", 207508bc8f15SMintz, Yuval p_vf->relative_vf_id, p_qid_tlv->qid); 207608bc8f15SMintz, Yuval return QED_IOV_QID_INVALID; 207708bc8f15SMintz, Yuval } 207808bc8f15SMintz, Yuval 207908bc8f15SMintz, Yuval return p_qid_tlv->qid; 208008bc8f15SMintz, Yuval } 208108bc8f15SMintz, Yuval 2082dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, 2083dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2084dacd88d6SYuval Mintz struct qed_vf_info *vf) 2085dacd88d6SYuval Mintz { 2086dacd88d6SYuval Mintz struct qed_queue_start_common_params params; 20873946497aSMintz, Yuval struct qed_queue_cid_vf_params vf_params; 2088dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 208941086467SYuval Mintz u8 status = PFVF_STATUS_NO_RESOURCE; 20903b19f478SMintz, Yuval u8 qid_usage_idx, vf_legacy = 0; 2091dacd88d6SYuval Mintz struct vfpf_start_rxq_tlv *req; 2092007bc371SMintz, Yuval struct qed_vf_queue *p_queue; 2093007bc371SMintz, Yuval struct qed_queue_cid *p_cid; 2094f604b17dSMintz, Yuval struct qed_sb_info sb_dummy; 2095dacd88d6SYuval Mintz int rc; 2096dacd88d6SYuval Mintz 2097dacd88d6SYuval Mintz req = &mbx->req_virt->start_rxq; 209841086467SYuval Mintz 2099f109c240SMintz, Yuval if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid, 2100f109c240SMintz, Yuval 
QED_IOV_VALIDATE_Q_DISABLE) || 210141086467SYuval Mintz !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 210241086467SYuval Mintz goto out; 210341086467SYuval Mintz 2104bbe3f233SMintz, Yuval qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); 210508bc8f15SMintz, Yuval if (qid_usage_idx == QED_IOV_QID_INVALID) 210608bc8f15SMintz, Yuval goto out; 210708bc8f15SMintz, Yuval 21083da7a37aSMintz, Yuval p_queue = &vf->vf_queues[req->rx_qid]; 210908bc8f15SMintz, Yuval if (p_queue->cids[qid_usage_idx].p_cid) 211008bc8f15SMintz, Yuval goto out; 21113da7a37aSMintz, Yuval 21123b19f478SMintz, Yuval vf_legacy = qed_vf_calculate_legacy(vf); 21133946497aSMintz, Yuval 2114bbe3f233SMintz, Yuval /* Acquire a new queue-cid */ 21153da7a37aSMintz, Yuval memset(¶ms, 0, sizeof(params)); 21163da7a37aSMintz, Yuval params.queue_id = p_queue->fw_rx_qid; 2117dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 21183da7a37aSMintz, Yuval params.stats_id = vf->abs_vf_id + 0x10; 2119f604b17dSMintz, Yuval /* Since IGU index is passed via sb_info, construct a dummy one */ 2120f604b17dSMintz, Yuval memset(&sb_dummy, 0, sizeof(sb_dummy)); 2121f604b17dSMintz, Yuval sb_dummy.igu_sb_id = req->hw_sb; 2122f604b17dSMintz, Yuval params.p_sb = &sb_dummy; 2123dacd88d6SYuval Mintz params.sb_idx = req->sb_index; 2124dacd88d6SYuval Mintz 21253946497aSMintz, Yuval memset(&vf_params, 0, sizeof(vf_params)); 21263946497aSMintz, Yuval vf_params.vfid = vf->relative_vf_id; 21273946497aSMintz, Yuval vf_params.vf_qid = (u8)req->rx_qid; 21283b19f478SMintz, Yuval vf_params.vf_legacy = vf_legacy; 2129bbe3f233SMintz, Yuval vf_params.qid_usage_idx = qid_usage_idx; 2130007bc371SMintz, Yuval p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, 2131007bc371SMintz, Yuval ¶ms, true, &vf_params); 2132007bc371SMintz, Yuval if (!p_cid) 21333da7a37aSMintz, Yuval goto out; 21343da7a37aSMintz, Yuval 2135a044df83SYuval Mintz /* Legacy VFs have their Producers in a different location, which they 2136a044df83SYuval Mintz * calculate on their 
own and clean the producer prior to this. 2137a044df83SYuval Mintz */ 21383b19f478SMintz, Yuval if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) 2139a044df83SYuval Mintz REG_WR(p_hwfn, 2140a044df83SYuval Mintz GTT_BAR0_MAP_REG_MSDM_RAM + 2141a044df83SYuval Mintz MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 2142a044df83SYuval Mintz 0); 2143a044df83SYuval Mintz 2144007bc371SMintz, Yuval rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, 2145dacd88d6SYuval Mintz req->bd_max_bytes, 2146dacd88d6SYuval Mintz req->rxq_addr, 21473da7a37aSMintz, Yuval req->cqe_pbl_addr, req->cqe_pbl_size); 2148dacd88d6SYuval Mintz if (rc) { 2149dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2150007bc371SMintz, Yuval qed_eth_queue_cid_release(p_hwfn, p_cid); 2151dacd88d6SYuval Mintz } else { 2152007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].p_cid = p_cid; 2153007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].b_is_tx = false; 215441086467SYuval Mintz status = PFVF_STATUS_SUCCESS; 2155dacd88d6SYuval Mintz vf->num_active_rxqs++; 2156dacd88d6SYuval Mintz } 2157dacd88d6SYuval Mintz 215841086467SYuval Mintz out: 21593b19f478SMintz, Yuval qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, 21603b19f478SMintz, Yuval !!(vf_legacy & 21613b19f478SMintz, Yuval QED_QCID_LEGACY_VF_RX_PROD)); 2162dacd88d6SYuval Mintz } 2163dacd88d6SYuval Mintz 2164eaf3c0c6SChopra, Manish static void 2165eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, 2166eaf3c0c6SChopra, Manish struct qed_tunnel_info *p_tun, 2167eaf3c0c6SChopra, Manish u16 tunn_feature_mask) 2168eaf3c0c6SChopra, Manish { 2169eaf3c0c6SChopra, Manish p_resp->tunn_feature_mask = tunn_feature_mask; 2170eaf3c0c6SChopra, Manish p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; 2171eaf3c0c6SChopra, Manish p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; 2172eaf3c0c6SChopra, Manish p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; 2173eaf3c0c6SChopra, Manish p_resp->l2gre_mode = 
p_tun->l2_gre.b_mode_enabled; 2174eaf3c0c6SChopra, Manish p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled; 2175eaf3c0c6SChopra, Manish p_resp->vxlan_clss = p_tun->vxlan.tun_cls; 2176eaf3c0c6SChopra, Manish p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; 2177eaf3c0c6SChopra, Manish p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; 2178eaf3c0c6SChopra, Manish p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; 2179eaf3c0c6SChopra, Manish p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; 2180eaf3c0c6SChopra, Manish p_resp->geneve_udp_port = p_tun->geneve_port.port; 2181eaf3c0c6SChopra, Manish p_resp->vxlan_udp_port = p_tun->vxlan_port.port; 2182eaf3c0c6SChopra, Manish } 2183eaf3c0c6SChopra, Manish 2184eaf3c0c6SChopra, Manish static void 2185eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2186eaf3c0c6SChopra, Manish struct qed_tunn_update_type *p_tun, 2187eaf3c0c6SChopra, Manish enum qed_tunn_mode mask, u8 tun_cls) 2188eaf3c0c6SChopra, Manish { 2189eaf3c0c6SChopra, Manish if (p_req->tun_mode_update_mask & BIT(mask)) { 2190eaf3c0c6SChopra, Manish p_tun->b_update_mode = true; 2191eaf3c0c6SChopra, Manish 2192eaf3c0c6SChopra, Manish if (p_req->tunn_mode & BIT(mask)) 2193eaf3c0c6SChopra, Manish p_tun->b_mode_enabled = true; 2194eaf3c0c6SChopra, Manish } 2195eaf3c0c6SChopra, Manish 2196eaf3c0c6SChopra, Manish p_tun->tun_cls = tun_cls; 2197eaf3c0c6SChopra, Manish } 2198eaf3c0c6SChopra, Manish 2199eaf3c0c6SChopra, Manish static void 2200eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2201eaf3c0c6SChopra, Manish struct qed_tunn_update_type *p_tun, 2202eaf3c0c6SChopra, Manish struct qed_tunn_update_udp_port *p_port, 2203eaf3c0c6SChopra, Manish enum qed_tunn_mode mask, 2204eaf3c0c6SChopra, Manish u8 tun_cls, u8 update_port, u16 port) 2205eaf3c0c6SChopra, Manish { 2206eaf3c0c6SChopra, Manish if (update_port) { 2207eaf3c0c6SChopra, Manish p_port->b_update_port = true; 2208eaf3c0c6SChopra, Manish 
p_port->port = port; 2209eaf3c0c6SChopra, Manish } 2210eaf3c0c6SChopra, Manish 2211eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); 2212eaf3c0c6SChopra, Manish } 2213eaf3c0c6SChopra, Manish 2214eaf3c0c6SChopra, Manish static bool 2215eaf3c0c6SChopra, Manish qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) 2216eaf3c0c6SChopra, Manish { 2217eaf3c0c6SChopra, Manish bool b_update_requested = false; 2218eaf3c0c6SChopra, Manish 2219eaf3c0c6SChopra, Manish if (p_req->tun_mode_update_mask || p_req->update_tun_cls || 2220eaf3c0c6SChopra, Manish p_req->update_geneve_port || p_req->update_vxlan_port) 2221eaf3c0c6SChopra, Manish b_update_requested = true; 2222eaf3c0c6SChopra, Manish 2223eaf3c0c6SChopra, Manish return b_update_requested; 2224eaf3c0c6SChopra, Manish } 2225eaf3c0c6SChopra, Manish 2226eaf3c0c6SChopra, Manish static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) 2227eaf3c0c6SChopra, Manish { 2228eaf3c0c6SChopra, Manish if (tun->b_update_mode && !tun->b_mode_enabled) { 2229eaf3c0c6SChopra, Manish tun->b_update_mode = false; 2230eaf3c0c6SChopra, Manish *rc = -EINVAL; 2231eaf3c0c6SChopra, Manish } 2232eaf3c0c6SChopra, Manish } 2233eaf3c0c6SChopra, Manish 2234eaf3c0c6SChopra, Manish static int 2235eaf3c0c6SChopra, Manish qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, 2236eaf3c0c6SChopra, Manish u16 *tun_features, bool *update, 2237eaf3c0c6SChopra, Manish struct qed_tunnel_info *tun_src) 2238eaf3c0c6SChopra, Manish { 2239eaf3c0c6SChopra, Manish struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; 2240eaf3c0c6SChopra, Manish struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; 2241eaf3c0c6SChopra, Manish u16 bultn_vxlan_port, bultn_geneve_port; 2242eaf3c0c6SChopra, Manish void *cookie = p_hwfn->cdev->ops_cookie; 2243eaf3c0c6SChopra, Manish int i, rc = 0; 2244eaf3c0c6SChopra, Manish 2245eaf3c0c6SChopra, Manish *tun_features = p_hwfn->cdev->tunn_feature_mask; 
2246eaf3c0c6SChopra, Manish bultn_vxlan_port = tun->vxlan_port.port; 2247eaf3c0c6SChopra, Manish bultn_geneve_port = tun->geneve_port.port; 2248eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); 2249eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); 2250eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); 2251eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); 2252eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); 2253eaf3c0c6SChopra, Manish 2254eaf3c0c6SChopra, Manish if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && 2255eaf3c0c6SChopra, Manish (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2256eaf3c0c6SChopra, Manish tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2257eaf3c0c6SChopra, Manish tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2258eaf3c0c6SChopra, Manish tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2259eaf3c0c6SChopra, Manish tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { 2260eaf3c0c6SChopra, Manish tun_src->b_update_rx_cls = false; 2261eaf3c0c6SChopra, Manish tun_src->b_update_tx_cls = false; 2262eaf3c0c6SChopra, Manish rc = -EINVAL; 2263eaf3c0c6SChopra, Manish } 2264eaf3c0c6SChopra, Manish 2265eaf3c0c6SChopra, Manish if (tun_src->vxlan_port.b_update_port) { 2266eaf3c0c6SChopra, Manish if (tun_src->vxlan_port.port == tun->vxlan_port.port) { 2267eaf3c0c6SChopra, Manish tun_src->vxlan_port.b_update_port = false; 2268eaf3c0c6SChopra, Manish } else { 2269eaf3c0c6SChopra, Manish *update = true; 2270eaf3c0c6SChopra, Manish bultn_vxlan_port = tun_src->vxlan_port.port; 2271eaf3c0c6SChopra, Manish } 2272eaf3c0c6SChopra, Manish } 2273eaf3c0c6SChopra, Manish 2274eaf3c0c6SChopra, Manish if (tun_src->geneve_port.b_update_port) { 2275eaf3c0c6SChopra, Manish if (tun_src->geneve_port.port == tun->geneve_port.port) { 2276eaf3c0c6SChopra, Manish tun_src->geneve_port.b_update_port = false; 
2277eaf3c0c6SChopra, Manish } else { 2278eaf3c0c6SChopra, Manish *update = true; 2279eaf3c0c6SChopra, Manish bultn_geneve_port = tun_src->geneve_port.port; 2280eaf3c0c6SChopra, Manish } 2281eaf3c0c6SChopra, Manish } 2282eaf3c0c6SChopra, Manish 2283eaf3c0c6SChopra, Manish qed_for_each_vf(p_hwfn, i) { 2284eaf3c0c6SChopra, Manish qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, 2285eaf3c0c6SChopra, Manish bultn_geneve_port); 2286eaf3c0c6SChopra, Manish } 2287eaf3c0c6SChopra, Manish 2288eaf3c0c6SChopra, Manish qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 2289eaf3c0c6SChopra, Manish ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); 2290eaf3c0c6SChopra, Manish 2291eaf3c0c6SChopra, Manish return rc; 2292eaf3c0c6SChopra, Manish } 2293eaf3c0c6SChopra, Manish 2294eaf3c0c6SChopra, Manish static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, 2295eaf3c0c6SChopra, Manish struct qed_ptt *p_ptt, 2296eaf3c0c6SChopra, Manish struct qed_vf_info *p_vf) 2297eaf3c0c6SChopra, Manish { 2298eaf3c0c6SChopra, Manish struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; 2299eaf3c0c6SChopra, Manish struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 2300eaf3c0c6SChopra, Manish struct pfvf_update_tunn_param_tlv *p_resp; 2301eaf3c0c6SChopra, Manish struct vfpf_update_tunn_param_tlv *p_req; 2302eaf3c0c6SChopra, Manish u8 status = PFVF_STATUS_SUCCESS; 2303eaf3c0c6SChopra, Manish bool b_update_required = false; 2304eaf3c0c6SChopra, Manish struct qed_tunnel_info tunn; 2305eaf3c0c6SChopra, Manish u16 tunn_feature_mask = 0; 2306eaf3c0c6SChopra, Manish int i, rc = 0; 2307eaf3c0c6SChopra, Manish 2308eaf3c0c6SChopra, Manish mbx->offset = (u8 *)mbx->reply_virt; 2309eaf3c0c6SChopra, Manish 2310eaf3c0c6SChopra, Manish memset(&tunn, 0, sizeof(tunn)); 2311eaf3c0c6SChopra, Manish p_req = &mbx->req_virt->tunn_param_update; 2312eaf3c0c6SChopra, Manish 2313eaf3c0c6SChopra, Manish if (!qed_iov_pf_validate_tunn_param(p_req)) { 2314eaf3c0c6SChopra, Manish 
DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2315eaf3c0c6SChopra, Manish "No tunnel update requested by VF\n"); 2316eaf3c0c6SChopra, Manish status = PFVF_STATUS_FAILURE; 2317eaf3c0c6SChopra, Manish goto send_resp; 2318eaf3c0c6SChopra, Manish } 2319eaf3c0c6SChopra, Manish 2320eaf3c0c6SChopra, Manish tunn.b_update_rx_cls = p_req->update_tun_cls; 2321eaf3c0c6SChopra, Manish tunn.b_update_tx_cls = p_req->update_tun_cls; 2322eaf3c0c6SChopra, Manish 2323eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, 2324eaf3c0c6SChopra, Manish QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, 2325eaf3c0c6SChopra, Manish p_req->update_vxlan_port, 2326eaf3c0c6SChopra, Manish p_req->vxlan_port); 2327eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, 2328eaf3c0c6SChopra, Manish QED_MODE_L2GENEVE_TUNN, 2329eaf3c0c6SChopra, Manish p_req->l2geneve_clss, 2330eaf3c0c6SChopra, Manish p_req->update_geneve_port, 2331eaf3c0c6SChopra, Manish p_req->geneve_port); 2332eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, 2333eaf3c0c6SChopra, Manish QED_MODE_IPGENEVE_TUNN, 2334eaf3c0c6SChopra, Manish p_req->ipgeneve_clss); 2335eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, 2336eaf3c0c6SChopra, Manish QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); 2337eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, 2338eaf3c0c6SChopra, Manish QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); 2339eaf3c0c6SChopra, Manish 2340eaf3c0c6SChopra, Manish /* If PF modifies VF's req then it should 2341eaf3c0c6SChopra, Manish * still return an error in case of partial configuration 2342eaf3c0c6SChopra, Manish * or modified configuration as opposed to requested one. 
2343eaf3c0c6SChopra, Manish */ 2344eaf3c0c6SChopra, Manish rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask, 2345eaf3c0c6SChopra, Manish &b_update_required, &tunn); 2346eaf3c0c6SChopra, Manish 2347eaf3c0c6SChopra, Manish if (rc) 2348eaf3c0c6SChopra, Manish status = PFVF_STATUS_FAILURE; 2349eaf3c0c6SChopra, Manish 2350eaf3c0c6SChopra, Manish /* If QED client is willing to update anything ? */ 2351eaf3c0c6SChopra, Manish if (b_update_required) { 2352eaf3c0c6SChopra, Manish u16 geneve_port; 2353eaf3c0c6SChopra, Manish 23544f64675fSManish Chopra rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, 2355eaf3c0c6SChopra, Manish QED_SPQ_MODE_EBLOCK, NULL); 2356eaf3c0c6SChopra, Manish if (rc) 2357eaf3c0c6SChopra, Manish status = PFVF_STATUS_FAILURE; 2358eaf3c0c6SChopra, Manish 2359eaf3c0c6SChopra, Manish geneve_port = p_tun->geneve_port.port; 2360eaf3c0c6SChopra, Manish qed_for_each_vf(p_hwfn, i) { 2361eaf3c0c6SChopra, Manish qed_iov_bulletin_set_udp_ports(p_hwfn, i, 2362eaf3c0c6SChopra, Manish p_tun->vxlan_port.port, 2363eaf3c0c6SChopra, Manish geneve_port); 2364eaf3c0c6SChopra, Manish } 2365eaf3c0c6SChopra, Manish } 2366eaf3c0c6SChopra, Manish 2367eaf3c0c6SChopra, Manish send_resp: 2368eaf3c0c6SChopra, Manish p_resp = qed_add_tlv(p_hwfn, &mbx->offset, 2369eaf3c0c6SChopra, Manish CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); 2370eaf3c0c6SChopra, Manish 2371eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); 2372eaf3c0c6SChopra, Manish qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 2373eaf3c0c6SChopra, Manish sizeof(struct channel_list_end_tlv)); 2374eaf3c0c6SChopra, Manish 2375eaf3c0c6SChopra, Manish qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 2376eaf3c0c6SChopra, Manish } 2377eaf3c0c6SChopra, Manish 23785040acf5SYuval Mintz static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, 23795040acf5SYuval Mintz struct qed_ptt *p_ptt, 2380007bc371SMintz, Yuval struct qed_vf_info 
*p_vf, 2381007bc371SMintz, Yuval u32 cid, u8 status) 23825040acf5SYuval Mintz { 23835040acf5SYuval Mintz struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 23845040acf5SYuval Mintz struct pfvf_start_queue_resp_tlv *p_tlv; 2385a044df83SYuval Mintz bool b_legacy = false; 2386a044df83SYuval Mintz u16 length; 23875040acf5SYuval Mintz 23885040acf5SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 23895040acf5SYuval Mintz 2390a044df83SYuval Mintz /* Taking a bigger struct instead of adding a TLV to list was a 2391a044df83SYuval Mintz * mistake, but one which we're now stuck with, as some older 2392a044df83SYuval Mintz * clients assume the size of the previous response. 2393a044df83SYuval Mintz */ 2394a044df83SYuval Mintz if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 2395a044df83SYuval Mintz ETH_HSI_VER_NO_PKT_LEN_TUNN) 2396a044df83SYuval Mintz b_legacy = true; 2397a044df83SYuval Mintz 2398a044df83SYuval Mintz if (!b_legacy) 2399a044df83SYuval Mintz length = sizeof(*p_tlv); 2400a044df83SYuval Mintz else 2401a044df83SYuval Mintz length = sizeof(struct pfvf_def_resp_tlv); 2402a044df83SYuval Mintz 24035040acf5SYuval Mintz p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, 2404a044df83SYuval Mintz length); 24055040acf5SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 24065040acf5SYuval Mintz sizeof(struct channel_list_end_tlv)); 24075040acf5SYuval Mintz 24085040acf5SYuval Mintz /* Update the TLV with the response */ 2409007bc371SMintz, Yuval if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) 2410007bc371SMintz, Yuval p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); 24115040acf5SYuval Mintz 2412a044df83SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); 24135040acf5SYuval Mintz } 24145040acf5SYuval Mintz 2415dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, 2416dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2417dacd88d6SYuval Mintz struct qed_vf_info *vf) 2418dacd88d6SYuval Mintz { 
2419dacd88d6SYuval Mintz struct qed_queue_start_common_params params; 24203946497aSMintz, Yuval struct qed_queue_cid_vf_params vf_params; 2421dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 242241086467SYuval Mintz u8 status = PFVF_STATUS_NO_RESOURCE; 2423dacd88d6SYuval Mintz struct vfpf_start_txq_tlv *req; 2424007bc371SMintz, Yuval struct qed_vf_queue *p_queue; 2425007bc371SMintz, Yuval struct qed_queue_cid *p_cid; 2426f604b17dSMintz, Yuval struct qed_sb_info sb_dummy; 24273b19f478SMintz, Yuval u8 qid_usage_idx, vf_legacy; 2428007bc371SMintz, Yuval u32 cid = 0; 2429dacd88d6SYuval Mintz int rc; 24303da7a37aSMintz, Yuval u16 pq; 2431dacd88d6SYuval Mintz 2432dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(params)); 2433dacd88d6SYuval Mintz req = &mbx->req_virt->start_txq; 243441086467SYuval Mintz 2435f109c240SMintz, Yuval if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, 243608bc8f15SMintz, Yuval QED_IOV_VALIDATE_Q_NA) || 243741086467SYuval Mintz !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 243841086467SYuval Mintz goto out; 243941086467SYuval Mintz 2440bbe3f233SMintz, Yuval qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); 244108bc8f15SMintz, Yuval if (qid_usage_idx == QED_IOV_QID_INVALID) 244208bc8f15SMintz, Yuval goto out; 244308bc8f15SMintz, Yuval 24443da7a37aSMintz, Yuval p_queue = &vf->vf_queues[req->tx_qid]; 244508bc8f15SMintz, Yuval if (p_queue->cids[qid_usage_idx].p_cid) 244608bc8f15SMintz, Yuval goto out; 24473da7a37aSMintz, Yuval 24483b19f478SMintz, Yuval vf_legacy = qed_vf_calculate_legacy(vf); 24493946497aSMintz, Yuval 2450bbe3f233SMintz, Yuval /* Acquire a new queue-cid */ 24513da7a37aSMintz, Yuval params.queue_id = p_queue->fw_tx_qid; 2452dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 24533da7a37aSMintz, Yuval params.stats_id = vf->abs_vf_id + 0x10; 2454f604b17dSMintz, Yuval 2455f604b17dSMintz, Yuval /* Since IGU index is passed via sb_info, construct a dummy one */ 2456f604b17dSMintz, Yuval memset(&sb_dummy, 0, sizeof(sb_dummy)); 
2457f604b17dSMintz, Yuval sb_dummy.igu_sb_id = req->hw_sb; 2458f604b17dSMintz, Yuval params.p_sb = &sb_dummy; 2459dacd88d6SYuval Mintz params.sb_idx = req->sb_index; 2460dacd88d6SYuval Mintz 24613946497aSMintz, Yuval memset(&vf_params, 0, sizeof(vf_params)); 24623946497aSMintz, Yuval vf_params.vfid = vf->relative_vf_id; 24633946497aSMintz, Yuval vf_params.vf_qid = (u8)req->tx_qid; 24643b19f478SMintz, Yuval vf_params.vf_legacy = vf_legacy; 2465bbe3f233SMintz, Yuval vf_params.qid_usage_idx = qid_usage_idx; 24663946497aSMintz, Yuval 2467007bc371SMintz, Yuval p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, 2468007bc371SMintz, Yuval ¶ms, false, &vf_params); 2469007bc371SMintz, Yuval if (!p_cid) 24703da7a37aSMintz, Yuval goto out; 2471dacd88d6SYuval Mintz 2472b5a9ee7cSAriel Elior pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); 2473007bc371SMintz, Yuval rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, 24743da7a37aSMintz, Yuval req->pbl_addr, req->pbl_size, pq); 247541086467SYuval Mintz if (rc) { 2476dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2477007bc371SMintz, Yuval qed_eth_queue_cid_release(p_hwfn, p_cid); 247841086467SYuval Mintz } else { 247941086467SYuval Mintz status = PFVF_STATUS_SUCCESS; 2480007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].p_cid = p_cid; 2481007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].b_is_tx = true; 2482007bc371SMintz, Yuval cid = p_cid->cid; 248341086467SYuval Mintz } 2484dacd88d6SYuval Mintz 248541086467SYuval Mintz out: 2486007bc371SMintz, Yuval qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status); 2487dacd88d6SYuval Mintz } 2488dacd88d6SYuval Mintz 2489dacd88d6SYuval Mintz static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, 2490dacd88d6SYuval Mintz struct qed_vf_info *vf, 2491007bc371SMintz, Yuval u16 rxq_id, 2492007bc371SMintz, Yuval u8 qid_usage_idx, bool cqe_completion) 2493dacd88d6SYuval Mintz { 2494007bc371SMintz, Yuval struct qed_vf_queue *p_queue; 2495dacd88d6SYuval Mintz int rc = 0; 
2496dacd88d6SYuval Mintz 249708bc8f15SMintz, Yuval if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) { 24984c4fa793SMintz, Yuval DP_VERBOSE(p_hwfn, 24994c4fa793SMintz, Yuval QED_MSG_IOV, 250008bc8f15SMintz, Yuval "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", 250108bc8f15SMintz, Yuval vf->relative_vf_id, rxq_id, qid_usage_idx); 2502dacd88d6SYuval Mintz return -EINVAL; 25034c4fa793SMintz, Yuval } 2504dacd88d6SYuval Mintz 25054c4fa793SMintz, Yuval p_queue = &vf->vf_queues[rxq_id]; 25063da7a37aSMintz, Yuval 250708bc8f15SMintz, Yuval /* We've validated the index and the existence of the active RXQ - 250808bc8f15SMintz, Yuval * now we need to make sure that it's using the correct qid. 250908bc8f15SMintz, Yuval */ 251008bc8f15SMintz, Yuval if (!p_queue->cids[qid_usage_idx].p_cid || 251108bc8f15SMintz, Yuval p_queue->cids[qid_usage_idx].b_is_tx) { 251208bc8f15SMintz, Yuval struct qed_queue_cid *p_cid; 251308bc8f15SMintz, Yuval 251408bc8f15SMintz, Yuval p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); 251508bc8f15SMintz, Yuval DP_VERBOSE(p_hwfn, 251608bc8f15SMintz, Yuval QED_MSG_IOV, 251708bc8f15SMintz, Yuval "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", 251808bc8f15SMintz, Yuval vf->relative_vf_id, 251908bc8f15SMintz, Yuval rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); 252008bc8f15SMintz, Yuval return -EINVAL; 252108bc8f15SMintz, Yuval } 252208bc8f15SMintz, Yuval 252308bc8f15SMintz, Yuval /* Now that we know we have a valid Rx-queue - close it */ 25243da7a37aSMintz, Yuval rc = qed_eth_rx_queue_stop(p_hwfn, 2525007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].p_cid, 25263da7a37aSMintz, Yuval false, cqe_completion); 2527dacd88d6SYuval Mintz if (rc) 2528dacd88d6SYuval Mintz return rc; 25293da7a37aSMintz, Yuval 2530007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].p_cid = NULL; 2531dacd88d6SYuval Mintz vf->num_active_rxqs--; 2532dacd88d6SYuval Mintz 25334c4fa793SMintz, Yuval return 0; 2534dacd88d6SYuval Mintz } 

/* Stop the single VF Tx queue identified by @txq_id / @qid_usage_idx and
 * release its queue-cid. Returns -EINVAL if the index does not map to an
 * active Tx queue, or the ramrod's error code on failure.
 */
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 txq_id, u8 qid_usage_idx)
{
	struct qed_vf_queue *p_queue;
	int rc = 0;

	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
		return -EINVAL;

	/* The cid at this qid index must exist and actually be a Tx queue */
	p_queue = &vf->vf_queues[txq_id];
	if (!p_queue->cids[qid_usage_idx].p_cid ||
	    !p_queue->cids[qid_usage_idx].b_is_tx)
		return -EINVAL;

	rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
	if (rc)
		return rc;

	/* Queue is gone; drop our reference to its cid */
	p_queue->cids[qid_usage_idx].p_cid = NULL;
	return 0;
}

/* PF-side handler for the VF's STOP_RXQS mailbox request. Always prepares a
 * response TLV on the channel with the resulting status.
 */
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_rxqs_tlv *req;
	u8 qid_usage_idx;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_rxqs;
	if (req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  qid_usage_idx, req->cqe_completion);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}

/* PF-side handler for the VF's STOP_TXQS mailbox request. Mirrors the Rx
 * variant above; always prepares a response TLV with the resulting status.
 */
static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_txqs_tlv *req;
	u8 qid_usage_idx;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_txqs;
	if (req->num_txqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}

/* PF-side handler for the VF's UPDATE_RXQ mailbox request: validates the
 * requested Rx queue range, collects their cids and issues a single
 * rx-queues-update ramrod for all of them.
 */
static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u8 qid_usage_idx;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	/* There shouldn't exist a VF that uses queue-qids yet uses this
	 * API with multiple Rx queues. Validate this.
	 */
	if ((vf->acquire.vfdev_info.capabilities &
	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] supports QIDs but sends multiple queues\n",
			   vf->relative_vf_id);
		goto out;
	}

	/* Validate inputs - for the legacy case this is still true since
	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
	 */
	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
		if (!qed_iov_validate_rxq(p_hwfn, vf, i,
					  QED_IOV_VALIDATE_Q_NA) ||
		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
				   vf->relative_vf_id, req->rx_qid,
				   req->num_rxqs);
			goto out;
		}
	}

	/* Prepare the handlers */
	for (i = 0; i < req->num_rxqs; i++) {
		u16 qid = req->rx_qid + i;

		handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
	}

	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
					 req->num_rxqs,
					 complete_cqe_flg,
					 complete_event_flg,
					 QED_SPQ_MODE_EBLOCK, NULL);
	if (rc)
		goto out;

	status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}

/* Linearly scan the TLV chain starting at @p_tlvs_list and return the first
 * TLV whose type equals @req_type; NULL if the list terminator is reached,
 * a zero-length TLV is found, or the chain overruns TLV_BUFFER_SIZE.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		/* A zero-length TLV would loop forever; treat as malformed */
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		/* NOTE(review): the next TLV's length field is read before
		 * this bound check confirms the header itself lies within
		 * TLV_BUFFER_SIZE - verify the buffer is padded/zeroed so
		 * this cannot read past the mailbox copy.
		 */
		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}

/* Copy the ACTIVATE TLV (if present in the request) into @p_data and flag
 * it in @tlvs_mask.
 */
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}

/* Copy the VLAN_STRIP TLV (if present) into @p_data; the VF's request is
 * recorded in its shadow config but not applied while a vlan is forced.
 */
static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}

/* Copy the TX_SWITCH TLV (if present) into @p_data and flag it. */
static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						   tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}

/* Copy the approximate-multicast bins from the MCAST TLV (if present). */
static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}

/* Copy Rx/Tx accept-mode filters from the ACCEPT_PARAM TLV (if present). */
static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}

/* Copy the ACCEPT_ANY_VLAN TLV (if present) into @p_data and flag it. */
static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}

/* Parse and validate the RSS TLV (if present). On success @p_rss is filled
 * and hung on @p_data; a bad indirection-table entry rejects the TLV (it is
 * marked in @tlvs_mask but not @tlvs_accepted, so the VF learns of the
 * rejection).
 */
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx,
			    u16 *tlvs_mask, u16 *tlvs_accepted)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	bool b_reject = false;
	u16 table_size;
	u16 i, q_idx;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable =
p_rss_tlv->rss_enable;
	/* Each VF gets its own RSS engine, offset past the PF's engine 0 */
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	/* Translate the VF's queue indices into Rx queue-cids, rejecting the
	 * whole TLV on the first invalid entry.
	 */
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_cid;

		q_idx = p_rss_tlv->rss_ind_table[i];
		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
				   vf->relative_vf_id, q_idx);
			b_reject = true;
			goto out;
		}

		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
		p_rss->rss_ind_table[i] = p_cid;
	}

	p_data->rss_params = p_rss;
out:
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
	if (!b_reject)
		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
}

/* Parse the SGE_TPA (LRO/aggregation) TLV (if present) into @p_sge_tpa and
 * hang it on @p_data; absent TLV clears p_data->sge_tpa_params.
 */
static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}

/* Pre-filter a VF's accept-mode request: for untrusted VFs the unmatched
 * unicast/multicast bits are silently stripped while the requested modes are
 * still recorded in the public VF info (so they can be restored if the VF is
 * later trusted). Always returns 0.
 */
static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
				    u8 vfid,
				    struct qed_sp_vport_update_params *params,
				    u16 *tlvs)
{
	u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	struct qed_filter_accept_flags *flags = &params->accept_flags;
	struct qed_public_vf_info *vf_info;

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);

	if (flags->update_rx_mode_config) {
		vf_info->rx_accept_mode = flags->rx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->rx_accept_filter &= ~mask;
	}

	if (flags->update_tx_mode_config) {
		vf_info->tx_accept_mode = flags->tx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->tx_accept_filter &= ~mask;
	}

	return 0;
}

/* PF-side handler for the VF's VPORT_UPDATE mailbox request: gathers all
 * extended TLVs into a single qed_sp_vport_update() call and responds with
 * per-TLV found/accepted masks.
 */
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_rss_params *p_rss_params = NULL;
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	u16 tlvs_mask = 0, tlvs_accepted = 0;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 length;
	int rc;

	/* Validate PF can send such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing vport update\n",
vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}
	/* RSS params are large; allocate rather than burden the stack */
	p_rss_params = vzalloc(sizeof(*p_rss_params));
	if (p_rss_params == NULL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	tlvs_accepted = tlvs_mask;

	/* Some of the extended TLVs need to be validated first; In that case,
	 * they can update the mask without updating the accepted [so that
	 * PF could communicate to VF it has rejected request].
	 */
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
				    mbx, &tlvs_mask, &tlvs_accepted);

	if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted)) {
		tlvs_accepted = 0;
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Upper-layer prevents VF vport configuration\n");
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);

	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	vfree(p_rss_params);
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_accepted);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

/* Apply a unicast vlan filter change to the VF's shadow configuration,
 * which the PF keeps so filters can be restored later. Returns -EINVAL on
 * removing a non-existing vlan or exceeding the shadow table size.
 */
static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
					 struct qed_vf_info *p_vf,
					 struct qed_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		/* Occupy the first free slot in the shadow table */
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}

/* Apply a unicast MAC filter change to the VF's shadow configuration.
 * No-op when a MAC is forced or the VF is trusted-configured; returns
 * -EINVAL on removing an unknown MAC or when the shadow table is full.
 */
static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					struct qed_filter_ucast *p_params)
{
	int i;

	/* If we're in forced-mode, we don't allow any change */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
		return 0;

	/* Don't keep track of shadow copy since we don't intend to restore.
	 */
	if (p_vf->p_vf_info.is_trusted_configured)
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(p_vf->shadow_config.macs[i],
					     p_params->mac)) {
				eth_zero_addr(p_vf->shadow_config.macs[i]);
				break;
			}
		}

		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "MAC isn't configured\n");
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
			eth_zero_addr(p_vf->shadow_config.macs[i]);
	}

	/* List the new MAC address */
	if (p_params->opcode != QED_FILTER_ADD &&
	    p_params->opcode != QED_FILTER_REPLACE)
		return 0;

	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
			ether_addr_copy(p_vf->shadow_config.macs[i],
					p_params->mac);
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
		return -EINVAL;
	}

	return 0;
}

/* Dispatch a unicast filter change to the MAC and/or vlan shadow updater
 * according to the filter type.
 */
static int
qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 struct qed_filter_ucast *p_params)
{
	int rc = 0;

	if (p_params->type == QED_FILTER_MAC) {
		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
		if (rc)
			return rc;
	}

	if (p_params->type == QED_FILTER_VLAN)
		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);

	return rc;
}

static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
			     int vfid, struct qed_filter_ucast *params)
{
	struct qed_public_vf_info *vf;

	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN) {
		ether_addr_copy(vf->mac, params->mac);

		if (vf->is_trusted_configured) {
qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid); 32437425d822SShahed Shaikh 32447425d822SShahed Shaikh /* Update and post bulleitin again */ 32457425d822SShahed Shaikh qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 32467425d822SShahed Shaikh } 32477425d822SShahed Shaikh } 32487425d822SShahed Shaikh 3249dacd88d6SYuval Mintz return 0; 3250dacd88d6SYuval Mintz } 3251dacd88d6SYuval Mintz 3252dacd88d6SYuval Mintz static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, 3253dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 3254dacd88d6SYuval Mintz struct qed_vf_info *vf) 3255dacd88d6SYuval Mintz { 325608feecd7SYuval Mintz struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; 3257dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 3258dacd88d6SYuval Mintz struct vfpf_ucast_filter_tlv *req; 3259dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 3260dacd88d6SYuval Mintz struct qed_filter_ucast params; 3261dacd88d6SYuval Mintz int rc; 3262dacd88d6SYuval Mintz 3263dacd88d6SYuval Mintz /* Prepare the unicast filter params */ 3264dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(struct qed_filter_ucast)); 3265dacd88d6SYuval Mintz req = &mbx->req_virt->ucast_filter; 3266dacd88d6SYuval Mintz params.opcode = (enum qed_filter_opcode)req->opcode; 3267dacd88d6SYuval Mintz params.type = (enum qed_filter_ucast_type)req->type; 3268dacd88d6SYuval Mintz 3269dacd88d6SYuval Mintz params.is_rx_filter = 1; 3270dacd88d6SYuval Mintz params.is_tx_filter = 1; 3271dacd88d6SYuval Mintz params.vport_to_remove_from = vf->vport_id; 3272dacd88d6SYuval Mintz params.vport_to_add_to = vf->vport_id; 3273dacd88d6SYuval Mintz memcpy(params.mac, req->mac, ETH_ALEN); 3274dacd88d6SYuval Mintz params.vlan = req->vlan; 3275dacd88d6SYuval Mintz 3276dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 3277dacd88d6SYuval Mintz QED_MSG_IOV, 3278dacd88d6SYuval Mintz "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", 3279dacd88d6SYuval Mintz 
vf->abs_vf_id, params.opcode, params.type, 3280dacd88d6SYuval Mintz params.is_rx_filter ? "RX" : "", 3281dacd88d6SYuval Mintz params.is_tx_filter ? "TX" : "", 3282dacd88d6SYuval Mintz params.vport_to_add_to, 3283dacd88d6SYuval Mintz params.mac[0], params.mac[1], 3284dacd88d6SYuval Mintz params.mac[2], params.mac[3], 3285dacd88d6SYuval Mintz params.mac[4], params.mac[5], params.vlan); 3286dacd88d6SYuval Mintz 3287dacd88d6SYuval Mintz if (!vf->vport_instance) { 3288dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 3289dacd88d6SYuval Mintz QED_MSG_IOV, 3290dacd88d6SYuval Mintz "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", 3291dacd88d6SYuval Mintz vf->abs_vf_id); 3292dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 3293dacd88d6SYuval Mintz goto out; 3294dacd88d6SYuval Mintz } 3295dacd88d6SYuval Mintz 329608feecd7SYuval Mintz /* Update shadow copy of the VF configuration */ 329708feecd7SYuval Mintz if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms)) { 329808feecd7SYuval Mintz status = PFVF_STATUS_FAILURE; 329908feecd7SYuval Mintz goto out; 330008feecd7SYuval Mintz } 330108feecd7SYuval Mintz 330208feecd7SYuval Mintz /* Determine if the unicast filtering is acceptible by PF */ 33031a635e48SYuval Mintz if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && 330408feecd7SYuval Mintz (params.type == QED_FILTER_VLAN || 330508feecd7SYuval Mintz params.type == QED_FILTER_MAC_VLAN)) { 330608feecd7SYuval Mintz /* Once VLAN is forced or PVID is set, do not allow 330708feecd7SYuval Mintz * to add/replace any further VLANs. 
330808feecd7SYuval Mintz */ 330908feecd7SYuval Mintz if (params.opcode == QED_FILTER_ADD || 331008feecd7SYuval Mintz params.opcode == QED_FILTER_REPLACE) 331108feecd7SYuval Mintz status = PFVF_STATUS_FORCED; 331208feecd7SYuval Mintz goto out; 331308feecd7SYuval Mintz } 331408feecd7SYuval Mintz 33151a635e48SYuval Mintz if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && 3316eff16960SYuval Mintz (params.type == QED_FILTER_MAC || 3317eff16960SYuval Mintz params.type == QED_FILTER_MAC_VLAN)) { 3318eff16960SYuval Mintz if (!ether_addr_equal(p_bulletin->mac, params.mac) || 3319eff16960SYuval Mintz (params.opcode != QED_FILTER_ADD && 3320eff16960SYuval Mintz params.opcode != QED_FILTER_REPLACE)) 3321eff16960SYuval Mintz status = PFVF_STATUS_FORCED; 3322eff16960SYuval Mintz goto out; 3323eff16960SYuval Mintz } 3324eff16960SYuval Mintz 3325dacd88d6SYuval Mintz rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); 3326dacd88d6SYuval Mintz if (rc) { 3327dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 3328dacd88d6SYuval Mintz goto out; 3329dacd88d6SYuval Mintz } 3330dacd88d6SYuval Mintz 3331dacd88d6SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, 3332dacd88d6SYuval Mintz QED_SPQ_MODE_CB, NULL); 3333dacd88d6SYuval Mintz if (rc) 3334dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 3335dacd88d6SYuval Mintz 3336dacd88d6SYuval Mintz out: 3337dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, 3338dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 3339dacd88d6SYuval Mintz } 3340dacd88d6SYuval Mintz 33410b55e27dSYuval Mintz static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, 33420b55e27dSYuval Mintz struct qed_ptt *p_ptt, 33430b55e27dSYuval Mintz struct qed_vf_info *vf) 33440b55e27dSYuval Mintz { 33450b55e27dSYuval Mintz int i; 33460b55e27dSYuval Mintz 33470b55e27dSYuval Mintz /* Reset the SBs */ 33480b55e27dSYuval Mintz for (i = 0; i < vf->num_sbs; i++) 33490b55e27dSYuval Mintz 
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 33500b55e27dSYuval Mintz vf->igu_sbs[i], 33510b55e27dSYuval Mintz vf->opaque_fid, false); 33520b55e27dSYuval Mintz 33530b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, 33540b55e27dSYuval Mintz sizeof(struct pfvf_def_resp_tlv), 33550b55e27dSYuval Mintz PFVF_STATUS_SUCCESS); 33560b55e27dSYuval Mintz } 33570b55e27dSYuval Mintz 33580b55e27dSYuval Mintz static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, 33590b55e27dSYuval Mintz struct qed_ptt *p_ptt, struct qed_vf_info *vf) 33600b55e27dSYuval Mintz { 33610b55e27dSYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 33620b55e27dSYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 33630b55e27dSYuval Mintz 33640b55e27dSYuval Mintz /* Disable Interrupts for VF */ 33650b55e27dSYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 33660b55e27dSYuval Mintz 33670b55e27dSYuval Mintz /* Reset Permission table */ 33680b55e27dSYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 33690b55e27dSYuval Mintz 33700b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, 33710b55e27dSYuval Mintz length, status); 33720b55e27dSYuval Mintz } 33730b55e27dSYuval Mintz 33740b55e27dSYuval Mintz static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, 33750b55e27dSYuval Mintz struct qed_ptt *p_ptt, 33760b55e27dSYuval Mintz struct qed_vf_info *p_vf) 33770b55e27dSYuval Mintz { 33780b55e27dSYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 33791fe614d1SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 33801fe614d1SYuval Mintz int rc = 0; 33810b55e27dSYuval Mintz 33820b55e27dSYuval Mintz qed_iov_vf_cleanup(p_hwfn, p_vf); 33830b55e27dSYuval Mintz 33841fe614d1SYuval Mintz if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { 33851fe614d1SYuval Mintz /* Stopping the VF */ 33861fe614d1SYuval Mintz rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, 33871fe614d1SYuval Mintz p_vf->opaque_fid); 33881fe614d1SYuval Mintz 
33891fe614d1SYuval Mintz if (rc) { 33901fe614d1SYuval Mintz DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", 33911fe614d1SYuval Mintz rc); 33921fe614d1SYuval Mintz status = PFVF_STATUS_FAILURE; 33931fe614d1SYuval Mintz } 33941fe614d1SYuval Mintz 33951fe614d1SYuval Mintz p_vf->state = VF_STOPPED; 33961fe614d1SYuval Mintz } 33971fe614d1SYuval Mintz 33980b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 33991fe614d1SYuval Mintz length, status); 34000b55e27dSYuval Mintz } 34010b55e27dSYuval Mintz 3402bf5a94bfSRahul Verma static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, 3403bf5a94bfSRahul Verma struct qed_ptt *p_ptt, 3404bf5a94bfSRahul Verma struct qed_vf_info *p_vf) 3405bf5a94bfSRahul Verma { 3406bf5a94bfSRahul Verma struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 3407bf5a94bfSRahul Verma struct pfvf_read_coal_resp_tlv *p_resp; 3408bf5a94bfSRahul Verma struct vfpf_read_coal_req_tlv *req; 3409bf5a94bfSRahul Verma u8 status = PFVF_STATUS_FAILURE; 3410bf5a94bfSRahul Verma struct qed_vf_queue *p_queue; 3411bf5a94bfSRahul Verma struct qed_queue_cid *p_cid; 3412bf5a94bfSRahul Verma u16 coal = 0, qid, i; 3413bf5a94bfSRahul Verma bool b_is_rx; 3414bf5a94bfSRahul Verma int rc = 0; 3415bf5a94bfSRahul Verma 3416bf5a94bfSRahul Verma mbx->offset = (u8 *)mbx->reply_virt; 3417bf5a94bfSRahul Verma req = &mbx->req_virt->read_coal_req; 3418bf5a94bfSRahul Verma 3419bf5a94bfSRahul Verma qid = req->qid; 3420bf5a94bfSRahul Verma b_is_rx = req->is_rx ? 
true : false; 3421bf5a94bfSRahul Verma 3422bf5a94bfSRahul Verma if (b_is_rx) { 3423bf5a94bfSRahul Verma if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, 3424bf5a94bfSRahul Verma QED_IOV_VALIDATE_Q_ENABLE)) { 3425bf5a94bfSRahul Verma DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3426bf5a94bfSRahul Verma "VF[%d]: Invalid Rx queue_id = %d\n", 3427bf5a94bfSRahul Verma p_vf->abs_vf_id, qid); 3428bf5a94bfSRahul Verma goto send_resp; 3429bf5a94bfSRahul Verma } 3430bf5a94bfSRahul Verma 3431bf5a94bfSRahul Verma p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); 3432bf5a94bfSRahul Verma rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); 3433bf5a94bfSRahul Verma if (rc) 3434bf5a94bfSRahul Verma goto send_resp; 3435bf5a94bfSRahul Verma } else { 3436bf5a94bfSRahul Verma if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, 3437bf5a94bfSRahul Verma QED_IOV_VALIDATE_Q_ENABLE)) { 3438bf5a94bfSRahul Verma DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3439bf5a94bfSRahul Verma "VF[%d]: Invalid Tx queue_id = %d\n", 3440bf5a94bfSRahul Verma p_vf->abs_vf_id, qid); 3441bf5a94bfSRahul Verma goto send_resp; 3442bf5a94bfSRahul Verma } 3443bf5a94bfSRahul Verma for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 3444bf5a94bfSRahul Verma p_queue = &p_vf->vf_queues[qid]; 3445bf5a94bfSRahul Verma if ((!p_queue->cids[i].p_cid) || 3446bf5a94bfSRahul Verma (!p_queue->cids[i].b_is_tx)) 3447bf5a94bfSRahul Verma continue; 3448bf5a94bfSRahul Verma 3449bf5a94bfSRahul Verma p_cid = p_queue->cids[i].p_cid; 3450bf5a94bfSRahul Verma 3451bf5a94bfSRahul Verma rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal); 3452bf5a94bfSRahul Verma if (rc) 3453bf5a94bfSRahul Verma goto send_resp; 3454bf5a94bfSRahul Verma break; 3455bf5a94bfSRahul Verma } 3456bf5a94bfSRahul Verma } 3457bf5a94bfSRahul Verma 3458bf5a94bfSRahul Verma status = PFVF_STATUS_SUCCESS; 3459bf5a94bfSRahul Verma 3460bf5a94bfSRahul Verma send_resp: 3461bf5a94bfSRahul Verma p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ, 3462bf5a94bfSRahul Verma 
sizeof(*p_resp)); 3463bf5a94bfSRahul Verma p_resp->coal = coal; 3464bf5a94bfSRahul Verma 3465bf5a94bfSRahul Verma qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 3466bf5a94bfSRahul Verma sizeof(struct channel_list_end_tlv)); 3467bf5a94bfSRahul Verma 3468bf5a94bfSRahul Verma qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 3469bf5a94bfSRahul Verma } 3470bf5a94bfSRahul Verma 3471477f2d14SRahul Verma static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, 3472477f2d14SRahul Verma struct qed_ptt *p_ptt, 3473477f2d14SRahul Verma struct qed_vf_info *vf) 3474477f2d14SRahul Verma { 3475477f2d14SRahul Verma struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 3476477f2d14SRahul Verma struct vfpf_update_coalesce *req; 3477477f2d14SRahul Verma u8 status = PFVF_STATUS_FAILURE; 3478477f2d14SRahul Verma struct qed_queue_cid *p_cid; 3479477f2d14SRahul Verma u16 rx_coal, tx_coal; 3480477f2d14SRahul Verma int rc = 0, i; 3481477f2d14SRahul Verma u16 qid; 3482477f2d14SRahul Verma 3483477f2d14SRahul Verma req = &mbx->req_virt->update_coalesce; 3484477f2d14SRahul Verma 3485477f2d14SRahul Verma rx_coal = req->rx_coal; 3486477f2d14SRahul Verma tx_coal = req->tx_coal; 3487477f2d14SRahul Verma qid = req->qid; 3488477f2d14SRahul Verma 3489477f2d14SRahul Verma if (!qed_iov_validate_rxq(p_hwfn, vf, qid, 3490477f2d14SRahul Verma QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { 3491477f2d14SRahul Verma DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3492477f2d14SRahul Verma "VF[%d]: Invalid Rx queue_id = %d\n", 3493477f2d14SRahul Verma vf->abs_vf_id, qid); 3494477f2d14SRahul Verma goto out; 3495477f2d14SRahul Verma } 3496477f2d14SRahul Verma 3497477f2d14SRahul Verma if (!qed_iov_validate_txq(p_hwfn, vf, qid, 3498477f2d14SRahul Verma QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { 3499477f2d14SRahul Verma DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3500477f2d14SRahul Verma "VF[%d]: Invalid Tx queue_id = %d\n", 3501477f2d14SRahul Verma vf->abs_vf_id, qid); 3502477f2d14SRahul Verma goto out; 3503477f2d14SRahul 
Verma } 3504477f2d14SRahul Verma 3505477f2d14SRahul Verma DP_VERBOSE(p_hwfn, 3506477f2d14SRahul Verma QED_MSG_IOV, 3507477f2d14SRahul Verma "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", 3508477f2d14SRahul Verma vf->abs_vf_id, rx_coal, tx_coal, qid); 3509477f2d14SRahul Verma 3510477f2d14SRahul Verma if (rx_coal) { 3511477f2d14SRahul Verma p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); 3512477f2d14SRahul Verma 3513477f2d14SRahul Verma rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); 3514477f2d14SRahul Verma if (rc) { 3515477f2d14SRahul Verma DP_VERBOSE(p_hwfn, 3516477f2d14SRahul Verma QED_MSG_IOV, 3517477f2d14SRahul Verma "VF[%d]: Unable to set rx queue = %d coalesce\n", 3518477f2d14SRahul Verma vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); 3519477f2d14SRahul Verma goto out; 3520477f2d14SRahul Verma } 3521bf5a94bfSRahul Verma vf->rx_coal = rx_coal; 3522477f2d14SRahul Verma } 3523477f2d14SRahul Verma 3524477f2d14SRahul Verma if (tx_coal) { 3525477f2d14SRahul Verma struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; 3526477f2d14SRahul Verma 3527477f2d14SRahul Verma for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 3528477f2d14SRahul Verma if (!p_queue->cids[i].p_cid) 3529477f2d14SRahul Verma continue; 3530477f2d14SRahul Verma 3531477f2d14SRahul Verma if (!p_queue->cids[i].b_is_tx) 3532477f2d14SRahul Verma continue; 3533477f2d14SRahul Verma 3534477f2d14SRahul Verma rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, 3535477f2d14SRahul Verma p_queue->cids[i].p_cid); 3536477f2d14SRahul Verma 3537477f2d14SRahul Verma if (rc) { 3538477f2d14SRahul Verma DP_VERBOSE(p_hwfn, 3539477f2d14SRahul Verma QED_MSG_IOV, 3540477f2d14SRahul Verma "VF[%d]: Unable to set tx queue coalesce\n", 3541477f2d14SRahul Verma vf->abs_vf_id); 3542477f2d14SRahul Verma goto out; 3543477f2d14SRahul Verma } 3544477f2d14SRahul Verma } 3545bf5a94bfSRahul Verma vf->tx_coal = tx_coal; 3546477f2d14SRahul Verma } 3547477f2d14SRahul Verma 3548477f2d14SRahul Verma 
status = PFVF_STATUS_SUCCESS; 3549477f2d14SRahul Verma out: 3550477f2d14SRahul Verma qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, 3551477f2d14SRahul Verma sizeof(struct pfvf_def_resp_tlv), status); 3552477f2d14SRahul Verma } 35530b55e27dSYuval Mintz static int 35540b55e27dSYuval Mintz qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, 35550b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 35560b55e27dSYuval Mintz { 35570b55e27dSYuval Mintz int cnt; 35580b55e27dSYuval Mintz u32 val; 35590b55e27dSYuval Mintz 35600b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); 35610b55e27dSYuval Mintz 35620b55e27dSYuval Mintz for (cnt = 0; cnt < 50; cnt++) { 35630b55e27dSYuval Mintz val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); 35640b55e27dSYuval Mintz if (!val) 35650b55e27dSYuval Mintz break; 35660b55e27dSYuval Mintz msleep(20); 35670b55e27dSYuval Mintz } 35680b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 35690b55e27dSYuval Mintz 35700b55e27dSYuval Mintz if (cnt == 50) { 35710b55e27dSYuval Mintz DP_ERR(p_hwfn, 35720b55e27dSYuval Mintz "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", 35730b55e27dSYuval Mintz p_vf->abs_vf_id, val); 35740b55e27dSYuval Mintz return -EBUSY; 35750b55e27dSYuval Mintz } 35760b55e27dSYuval Mintz 35770b55e27dSYuval Mintz return 0; 35780b55e27dSYuval Mintz } 35790b55e27dSYuval Mintz 35800b55e27dSYuval Mintz static int 35810b55e27dSYuval Mintz qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, 35820b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 35830b55e27dSYuval Mintz { 358421dd79e8STomer Tayar u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; 35850b55e27dSYuval Mintz int i, cnt; 35860b55e27dSYuval Mintz 35870b55e27dSYuval Mintz /* Read initial consumers & producers */ 358821dd79e8STomer Tayar for (i = 0; i < MAX_NUM_VOQS_E4; i++) { 35890b55e27dSYuval Mintz u32 prod; 35900b55e27dSYuval Mintz 35910b55e27dSYuval Mintz 
cons[i] = qed_rd(p_hwfn, p_ptt, 35920b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 35930b55e27dSYuval Mintz i * 0x40); 35940b55e27dSYuval Mintz prod = qed_rd(p_hwfn, p_ptt, 35950b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + 35960b55e27dSYuval Mintz i * 0x40); 35970b55e27dSYuval Mintz distance[i] = prod - cons[i]; 35980b55e27dSYuval Mintz } 35990b55e27dSYuval Mintz 36000b55e27dSYuval Mintz /* Wait for consumers to pass the producers */ 36010b55e27dSYuval Mintz i = 0; 36020b55e27dSYuval Mintz for (cnt = 0; cnt < 50; cnt++) { 360321dd79e8STomer Tayar for (; i < MAX_NUM_VOQS_E4; i++) { 36040b55e27dSYuval Mintz u32 tmp; 36050b55e27dSYuval Mintz 36060b55e27dSYuval Mintz tmp = qed_rd(p_hwfn, p_ptt, 36070b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 36080b55e27dSYuval Mintz i * 0x40); 36090b55e27dSYuval Mintz if (distance[i] > tmp - cons[i]) 36100b55e27dSYuval Mintz break; 36110b55e27dSYuval Mintz } 36120b55e27dSYuval Mintz 361321dd79e8STomer Tayar if (i == MAX_NUM_VOQS_E4) 36140b55e27dSYuval Mintz break; 36150b55e27dSYuval Mintz 36160b55e27dSYuval Mintz msleep(20); 36170b55e27dSYuval Mintz } 36180b55e27dSYuval Mintz 36190b55e27dSYuval Mintz if (cnt == 50) { 36200b55e27dSYuval Mintz DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", 36210b55e27dSYuval Mintz p_vf->abs_vf_id, i); 36220b55e27dSYuval Mintz return -EBUSY; 36230b55e27dSYuval Mintz } 36240b55e27dSYuval Mintz 36250b55e27dSYuval Mintz return 0; 36260b55e27dSYuval Mintz } 36270b55e27dSYuval Mintz 36280b55e27dSYuval Mintz static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, 36290b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 36300b55e27dSYuval Mintz { 36310b55e27dSYuval Mintz int rc; 36320b55e27dSYuval Mintz 36330b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); 36340b55e27dSYuval Mintz if (rc) 36350b55e27dSYuval Mintz return rc; 36360b55e27dSYuval Mintz 36370b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll_pbf(p_hwfn, 
p_vf, p_ptt); 36380b55e27dSYuval Mintz if (rc) 36390b55e27dSYuval Mintz return rc; 36400b55e27dSYuval Mintz 36410b55e27dSYuval Mintz return 0; 36420b55e27dSYuval Mintz } 36430b55e27dSYuval Mintz 36440b55e27dSYuval Mintz static int 36450b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, 36460b55e27dSYuval Mintz struct qed_ptt *p_ptt, 36470b55e27dSYuval Mintz u16 rel_vf_id, u32 *ack_vfs) 36480b55e27dSYuval Mintz { 36490b55e27dSYuval Mintz struct qed_vf_info *p_vf; 36500b55e27dSYuval Mintz int rc = 0; 36510b55e27dSYuval Mintz 36520b55e27dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 36530b55e27dSYuval Mintz if (!p_vf) 36540b55e27dSYuval Mintz return 0; 36550b55e27dSYuval Mintz 36560b55e27dSYuval Mintz if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 36570b55e27dSYuval Mintz (1ULL << (rel_vf_id % 64))) { 36580b55e27dSYuval Mintz u16 vfid = p_vf->abs_vf_id; 36590b55e27dSYuval Mintz 36600b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 36610b55e27dSYuval Mintz "VF[%d] - Handling FLR\n", vfid); 36620b55e27dSYuval Mintz 36630b55e27dSYuval Mintz qed_iov_vf_cleanup(p_hwfn, p_vf); 36640b55e27dSYuval Mintz 36650b55e27dSYuval Mintz /* If VF isn't active, no need for anything but SW */ 36660b55e27dSYuval Mintz if (!p_vf->b_init) 36670b55e27dSYuval Mintz goto cleanup; 36680b55e27dSYuval Mintz 36690b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); 36700b55e27dSYuval Mintz if (rc) 36710b55e27dSYuval Mintz goto cleanup; 36720b55e27dSYuval Mintz 36730b55e27dSYuval Mintz rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); 36740b55e27dSYuval Mintz if (rc) { 36750b55e27dSYuval Mintz DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid); 36760b55e27dSYuval Mintz return rc; 36770b55e27dSYuval Mintz } 36780b55e27dSYuval Mintz 36797eff82b0SYuval Mintz /* Workaround to make VF-PF channel ready, as FW 36807eff82b0SYuval Mintz * doesn't do that as a part of FLR. 
36817eff82b0SYuval Mintz */ 36827eff82b0SYuval Mintz REG_WR(p_hwfn, 36837eff82b0SYuval Mintz GTT_BAR0_MAP_REG_USDM_RAM + 36847eff82b0SYuval Mintz USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); 36857eff82b0SYuval Mintz 36860b55e27dSYuval Mintz /* VF_STOPPED has to be set only after final cleanup 36870b55e27dSYuval Mintz * but prior to re-enabling the VF. 36880b55e27dSYuval Mintz */ 36890b55e27dSYuval Mintz p_vf->state = VF_STOPPED; 36900b55e27dSYuval Mintz 36910b55e27dSYuval Mintz rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); 36920b55e27dSYuval Mintz if (rc) { 36930b55e27dSYuval Mintz DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", 36940b55e27dSYuval Mintz vfid); 36950b55e27dSYuval Mintz return rc; 36960b55e27dSYuval Mintz } 36970b55e27dSYuval Mintz cleanup: 36980b55e27dSYuval Mintz /* Mark VF for ack and clean pending state */ 36990b55e27dSYuval Mintz if (p_vf->state == VF_RESET) 37000b55e27dSYuval Mintz p_vf->state = VF_STOPPED; 37011a635e48SYuval Mintz ack_vfs[vfid / 32] |= BIT((vfid % 32)); 37020b55e27dSYuval Mintz p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= 37030b55e27dSYuval Mintz ~(1ULL << (rel_vf_id % 64)); 3704fd3c615aSMintz, Yuval p_vf->vf_mbx.b_pending_msg = false; 37050b55e27dSYuval Mintz } 37060b55e27dSYuval Mintz 37070b55e27dSYuval Mintz return rc; 37080b55e27dSYuval Mintz } 37090b55e27dSYuval Mintz 3710ba56947aSBaoyou Xie static int 3711ba56947aSBaoyou Xie qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 37120b55e27dSYuval Mintz { 37130b55e27dSYuval Mintz u32 ack_vfs[VF_MAX_STATIC / 32]; 37140b55e27dSYuval Mintz int rc = 0; 37150b55e27dSYuval Mintz u16 i; 37160b55e27dSYuval Mintz 37170b55e27dSYuval Mintz memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); 37180b55e27dSYuval Mintz 37190b55e27dSYuval Mintz /* Since BRB <-> PRS interface can't be tested as part of the flr 37200b55e27dSYuval Mintz * polling due to HW limitations, simply sleep a bit. 
And since 37210b55e27dSYuval Mintz * there's no need to wait per-vf, do it before looping. 37220b55e27dSYuval Mintz */ 37230b55e27dSYuval Mintz msleep(100); 37240b55e27dSYuval Mintz 37250b55e27dSYuval Mintz for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) 37260b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); 37270b55e27dSYuval Mintz 37280b55e27dSYuval Mintz rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); 37290b55e27dSYuval Mintz return rc; 37300b55e27dSYuval Mintz } 37310b55e27dSYuval Mintz 3732cccf6f5cSMintz, Yuval bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) 37330b55e27dSYuval Mintz { 3734cccf6f5cSMintz, Yuval bool found = false; 3735cccf6f5cSMintz, Yuval u16 i; 37360b55e27dSYuval Mintz 37370b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); 37380b55e27dSYuval Mintz for (i = 0; i < (VF_MAX_STATIC / 32); i++) 37390b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 37400b55e27dSYuval Mintz "[%08x,...,%08x]: %08x\n", 37410b55e27dSYuval Mintz i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); 37420b55e27dSYuval Mintz 37430b55e27dSYuval Mintz if (!p_hwfn->cdev->p_iov_info) { 37440b55e27dSYuval Mintz DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); 3745cccf6f5cSMintz, Yuval return false; 37460b55e27dSYuval Mintz } 37470b55e27dSYuval Mintz 37480b55e27dSYuval Mintz /* Mark VFs */ 37490b55e27dSYuval Mintz for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { 37500b55e27dSYuval Mintz struct qed_vf_info *p_vf; 37510b55e27dSYuval Mintz u8 vfid; 37520b55e27dSYuval Mintz 37530b55e27dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, i, false); 37540b55e27dSYuval Mintz if (!p_vf) 37550b55e27dSYuval Mintz continue; 37560b55e27dSYuval Mintz 37570b55e27dSYuval Mintz vfid = p_vf->abs_vf_id; 37581a635e48SYuval Mintz if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { 37590b55e27dSYuval Mintz u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; 37600b55e27dSYuval Mintz u16 rel_vf_id = p_vf->relative_vf_id; 
37610b55e27dSYuval Mintz 37620b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 37630b55e27dSYuval Mintz "VF[%d] [rel %d] got FLR-ed\n", 37640b55e27dSYuval Mintz vfid, rel_vf_id); 37650b55e27dSYuval Mintz 37660b55e27dSYuval Mintz p_vf->state = VF_RESET; 37670b55e27dSYuval Mintz 37680b55e27dSYuval Mintz /* No need to lock here, since pending_flr should 37690b55e27dSYuval Mintz * only change here and before ACKing MFw. Since 37700b55e27dSYuval Mintz * MFW will not trigger an additional attention for 37710b55e27dSYuval Mintz * VF flr until ACKs, we're safe. 37720b55e27dSYuval Mintz */ 37730b55e27dSYuval Mintz p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); 3774cccf6f5cSMintz, Yuval found = true; 37750b55e27dSYuval Mintz } 37760b55e27dSYuval Mintz } 37770b55e27dSYuval Mintz 37780b55e27dSYuval Mintz return found; 37790b55e27dSYuval Mintz } 37800b55e27dSYuval Mintz 378173390ac9SYuval Mintz static void qed_iov_get_link(struct qed_hwfn *p_hwfn, 378273390ac9SYuval Mintz u16 vfid, 378373390ac9SYuval Mintz struct qed_mcp_link_params *p_params, 378473390ac9SYuval Mintz struct qed_mcp_link_state *p_link, 378573390ac9SYuval Mintz struct qed_mcp_link_capabilities *p_caps) 378673390ac9SYuval Mintz { 378773390ac9SYuval Mintz struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 378873390ac9SYuval Mintz vfid, 378973390ac9SYuval Mintz false); 379073390ac9SYuval Mintz struct qed_bulletin_content *p_bulletin; 379173390ac9SYuval Mintz 379273390ac9SYuval Mintz if (!p_vf) 379373390ac9SYuval Mintz return; 379473390ac9SYuval Mintz 379573390ac9SYuval Mintz p_bulletin = p_vf->bulletin.p_virt; 379673390ac9SYuval Mintz 379773390ac9SYuval Mintz if (p_params) 379873390ac9SYuval Mintz __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); 379973390ac9SYuval Mintz if (p_link) 380073390ac9SYuval Mintz __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); 380173390ac9SYuval Mintz if (p_caps) 380273390ac9SYuval Mintz __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); 380373390ac9SYuval Mintz } 
380473390ac9SYuval Mintz 3805809c45a0SShahed Shaikh static int 3806809c45a0SShahed Shaikh qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, 3807809c45a0SShahed Shaikh struct qed_ptt *p_ptt, 3808809c45a0SShahed Shaikh struct qed_vf_info *p_vf) 3809809c45a0SShahed Shaikh { 3810809c45a0SShahed Shaikh struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; 3811809c45a0SShahed Shaikh struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 3812809c45a0SShahed Shaikh struct vfpf_bulletin_update_mac_tlv *p_req; 3813809c45a0SShahed Shaikh u8 status = PFVF_STATUS_SUCCESS; 3814809c45a0SShahed Shaikh int rc = 0; 3815809c45a0SShahed Shaikh 3816809c45a0SShahed Shaikh if (!p_vf->p_vf_info.is_trusted_configured) { 3817809c45a0SShahed Shaikh DP_VERBOSE(p_hwfn, 3818809c45a0SShahed Shaikh QED_MSG_IOV, 3819809c45a0SShahed Shaikh "Blocking bulletin update request from untrusted VF[%d]\n", 3820809c45a0SShahed Shaikh p_vf->abs_vf_id); 3821809c45a0SShahed Shaikh status = PFVF_STATUS_NOT_SUPPORTED; 3822809c45a0SShahed Shaikh rc = -EINVAL; 3823809c45a0SShahed Shaikh goto send_status; 3824809c45a0SShahed Shaikh } 3825809c45a0SShahed Shaikh 3826809c45a0SShahed Shaikh p_req = &mbx->req_virt->bulletin_update_mac; 3827809c45a0SShahed Shaikh ether_addr_copy(p_bulletin->mac, p_req->mac); 3828809c45a0SShahed Shaikh DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3829809c45a0SShahed Shaikh "Updated bulletin of VF[%d] with requested MAC[%pM]\n", 3830809c45a0SShahed Shaikh p_vf->abs_vf_id, p_req->mac); 3831809c45a0SShahed Shaikh 3832809c45a0SShahed Shaikh send_status: 3833809c45a0SShahed Shaikh qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 3834809c45a0SShahed Shaikh CHANNEL_TLV_BULLETIN_UPDATE_MAC, 3835809c45a0SShahed Shaikh sizeof(struct pfvf_def_resp_tlv), status); 3836809c45a0SShahed Shaikh return rc; 3837809c45a0SShahed Shaikh } 3838809c45a0SShahed Shaikh 383937bff2b9SYuval Mintz static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, 384037bff2b9SYuval Mintz struct qed_ptt *p_ptt, int vfid) 
384137bff2b9SYuval Mintz { 384237bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx; 384337bff2b9SYuval Mintz struct qed_vf_info *p_vf; 384437bff2b9SYuval Mintz 384537bff2b9SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 384637bff2b9SYuval Mintz if (!p_vf) 384737bff2b9SYuval Mintz return; 384837bff2b9SYuval Mintz 384937bff2b9SYuval Mintz mbx = &p_vf->vf_mbx; 385037bff2b9SYuval Mintz 385137bff2b9SYuval Mintz /* qed_iov_process_mbx_request */ 3852fd3c615aSMintz, Yuval if (!mbx->b_pending_msg) { 3853fd3c615aSMintz, Yuval DP_NOTICE(p_hwfn, 3854fd3c615aSMintz, Yuval "VF[%02x]: Trying to process mailbox message when none is pending\n", 3855fd3c615aSMintz, Yuval p_vf->abs_vf_id); 3856fd3c615aSMintz, Yuval return; 3857fd3c615aSMintz, Yuval } 3858fd3c615aSMintz, Yuval mbx->b_pending_msg = false; 385937bff2b9SYuval Mintz 386037bff2b9SYuval Mintz mbx->first_tlv = mbx->req_virt->first_tlv; 386137bff2b9SYuval Mintz 3862fd3c615aSMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3863fd3c615aSMintz, Yuval "VF[%02x]: Processing mailbox message [type %04x]\n", 3864fd3c615aSMintz, Yuval p_vf->abs_vf_id, mbx->first_tlv.tl.type); 3865fd3c615aSMintz, Yuval 386637bff2b9SYuval Mintz /* check if tlv type is known */ 38677eff82b0SYuval Mintz if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && 38687eff82b0SYuval Mintz !p_vf->b_malicious) { 38691408cc1fSYuval Mintz switch (mbx->first_tlv.tl.type) { 38701408cc1fSYuval Mintz case CHANNEL_TLV_ACQUIRE: 38711408cc1fSYuval Mintz qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); 38721408cc1fSYuval Mintz break; 3873dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_START: 3874dacd88d6SYuval Mintz qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); 3875dacd88d6SYuval Mintz break; 3876dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_TEARDOWN: 3877dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); 3878dacd88d6SYuval Mintz break; 3879dacd88d6SYuval Mintz case CHANNEL_TLV_START_RXQ: 3880dacd88d6SYuval Mintz qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, 
p_vf); 3881dacd88d6SYuval Mintz break; 3882dacd88d6SYuval Mintz case CHANNEL_TLV_START_TXQ: 3883dacd88d6SYuval Mintz qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); 3884dacd88d6SYuval Mintz break; 3885dacd88d6SYuval Mintz case CHANNEL_TLV_STOP_RXQS: 3886dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); 3887dacd88d6SYuval Mintz break; 3888dacd88d6SYuval Mintz case CHANNEL_TLV_STOP_TXQS: 3889dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); 3890dacd88d6SYuval Mintz break; 389117b235c1SYuval Mintz case CHANNEL_TLV_UPDATE_RXQ: 389217b235c1SYuval Mintz qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); 389317b235c1SYuval Mintz break; 3894dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_UPDATE: 3895dacd88d6SYuval Mintz qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); 3896dacd88d6SYuval Mintz break; 3897dacd88d6SYuval Mintz case CHANNEL_TLV_UCAST_FILTER: 3898dacd88d6SYuval Mintz qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); 3899dacd88d6SYuval Mintz break; 39000b55e27dSYuval Mintz case CHANNEL_TLV_CLOSE: 39010b55e27dSYuval Mintz qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); 39020b55e27dSYuval Mintz break; 39030b55e27dSYuval Mintz case CHANNEL_TLV_INT_CLEANUP: 39040b55e27dSYuval Mintz qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); 39050b55e27dSYuval Mintz break; 39060b55e27dSYuval Mintz case CHANNEL_TLV_RELEASE: 39070b55e27dSYuval Mintz qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); 39080b55e27dSYuval Mintz break; 3909eaf3c0c6SChopra, Manish case CHANNEL_TLV_UPDATE_TUNN_PARAM: 3910eaf3c0c6SChopra, Manish qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); 3911eaf3c0c6SChopra, Manish break; 3912477f2d14SRahul Verma case CHANNEL_TLV_COALESCE_UPDATE: 3913477f2d14SRahul Verma qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); 3914477f2d14SRahul Verma break; 3915bf5a94bfSRahul Verma case CHANNEL_TLV_COALESCE_READ: 3916bf5a94bfSRahul Verma qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); 3917bf5a94bfSRahul Verma break; 3918809c45a0SShahed Shaikh 
case CHANNEL_TLV_BULLETIN_UPDATE_MAC: 3919809c45a0SShahed Shaikh qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); 3920809c45a0SShahed Shaikh break; 39211408cc1fSYuval Mintz } 39227eff82b0SYuval Mintz } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { 39237eff82b0SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 39247eff82b0SYuval Mintz "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", 39257eff82b0SYuval Mintz p_vf->abs_vf_id, mbx->first_tlv.tl.type); 39267eff82b0SYuval Mintz 39277eff82b0SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 39287eff82b0SYuval Mintz mbx->first_tlv.tl.type, 39297eff82b0SYuval Mintz sizeof(struct pfvf_def_resp_tlv), 39307eff82b0SYuval Mintz PFVF_STATUS_MALICIOUS); 393137bff2b9SYuval Mintz } else { 393237bff2b9SYuval Mintz /* unknown TLV - this may belong to a VF driver from the future 393337bff2b9SYuval Mintz * - a version written after this PF driver was written, which 393437bff2b9SYuval Mintz * supports features unknown as of yet. Too bad since we don't 393537bff2b9SYuval Mintz * support them. Or this may be because someone wrote a crappy 393637bff2b9SYuval Mintz * VF driver and is sending garbage over the channel. 393737bff2b9SYuval Mintz */ 393854fdd80fSYuval Mintz DP_NOTICE(p_hwfn, 393954fdd80fSYuval Mintz "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", 394054fdd80fSYuval Mintz p_vf->abs_vf_id, 394154fdd80fSYuval Mintz mbx->first_tlv.tl.type, 394254fdd80fSYuval Mintz mbx->first_tlv.tl.length, 394354fdd80fSYuval Mintz mbx->first_tlv.padding, mbx->first_tlv.reply_address); 394437bff2b9SYuval Mintz 394554fdd80fSYuval Mintz /* Try replying in case reply address matches the acquisition's 394654fdd80fSYuval Mintz * posted address. 
394754fdd80fSYuval Mintz */ 394854fdd80fSYuval Mintz if (p_vf->acquire.first_tlv.reply_address && 394954fdd80fSYuval Mintz (mbx->first_tlv.reply_address == 395054fdd80fSYuval Mintz p_vf->acquire.first_tlv.reply_address)) { 395154fdd80fSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 395254fdd80fSYuval Mintz mbx->first_tlv.tl.type, 395354fdd80fSYuval Mintz sizeof(struct pfvf_def_resp_tlv), 395454fdd80fSYuval Mintz PFVF_STATUS_NOT_SUPPORTED); 395554fdd80fSYuval Mintz } else { 395637bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, 395737bff2b9SYuval Mintz QED_MSG_IOV, 395854fdd80fSYuval Mintz "VF[%02x]: Can't respond to TLV - no valid reply address\n", 395954fdd80fSYuval Mintz p_vf->abs_vf_id); 396037bff2b9SYuval Mintz } 396137bff2b9SYuval Mintz } 396237bff2b9SYuval Mintz } 396337bff2b9SYuval Mintz 3964bf774d14SYueHaibing static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) 396537bff2b9SYuval Mintz { 3966fd3c615aSMintz, Yuval int i; 396737bff2b9SYuval Mintz 3968fd3c615aSMintz, Yuval memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); 3969fd3c615aSMintz, Yuval 3970fd3c615aSMintz, Yuval qed_for_each_vf(p_hwfn, i) { 3971fd3c615aSMintz, Yuval struct qed_vf_info *p_vf; 3972fd3c615aSMintz, Yuval 3973fd3c615aSMintz, Yuval p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; 3974fd3c615aSMintz, Yuval if (p_vf->vf_mbx.b_pending_msg) 3975fd3c615aSMintz, Yuval events[i / 64] |= 1ULL << (i % 64); 397637bff2b9SYuval Mintz } 397737bff2b9SYuval Mintz } 397837bff2b9SYuval Mintz 39797eff82b0SYuval Mintz static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, 39807eff82b0SYuval Mintz u16 abs_vfid) 39817eff82b0SYuval Mintz { 39827eff82b0SYuval Mintz u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; 39837eff82b0SYuval Mintz 39847eff82b0SYuval Mintz if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { 39857eff82b0SYuval Mintz DP_VERBOSE(p_hwfn, 39867eff82b0SYuval Mintz QED_MSG_IOV, 39877eff82b0SYuval Mintz "Got indication for VF 
[abs 0x%08x] that cannot be handled by PF\n", 39887eff82b0SYuval Mintz abs_vfid); 39897eff82b0SYuval Mintz return NULL; 39907eff82b0SYuval Mintz } 39917eff82b0SYuval Mintz 39927eff82b0SYuval Mintz return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; 39937eff82b0SYuval Mintz } 39947eff82b0SYuval Mintz 399537bff2b9SYuval Mintz static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, 399637bff2b9SYuval Mintz u16 abs_vfid, struct regpair *vf_msg) 399737bff2b9SYuval Mintz { 39987eff82b0SYuval Mintz struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, 399937bff2b9SYuval Mintz abs_vfid); 40007eff82b0SYuval Mintz 40017eff82b0SYuval Mintz if (!p_vf) 400237bff2b9SYuval Mintz return 0; 400337bff2b9SYuval Mintz 400437bff2b9SYuval Mintz /* List the physical address of the request so that handler 400537bff2b9SYuval Mintz * could later on copy the message from it. 400637bff2b9SYuval Mintz */ 400737bff2b9SYuval Mintz p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; 400837bff2b9SYuval Mintz 400937bff2b9SYuval Mintz /* Mark the event and schedule the workqueue */ 4010fd3c615aSMintz, Yuval p_vf->vf_mbx.b_pending_msg = true; 401137bff2b9SYuval Mintz qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); 401237bff2b9SYuval Mintz 401337bff2b9SYuval Mintz return 0; 401437bff2b9SYuval Mintz } 401537bff2b9SYuval Mintz 40167eff82b0SYuval Mintz static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, 40177eff82b0SYuval Mintz struct malicious_vf_eqe_data *p_data) 40187eff82b0SYuval Mintz { 40197eff82b0SYuval Mintz struct qed_vf_info *p_vf; 40207eff82b0SYuval Mintz 40217eff82b0SYuval Mintz p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); 40227eff82b0SYuval Mintz 40237eff82b0SYuval Mintz if (!p_vf) 40247eff82b0SYuval Mintz return; 40257eff82b0SYuval Mintz 4026e99a21cbSMintz, Yuval if (!p_vf->b_malicious) { 4027e99a21cbSMintz, Yuval DP_NOTICE(p_hwfn, 40287eff82b0SYuval Mintz "VF [%d] - Malicious behavior [%02x]\n", 40297eff82b0SYuval Mintz 
p_vf->abs_vf_id, p_data->err_id); 40307eff82b0SYuval Mintz 40317eff82b0SYuval Mintz p_vf->b_malicious = true; 4032e99a21cbSMintz, Yuval } else { 4033e99a21cbSMintz, Yuval DP_INFO(p_hwfn, 4034e99a21cbSMintz, Yuval "VF [%d] - Malicious behavior [%02x]\n", 4035e99a21cbSMintz, Yuval p_vf->abs_vf_id, p_data->err_id); 4036e99a21cbSMintz, Yuval } 40377eff82b0SYuval Mintz } 40387eff82b0SYuval Mintz 40396c9e80eaSMichal Kalderon static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, 40406c9e80eaSMichal Kalderon u8 opcode, 40416c9e80eaSMichal Kalderon __le16 echo, 40426c9e80eaSMichal Kalderon union event_ring_data *data, u8 fw_return_code) 404337bff2b9SYuval Mintz { 404437bff2b9SYuval Mintz switch (opcode) { 404537bff2b9SYuval Mintz case COMMON_EVENT_VF_PF_CHANNEL: 404637bff2b9SYuval Mintz return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), 404737bff2b9SYuval Mintz &data->vf_pf_channel.msg_addr); 40487eff82b0SYuval Mintz case COMMON_EVENT_MALICIOUS_VF: 40497eff82b0SYuval Mintz qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); 40507eff82b0SYuval Mintz return 0; 405137bff2b9SYuval Mintz default: 405237bff2b9SYuval Mintz DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", 405337bff2b9SYuval Mintz opcode); 405437bff2b9SYuval Mintz return -EINVAL; 405537bff2b9SYuval Mintz } 405637bff2b9SYuval Mintz } 405737bff2b9SYuval Mintz 405832a47e72SYuval Mintz u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 405932a47e72SYuval Mintz { 406032a47e72SYuval Mintz struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 406132a47e72SYuval Mintz u16 i; 406232a47e72SYuval Mintz 406332a47e72SYuval Mintz if (!p_iov) 406432a47e72SYuval Mintz goto out; 406532a47e72SYuval Mintz 406632a47e72SYuval Mintz for (i = rel_vf_id; i < p_iov->total_vfs; i++) 40677eff82b0SYuval Mintz if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false)) 406832a47e72SYuval Mintz return i; 406932a47e72SYuval Mintz 407032a47e72SYuval Mintz out: 407132a47e72SYuval Mintz return MAX_NUM_VFS; 
407232a47e72SYuval Mintz } 407337bff2b9SYuval Mintz 407437bff2b9SYuval Mintz static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, 407537bff2b9SYuval Mintz int vfid) 407637bff2b9SYuval Mintz { 407737bff2b9SYuval Mintz struct qed_dmae_params params; 407837bff2b9SYuval Mintz struct qed_vf_info *vf_info; 407937bff2b9SYuval Mintz 408037bff2b9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 408137bff2b9SYuval Mintz if (!vf_info) 408237bff2b9SYuval Mintz return -EINVAL; 408337bff2b9SYuval Mintz 4084804c5702SMichal Kalderon memset(¶ms, 0, sizeof(params)); 4085804c5702SMichal Kalderon SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1); 4086804c5702SMichal Kalderon SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1); 408737bff2b9SYuval Mintz params.src_vfid = vf_info->abs_vf_id; 408837bff2b9SYuval Mintz 408937bff2b9SYuval Mintz if (qed_dmae_host2host(p_hwfn, ptt, 409037bff2b9SYuval Mintz vf_info->vf_mbx.pending_req, 409137bff2b9SYuval Mintz vf_info->vf_mbx.req_phys, 409237bff2b9SYuval Mintz sizeof(union vfpf_tlvs) / 4, ¶ms)) { 409337bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 409437bff2b9SYuval Mintz "Failed to copy message from VF 0x%02x\n", vfid); 409537bff2b9SYuval Mintz 409637bff2b9SYuval Mintz return -EIO; 409737bff2b9SYuval Mintz } 409837bff2b9SYuval Mintz 409937bff2b9SYuval Mintz return 0; 410037bff2b9SYuval Mintz } 410137bff2b9SYuval Mintz 4102eff16960SYuval Mintz static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, 4103eff16960SYuval Mintz u8 *mac, int vfid) 4104eff16960SYuval Mintz { 4105eff16960SYuval Mintz struct qed_vf_info *vf_info; 4106eff16960SYuval Mintz u64 feature; 4107eff16960SYuval Mintz 4108eff16960SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4109eff16960SYuval Mintz if (!vf_info) { 4110eff16960SYuval Mintz DP_NOTICE(p_hwfn->cdev, 4111eff16960SYuval Mintz "Can not set forced MAC, invalid vfid [%d]\n", vfid); 4112eff16960SYuval Mintz return; 
4113eff16960SYuval Mintz } 4114eff16960SYuval Mintz 41157eff82b0SYuval Mintz if (vf_info->b_malicious) { 41167eff82b0SYuval Mintz DP_NOTICE(p_hwfn->cdev, 41177eff82b0SYuval Mintz "Can't set forced MAC to malicious VF [%d]\n", vfid); 41187eff82b0SYuval Mintz return; 41197eff82b0SYuval Mintz } 41207eff82b0SYuval Mintz 41217425d822SShahed Shaikh if (vf_info->p_vf_info.is_trusted_configured) { 41227425d822SShahed Shaikh feature = BIT(VFPF_BULLETIN_MAC_ADDR); 41237425d822SShahed Shaikh /* Trust mode will disable Forced MAC */ 41247425d822SShahed Shaikh vf_info->bulletin.p_virt->valid_bitmap &= 41257425d822SShahed Shaikh ~BIT(MAC_ADDR_FORCED); 41267425d822SShahed Shaikh } else { 41277425d822SShahed Shaikh feature = BIT(MAC_ADDR_FORCED); 41287425d822SShahed Shaikh /* Forced MAC will disable MAC_ADDR */ 41297425d822SShahed Shaikh vf_info->bulletin.p_virt->valid_bitmap &= 41307425d822SShahed Shaikh ~BIT(VFPF_BULLETIN_MAC_ADDR); 41317425d822SShahed Shaikh } 41327425d822SShahed Shaikh 4133eff16960SYuval Mintz memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); 4134eff16960SYuval Mintz 4135eff16960SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap |= feature; 4136eff16960SYuval Mintz 4137eff16960SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 4138eff16960SYuval Mintz } 4139eff16960SYuval Mintz 41407425d822SShahed Shaikh static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) 41417425d822SShahed Shaikh { 41427425d822SShahed Shaikh struct qed_vf_info *vf_info; 41437425d822SShahed Shaikh u64 feature; 41447425d822SShahed Shaikh 41457425d822SShahed Shaikh vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 41467425d822SShahed Shaikh if (!vf_info) { 41477425d822SShahed Shaikh DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n", 41487425d822SShahed Shaikh vfid); 41497425d822SShahed Shaikh return -EINVAL; 41507425d822SShahed Shaikh } 41517425d822SShahed Shaikh 41527425d822SShahed Shaikh if (vf_info->b_malicious) { 
41537425d822SShahed Shaikh DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n", 41547425d822SShahed Shaikh vfid); 41557425d822SShahed Shaikh return -EINVAL; 41567425d822SShahed Shaikh } 41577425d822SShahed Shaikh 41587425d822SShahed Shaikh if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) { 41597425d822SShahed Shaikh DP_VERBOSE(p_hwfn, QED_MSG_IOV, 41607425d822SShahed Shaikh "Can not set MAC, Forced MAC is configured\n"); 41617425d822SShahed Shaikh return -EINVAL; 41627425d822SShahed Shaikh } 41637425d822SShahed Shaikh 41647425d822SShahed Shaikh feature = BIT(VFPF_BULLETIN_MAC_ADDR); 41657425d822SShahed Shaikh ether_addr_copy(vf_info->bulletin.p_virt->mac, mac); 41667425d822SShahed Shaikh 41677425d822SShahed Shaikh vf_info->bulletin.p_virt->valid_bitmap |= feature; 41687425d822SShahed Shaikh 41697425d822SShahed Shaikh if (vf_info->p_vf_info.is_trusted_configured) 41707425d822SShahed Shaikh qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 41717425d822SShahed Shaikh 41727425d822SShahed Shaikh return 0; 41737425d822SShahed Shaikh } 41747425d822SShahed Shaikh 4175ba56947aSBaoyou Xie static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, 417608feecd7SYuval Mintz u16 pvid, int vfid) 417708feecd7SYuval Mintz { 417808feecd7SYuval Mintz struct qed_vf_info *vf_info; 417908feecd7SYuval Mintz u64 feature; 418008feecd7SYuval Mintz 418108feecd7SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 418208feecd7SYuval Mintz if (!vf_info) { 418308feecd7SYuval Mintz DP_NOTICE(p_hwfn->cdev, 418408feecd7SYuval Mintz "Can not set forced MAC, invalid vfid [%d]\n", vfid); 418508feecd7SYuval Mintz return; 418608feecd7SYuval Mintz } 418708feecd7SYuval Mintz 41887eff82b0SYuval Mintz if (vf_info->b_malicious) { 41897eff82b0SYuval Mintz DP_NOTICE(p_hwfn->cdev, 41907eff82b0SYuval Mintz "Can't set forced vlan to malicious VF [%d]\n", vfid); 41917eff82b0SYuval Mintz return; 41927eff82b0SYuval Mintz } 41937eff82b0SYuval Mintz 
419408feecd7SYuval Mintz feature = 1 << VLAN_ADDR_FORCED; 419508feecd7SYuval Mintz vf_info->bulletin.p_virt->pvid = pvid; 419608feecd7SYuval Mintz if (pvid) 419708feecd7SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap |= feature; 419808feecd7SYuval Mintz else 419908feecd7SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap &= ~feature; 420008feecd7SYuval Mintz 420108feecd7SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 420208feecd7SYuval Mintz } 420308feecd7SYuval Mintz 420497379f15SChopra, Manish void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, 420597379f15SChopra, Manish int vfid, u16 vxlan_port, u16 geneve_port) 420697379f15SChopra, Manish { 420797379f15SChopra, Manish struct qed_vf_info *vf_info; 420897379f15SChopra, Manish 420997379f15SChopra, Manish vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 421097379f15SChopra, Manish if (!vf_info) { 421197379f15SChopra, Manish DP_NOTICE(p_hwfn->cdev, 421297379f15SChopra, Manish "Can not set udp ports, invalid vfid [%d]\n", vfid); 421397379f15SChopra, Manish return; 421497379f15SChopra, Manish } 421597379f15SChopra, Manish 421697379f15SChopra, Manish if (vf_info->b_malicious) { 421797379f15SChopra, Manish DP_VERBOSE(p_hwfn, QED_MSG_IOV, 421897379f15SChopra, Manish "Can not set udp ports to malicious VF [%d]\n", 421997379f15SChopra, Manish vfid); 422097379f15SChopra, Manish return; 422197379f15SChopra, Manish } 422297379f15SChopra, Manish 422397379f15SChopra, Manish vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; 422497379f15SChopra, Manish vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; 422597379f15SChopra, Manish } 422697379f15SChopra, Manish 42276ddc7608SYuval Mintz static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) 42286ddc7608SYuval Mintz { 42296ddc7608SYuval Mintz struct qed_vf_info *p_vf_info; 42306ddc7608SYuval Mintz 42316ddc7608SYuval Mintz p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 42326ddc7608SYuval Mintz if 
(!p_vf_info) 42336ddc7608SYuval Mintz return false; 42346ddc7608SYuval Mintz 42356ddc7608SYuval Mintz return !!p_vf_info->vport_instance; 42366ddc7608SYuval Mintz } 42376ddc7608SYuval Mintz 4238ba56947aSBaoyou Xie static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) 42390b55e27dSYuval Mintz { 42400b55e27dSYuval Mintz struct qed_vf_info *p_vf_info; 42410b55e27dSYuval Mintz 42420b55e27dSYuval Mintz p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 42430b55e27dSYuval Mintz if (!p_vf_info) 42440b55e27dSYuval Mintz return true; 42450b55e27dSYuval Mintz 42460b55e27dSYuval Mintz return p_vf_info->state == VF_STOPPED; 42470b55e27dSYuval Mintz } 42480b55e27dSYuval Mintz 424973390ac9SYuval Mintz static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) 425073390ac9SYuval Mintz { 425173390ac9SYuval Mintz struct qed_vf_info *vf_info; 425273390ac9SYuval Mintz 425373390ac9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 425473390ac9SYuval Mintz if (!vf_info) 425573390ac9SYuval Mintz return false; 425673390ac9SYuval Mintz 425773390ac9SYuval Mintz return vf_info->spoof_chk; 425873390ac9SYuval Mintz } 425973390ac9SYuval Mintz 4260ba56947aSBaoyou Xie static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) 42616ddc7608SYuval Mintz { 42626ddc7608SYuval Mintz struct qed_vf_info *vf; 42636ddc7608SYuval Mintz int rc = -EINVAL; 42646ddc7608SYuval Mintz 42656ddc7608SYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 42666ddc7608SYuval Mintz DP_NOTICE(p_hwfn, 42676ddc7608SYuval Mintz "SR-IOV sanity check failed, can't set spoofchk\n"); 42686ddc7608SYuval Mintz goto out; 42696ddc7608SYuval Mintz } 42706ddc7608SYuval Mintz 42716ddc7608SYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 42726ddc7608SYuval Mintz if (!vf) 42736ddc7608SYuval Mintz goto out; 42746ddc7608SYuval Mintz 42756ddc7608SYuval Mintz if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { 42766ddc7608SYuval Mintz /* After VF VPORT 
start PF will configure spoof check */ 42776ddc7608SYuval Mintz vf->req_spoofchk_val = val; 42786ddc7608SYuval Mintz rc = 0; 42796ddc7608SYuval Mintz goto out; 42806ddc7608SYuval Mintz } 42816ddc7608SYuval Mintz 42826ddc7608SYuval Mintz rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); 42836ddc7608SYuval Mintz 42846ddc7608SYuval Mintz out: 42856ddc7608SYuval Mintz return rc; 42866ddc7608SYuval Mintz } 42876ddc7608SYuval Mintz 42887425d822SShahed Shaikh static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 42897425d822SShahed Shaikh { 42907425d822SShahed Shaikh struct qed_vf_info *p_vf; 42917425d822SShahed Shaikh 42927425d822SShahed Shaikh p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 42937425d822SShahed Shaikh if (!p_vf || !p_vf->bulletin.p_virt) 42947425d822SShahed Shaikh return NULL; 42957425d822SShahed Shaikh 42967425d822SShahed Shaikh if (!(p_vf->bulletin.p_virt->valid_bitmap & 42977425d822SShahed Shaikh BIT(VFPF_BULLETIN_MAC_ADDR))) 42987425d822SShahed Shaikh return NULL; 42997425d822SShahed Shaikh 43007425d822SShahed Shaikh return p_vf->bulletin.p_virt->mac; 43017425d822SShahed Shaikh } 43027425d822SShahed Shaikh 4303eff16960SYuval Mintz static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, 4304eff16960SYuval Mintz u16 rel_vf_id) 4305eff16960SYuval Mintz { 4306eff16960SYuval Mintz struct qed_vf_info *p_vf; 4307eff16960SYuval Mintz 4308eff16960SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4309eff16960SYuval Mintz if (!p_vf || !p_vf->bulletin.p_virt) 4310eff16960SYuval Mintz return NULL; 4311eff16960SYuval Mintz 43121a635e48SYuval Mintz if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) 4313eff16960SYuval Mintz return NULL; 4314eff16960SYuval Mintz 4315eff16960SYuval Mintz return p_vf->bulletin.p_virt->mac; 4316eff16960SYuval Mintz } 4317eff16960SYuval Mintz 4318ba56947aSBaoyou Xie static u16 4319ba56947aSBaoyou Xie qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 
432008feecd7SYuval Mintz { 432108feecd7SYuval Mintz struct qed_vf_info *p_vf; 432208feecd7SYuval Mintz 432308feecd7SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 432408feecd7SYuval Mintz if (!p_vf || !p_vf->bulletin.p_virt) 432508feecd7SYuval Mintz return 0; 432608feecd7SYuval Mintz 43271a635e48SYuval Mintz if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) 432808feecd7SYuval Mintz return 0; 432908feecd7SYuval Mintz 433008feecd7SYuval Mintz return p_vf->bulletin.p_virt->pvid; 433108feecd7SYuval Mintz } 433208feecd7SYuval Mintz 4333733def6aSYuval Mintz static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, 4334733def6aSYuval Mintz struct qed_ptt *p_ptt, int vfid, int val) 4335733def6aSYuval Mintz { 4336733def6aSYuval Mintz struct qed_vf_info *vf; 4337733def6aSYuval Mintz u8 abs_vp_id = 0; 433892fae6fbSMichal Kalderon u16 rl_id; 4339733def6aSYuval Mintz int rc; 4340733def6aSYuval Mintz 4341733def6aSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4342733def6aSYuval Mintz if (!vf) 4343733def6aSYuval Mintz return -EINVAL; 4344733def6aSYuval Mintz 4345733def6aSYuval Mintz rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); 4346733def6aSYuval Mintz if (rc) 4347733def6aSYuval Mintz return rc; 4348733def6aSYuval Mintz 434992fae6fbSMichal Kalderon rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ 435092fae6fbSMichal Kalderon return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); 4351733def6aSYuval Mintz } 4352733def6aSYuval Mintz 4353ba56947aSBaoyou Xie static int 4354ba56947aSBaoyou Xie qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) 4355733def6aSYuval Mintz { 4356733def6aSYuval Mintz struct qed_vf_info *vf; 4357733def6aSYuval Mintz u8 vport_id; 4358733def6aSYuval Mintz int i; 4359733def6aSYuval Mintz 4360733def6aSYuval Mintz for_each_hwfn(cdev, i) { 4361733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4362733def6aSYuval Mintz 4363733def6aSYuval Mintz if 
(!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4364733def6aSYuval Mintz DP_NOTICE(p_hwfn, 4365733def6aSYuval Mintz "SR-IOV sanity check failed, can't set min rate\n"); 4366733def6aSYuval Mintz return -EINVAL; 4367733def6aSYuval Mintz } 4368733def6aSYuval Mintz } 4369733def6aSYuval Mintz 4370733def6aSYuval Mintz vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); 4371733def6aSYuval Mintz vport_id = vf->vport_id; 4372733def6aSYuval Mintz 4373733def6aSYuval Mintz return qed_configure_vport_wfq(cdev, vport_id, rate); 4374733def6aSYuval Mintz } 4375733def6aSYuval Mintz 437673390ac9SYuval Mintz static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) 437773390ac9SYuval Mintz { 437873390ac9SYuval Mintz struct qed_wfq_data *vf_vp_wfq; 437973390ac9SYuval Mintz struct qed_vf_info *vf_info; 438073390ac9SYuval Mintz 438173390ac9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 438273390ac9SYuval Mintz if (!vf_info) 438373390ac9SYuval Mintz return 0; 438473390ac9SYuval Mintz 438573390ac9SYuval Mintz vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; 438673390ac9SYuval Mintz 438773390ac9SYuval Mintz if (vf_vp_wfq->configured) 438873390ac9SYuval Mintz return vf_vp_wfq->min_speed; 438973390ac9SYuval Mintz else 439073390ac9SYuval Mintz return 0; 439173390ac9SYuval Mintz } 439273390ac9SYuval Mintz 439337bff2b9SYuval Mintz /** 439437bff2b9SYuval Mintz * qed_schedule_iov - schedules IOV task for VF and PF 439537bff2b9SYuval Mintz * @hwfn: hardware function pointer 439637bff2b9SYuval Mintz * @flag: IOV flag for VF/PF 439737bff2b9SYuval Mintz */ 439837bff2b9SYuval Mintz void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) 439937bff2b9SYuval Mintz { 440037bff2b9SYuval Mintz smp_mb__before_atomic(); 440137bff2b9SYuval Mintz set_bit(flag, &hwfn->iov_task_flags); 440237bff2b9SYuval Mintz smp_mb__after_atomic(); 440337bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 
440437bff2b9SYuval Mintz queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); 440537bff2b9SYuval Mintz } 440637bff2b9SYuval Mintz 44071408cc1fSYuval Mintz void qed_vf_start_iov_wq(struct qed_dev *cdev) 44081408cc1fSYuval Mintz { 44091408cc1fSYuval Mintz int i; 44101408cc1fSYuval Mintz 44111408cc1fSYuval Mintz for_each_hwfn(cdev, i) 44121408cc1fSYuval Mintz queue_delayed_work(cdev->hwfns[i].iov_wq, 44131408cc1fSYuval Mintz &cdev->hwfns[i].iov_task, 0); 44141408cc1fSYuval Mintz } 44151408cc1fSYuval Mintz 44160b55e27dSYuval Mintz int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) 44170b55e27dSYuval Mintz { 44180b55e27dSYuval Mintz int i, j; 44190b55e27dSYuval Mintz 44200b55e27dSYuval Mintz for_each_hwfn(cdev, i) 44210b55e27dSYuval Mintz if (cdev->hwfns[i].iov_wq) 44220b55e27dSYuval Mintz flush_workqueue(cdev->hwfns[i].iov_wq); 44230b55e27dSYuval Mintz 44240b55e27dSYuval Mintz /* Mark VFs for disablement */ 44250b55e27dSYuval Mintz qed_iov_set_vfs_to_disable(cdev, true); 44260b55e27dSYuval Mintz 44270b55e27dSYuval Mintz if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) 44280b55e27dSYuval Mintz pci_disable_sriov(cdev->pdev); 44290b55e27dSYuval Mintz 443064515dc8STomer Tayar if (cdev->recov_in_prog) { 443164515dc8STomer Tayar DP_VERBOSE(cdev, 443264515dc8STomer Tayar QED_MSG_IOV, 443364515dc8STomer Tayar "Skip SRIOV disable operations in the device since a recovery is in progress\n"); 443464515dc8STomer Tayar goto out; 443564515dc8STomer Tayar } 443664515dc8STomer Tayar 44370b55e27dSYuval Mintz for_each_hwfn(cdev, i) { 44380b55e27dSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 44390b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 44400b55e27dSYuval Mintz 44410b55e27dSYuval Mintz /* Failure to acquire the ptt in 100g creates an odd error 44420b55e27dSYuval Mintz * where the first engine has already relased IOV. 
44430b55e27dSYuval Mintz */ 44440b55e27dSYuval Mintz if (!ptt) { 44450b55e27dSYuval Mintz DP_ERR(hwfn, "Failed to acquire ptt\n"); 44460b55e27dSYuval Mintz return -EBUSY; 44470b55e27dSYuval Mintz } 44480b55e27dSYuval Mintz 4449733def6aSYuval Mintz /* Clean WFQ db and configure equal weight for all vports */ 4450733def6aSYuval Mintz qed_clean_wfq_db(hwfn, ptt); 4451733def6aSYuval Mintz 44520b55e27dSYuval Mintz qed_for_each_vf(hwfn, j) { 44530b55e27dSYuval Mintz int k; 44540b55e27dSYuval Mintz 44557eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(hwfn, j, true, false)) 44560b55e27dSYuval Mintz continue; 44570b55e27dSYuval Mintz 44580b55e27dSYuval Mintz /* Wait until VF is disabled before releasing */ 44590b55e27dSYuval Mintz for (k = 0; k < 100; k++) { 44600b55e27dSYuval Mintz if (!qed_iov_is_vf_stopped(hwfn, j)) 44610b55e27dSYuval Mintz msleep(20); 44620b55e27dSYuval Mintz else 44630b55e27dSYuval Mintz break; 44640b55e27dSYuval Mintz } 44650b55e27dSYuval Mintz 44660b55e27dSYuval Mintz if (k < 100) 44670b55e27dSYuval Mintz qed_iov_release_hw_for_vf(&cdev->hwfns[i], 44680b55e27dSYuval Mintz ptt, j); 44690b55e27dSYuval Mintz else 44700b55e27dSYuval Mintz DP_ERR(hwfn, 44710b55e27dSYuval Mintz "Timeout waiting for VF's FLR to end\n"); 44720b55e27dSYuval Mintz } 44730b55e27dSYuval Mintz 44740b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 44750b55e27dSYuval Mintz } 447664515dc8STomer Tayar out: 44770b55e27dSYuval Mintz qed_iov_set_vfs_to_disable(cdev, false); 44780b55e27dSYuval Mintz 44790b55e27dSYuval Mintz return 0; 44800b55e27dSYuval Mintz } 44810b55e27dSYuval Mintz 44823da7a37aSMintz, Yuval static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, 44833da7a37aSMintz, Yuval u16 vfid, 44843da7a37aSMintz, Yuval struct qed_iov_vf_init_params *params) 44853da7a37aSMintz, Yuval { 44863da7a37aSMintz, Yuval u16 base, i; 44873da7a37aSMintz, Yuval 44883da7a37aSMintz, Yuval /* Since we have an equal resource distribution per-VF, and we assume 44893da7a37aSMintz, Yuval * PF 
has acquired the QED_PF_L2_QUE first queues, we start setting 44903da7a37aSMintz, Yuval * sequentially from there. 44913da7a37aSMintz, Yuval */ 44923da7a37aSMintz, Yuval base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; 44933da7a37aSMintz, Yuval 44943da7a37aSMintz, Yuval params->rel_vf_id = vfid; 44953da7a37aSMintz, Yuval for (i = 0; i < params->num_queues; i++) { 44963da7a37aSMintz, Yuval params->req_rx_queue[i] = base + i; 44973da7a37aSMintz, Yuval params->req_tx_queue[i] = base + i; 44983da7a37aSMintz, Yuval } 44993da7a37aSMintz, Yuval } 45003da7a37aSMintz, Yuval 45010b55e27dSYuval Mintz static int qed_sriov_enable(struct qed_dev *cdev, int num) 45020b55e27dSYuval Mintz { 45033da7a37aSMintz, Yuval struct qed_iov_vf_init_params params; 4504538f8d00SSudarsana Reddy Kalluru struct qed_hwfn *hwfn; 4505538f8d00SSudarsana Reddy Kalluru struct qed_ptt *ptt; 45060b55e27dSYuval Mintz int i, j, rc; 45070b55e27dSYuval Mintz 45080b55e27dSYuval Mintz if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { 45090b55e27dSYuval Mintz DP_NOTICE(cdev, "Can start at most %d VFs\n", 45100b55e27dSYuval Mintz RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); 45110b55e27dSYuval Mintz return -EINVAL; 45120b55e27dSYuval Mintz } 45130b55e27dSYuval Mintz 45143da7a37aSMintz, Yuval memset(¶ms, 0, sizeof(params)); 45153da7a37aSMintz, Yuval 45160b55e27dSYuval Mintz /* Initialize HW for VF access */ 45170b55e27dSYuval Mintz for_each_hwfn(cdev, j) { 4518538f8d00SSudarsana Reddy Kalluru hwfn = &cdev->hwfns[j]; 4519538f8d00SSudarsana Reddy Kalluru ptt = qed_ptt_acquire(hwfn); 45205a1f965aSMintz, Yuval 45215a1f965aSMintz, Yuval /* Make sure not to use more than 16 queues per VF */ 45223da7a37aSMintz, Yuval params.num_queues = min_t(int, 45233da7a37aSMintz, Yuval FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, 45243da7a37aSMintz, Yuval 16); 45250b55e27dSYuval Mintz 45260b55e27dSYuval Mintz if (!ptt) { 45270b55e27dSYuval Mintz DP_ERR(hwfn, "Failed to acquire ptt\n"); 45280b55e27dSYuval Mintz rc = -EBUSY; 
45290b55e27dSYuval Mintz goto err; 45300b55e27dSYuval Mintz } 45310b55e27dSYuval Mintz 45320b55e27dSYuval Mintz for (i = 0; i < num; i++) { 45337eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(hwfn, i, false, true)) 45340b55e27dSYuval Mintz continue; 45350b55e27dSYuval Mintz 45363da7a37aSMintz, Yuval qed_sriov_enable_qid_config(hwfn, i, ¶ms); 45373da7a37aSMintz, Yuval rc = qed_iov_init_hw_for_vf(hwfn, ptt, ¶ms); 45380b55e27dSYuval Mintz if (rc) { 45390b55e27dSYuval Mintz DP_ERR(cdev, "Failed to enable VF[%d]\n", i); 45400b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 45410b55e27dSYuval Mintz goto err; 45420b55e27dSYuval Mintz } 45430b55e27dSYuval Mintz } 45440b55e27dSYuval Mintz 45450b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 45460b55e27dSYuval Mintz } 45470b55e27dSYuval Mintz 45480b55e27dSYuval Mintz /* Enable SRIOV PCIe functions */ 45490b55e27dSYuval Mintz rc = pci_enable_sriov(cdev->pdev, num); 45500b55e27dSYuval Mintz if (rc) { 45510b55e27dSYuval Mintz DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); 45520b55e27dSYuval Mintz goto err; 45530b55e27dSYuval Mintz } 45540b55e27dSYuval Mintz 4555538f8d00SSudarsana Reddy Kalluru hwfn = QED_LEADING_HWFN(cdev); 4556538f8d00SSudarsana Reddy Kalluru ptt = qed_ptt_acquire(hwfn); 4557538f8d00SSudarsana Reddy Kalluru if (!ptt) { 4558538f8d00SSudarsana Reddy Kalluru DP_ERR(hwfn, "Failed to acquire ptt\n"); 4559538f8d00SSudarsana Reddy Kalluru rc = -EBUSY; 4560538f8d00SSudarsana Reddy Kalluru goto err; 4561538f8d00SSudarsana Reddy Kalluru } 4562538f8d00SSudarsana Reddy Kalluru 4563538f8d00SSudarsana Reddy Kalluru rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); 4564538f8d00SSudarsana Reddy Kalluru if (rc) 4565538f8d00SSudarsana Reddy Kalluru DP_INFO(cdev, "Failed to update eswitch mode\n"); 4566538f8d00SSudarsana Reddy Kalluru qed_ptt_release(hwfn, ptt); 4567538f8d00SSudarsana Reddy Kalluru 45680b55e27dSYuval Mintz return num; 45690b55e27dSYuval Mintz 45700b55e27dSYuval Mintz err: 45710b55e27dSYuval Mintz 
qed_sriov_disable(cdev, false); 45720b55e27dSYuval Mintz return rc; 45730b55e27dSYuval Mintz } 45740b55e27dSYuval Mintz 45750b55e27dSYuval Mintz static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) 45760b55e27dSYuval Mintz { 45770b55e27dSYuval Mintz if (!IS_QED_SRIOV(cdev)) { 45780b55e27dSYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); 45790b55e27dSYuval Mintz return -EOPNOTSUPP; 45800b55e27dSYuval Mintz } 45810b55e27dSYuval Mintz 45820b55e27dSYuval Mintz if (num_vfs_param) 45830b55e27dSYuval Mintz return qed_sriov_enable(cdev, num_vfs_param); 45840b55e27dSYuval Mintz else 45850b55e27dSYuval Mintz return qed_sriov_disable(cdev, true); 45860b55e27dSYuval Mintz } 45870b55e27dSYuval Mintz 4588eff16960SYuval Mintz static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) 4589eff16960SYuval Mintz { 4590eff16960SYuval Mintz int i; 4591eff16960SYuval Mintz 4592eff16960SYuval Mintz if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 4593eff16960SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 4594eff16960SYuval Mintz "Cannot set a VF MAC; Sriov is not enabled\n"); 4595eff16960SYuval Mintz return -EINVAL; 4596eff16960SYuval Mintz } 4597eff16960SYuval Mintz 45987eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { 4599eff16960SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 4600eff16960SYuval Mintz "Cannot set VF[%d] MAC (VF is not active)\n", vfid); 4601eff16960SYuval Mintz return -EINVAL; 4602eff16960SYuval Mintz } 4603eff16960SYuval Mintz 4604eff16960SYuval Mintz for_each_hwfn(cdev, i) { 4605eff16960SYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4606eff16960SYuval Mintz struct qed_public_vf_info *vf_info; 4607eff16960SYuval Mintz 4608eff16960SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 4609eff16960SYuval Mintz if (!vf_info) 4610eff16960SYuval Mintz continue; 4611eff16960SYuval Mintz 46127425d822SShahed Shaikh /* Set the MAC, and schedule the IOV task 
*/ 46137425d822SShahed Shaikh if (vf_info->is_trusted_configured) 46147425d822SShahed Shaikh ether_addr_copy(vf_info->mac, mac); 46157425d822SShahed Shaikh else 4616eff16960SYuval Mintz ether_addr_copy(vf_info->forced_mac, mac); 46177425d822SShahed Shaikh 4618eff16960SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 4619eff16960SYuval Mintz } 4620eff16960SYuval Mintz 4621eff16960SYuval Mintz return 0; 4622eff16960SYuval Mintz } 4623eff16960SYuval Mintz 462408feecd7SYuval Mintz static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) 462508feecd7SYuval Mintz { 462608feecd7SYuval Mintz int i; 462708feecd7SYuval Mintz 462808feecd7SYuval Mintz if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 462908feecd7SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 463008feecd7SYuval Mintz "Cannot set a VF MAC; Sriov is not enabled\n"); 463108feecd7SYuval Mintz return -EINVAL; 463208feecd7SYuval Mintz } 463308feecd7SYuval Mintz 46347eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { 463508feecd7SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 463608feecd7SYuval Mintz "Cannot set VF[%d] MAC (VF is not active)\n", vfid); 463708feecd7SYuval Mintz return -EINVAL; 463808feecd7SYuval Mintz } 463908feecd7SYuval Mintz 464008feecd7SYuval Mintz for_each_hwfn(cdev, i) { 464108feecd7SYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 464208feecd7SYuval Mintz struct qed_public_vf_info *vf_info; 464308feecd7SYuval Mintz 464408feecd7SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 464508feecd7SYuval Mintz if (!vf_info) 464608feecd7SYuval Mintz continue; 464708feecd7SYuval Mintz 464808feecd7SYuval Mintz /* Set the forced vlan, and schedule the IOV task */ 464908feecd7SYuval Mintz vf_info->forced_vlan = vid; 465008feecd7SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 465108feecd7SYuval Mintz } 465208feecd7SYuval Mintz 465308feecd7SYuval Mintz return 0; 465408feecd7SYuval 
Mintz } 465508feecd7SYuval Mintz 465673390ac9SYuval Mintz static int qed_get_vf_config(struct qed_dev *cdev, 465773390ac9SYuval Mintz int vf_id, struct ifla_vf_info *ivi) 465873390ac9SYuval Mintz { 465973390ac9SYuval Mintz struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 466073390ac9SYuval Mintz struct qed_public_vf_info *vf_info; 466173390ac9SYuval Mintz struct qed_mcp_link_state link; 466273390ac9SYuval Mintz u32 tx_rate; 466373390ac9SYuval Mintz 466473390ac9SYuval Mintz /* Sanitize request */ 466573390ac9SYuval Mintz if (IS_VF(cdev)) 466673390ac9SYuval Mintz return -EINVAL; 466773390ac9SYuval Mintz 46687eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) { 466973390ac9SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 467073390ac9SYuval Mintz "VF index [%d] isn't active\n", vf_id); 467173390ac9SYuval Mintz return -EINVAL; 467273390ac9SYuval Mintz } 467373390ac9SYuval Mintz 467473390ac9SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); 467573390ac9SYuval Mintz 467673390ac9SYuval Mintz qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); 467773390ac9SYuval Mintz 467873390ac9SYuval Mintz /* Fill information about VF */ 467973390ac9SYuval Mintz ivi->vf = vf_id; 468073390ac9SYuval Mintz 468173390ac9SYuval Mintz if (is_valid_ether_addr(vf_info->forced_mac)) 468273390ac9SYuval Mintz ether_addr_copy(ivi->mac, vf_info->forced_mac); 468373390ac9SYuval Mintz else 468473390ac9SYuval Mintz ether_addr_copy(ivi->mac, vf_info->mac); 468573390ac9SYuval Mintz 468673390ac9SYuval Mintz ivi->vlan = vf_info->forced_vlan; 468773390ac9SYuval Mintz ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id); 468873390ac9SYuval Mintz ivi->linkstate = vf_info->link_state; 468973390ac9SYuval Mintz tx_rate = vf_info->tx_rate; 469073390ac9SYuval Mintz ivi->max_tx_rate = tx_rate ? 
tx_rate : link.speed; 469173390ac9SYuval Mintz ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); 469273390ac9SYuval Mintz 469373390ac9SYuval Mintz return 0; 469473390ac9SYuval Mintz } 469573390ac9SYuval Mintz 469636558c3dSYuval Mintz void qed_inform_vf_link_state(struct qed_hwfn *hwfn) 469736558c3dSYuval Mintz { 4698e50728efSMintz, Yuval struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev); 469936558c3dSYuval Mintz struct qed_mcp_link_capabilities caps; 470036558c3dSYuval Mintz struct qed_mcp_link_params params; 470136558c3dSYuval Mintz struct qed_mcp_link_state link; 470236558c3dSYuval Mintz int i; 470336558c3dSYuval Mintz 470436558c3dSYuval Mintz if (!hwfn->pf_iov_info) 470536558c3dSYuval Mintz return; 470636558c3dSYuval Mintz 470736558c3dSYuval Mintz /* Update bulletin of all future possible VFs with link configuration */ 470836558c3dSYuval Mintz for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { 4709733def6aSYuval Mintz struct qed_public_vf_info *vf_info; 4710733def6aSYuval Mintz 4711733def6aSYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, i, false); 4712733def6aSYuval Mintz if (!vf_info) 4713733def6aSYuval Mintz continue; 4714733def6aSYuval Mintz 4715e50728efSMintz, Yuval /* Only hwfn0 is actually interested in the link speed. 4716e50728efSMintz, Yuval * But since only it would receive an MFW indication of link, 4717e50728efSMintz, Yuval * need to take configuration from it - otherwise things like 4718e50728efSMintz, Yuval * rate limiting for hwfn1 VF would not work. 
4719e50728efSMintz, Yuval */ 4720e50728efSMintz, Yuval memcpy(¶ms, qed_mcp_get_link_params(lead_hwfn), 4721e50728efSMintz, Yuval sizeof(params)); 4722e50728efSMintz, Yuval memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link)); 4723e50728efSMintz, Yuval memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn), 472436558c3dSYuval Mintz sizeof(caps)); 472536558c3dSYuval Mintz 4726733def6aSYuval Mintz /* Modify link according to the VF's configured link state */ 4727733def6aSYuval Mintz switch (vf_info->link_state) { 4728733def6aSYuval Mintz case IFLA_VF_LINK_STATE_DISABLE: 4729733def6aSYuval Mintz link.link_up = false; 4730733def6aSYuval Mintz break; 4731733def6aSYuval Mintz case IFLA_VF_LINK_STATE_ENABLE: 4732733def6aSYuval Mintz link.link_up = true; 4733733def6aSYuval Mintz /* Set speed according to maximum supported by HW. 4734733def6aSYuval Mintz * that is 40G for regular devices and 100G for CMT 4735733def6aSYuval Mintz * mode devices. 4736733def6aSYuval Mintz */ 4737733def6aSYuval Mintz link.speed = (hwfn->cdev->num_hwfns > 1) ? 
4738733def6aSYuval Mintz 100000 : 40000; 4739733def6aSYuval Mintz default: 4740733def6aSYuval Mintz /* In auto mode pass PF link image to VF */ 4741733def6aSYuval Mintz break; 4742733def6aSYuval Mintz } 4743733def6aSYuval Mintz 4744733def6aSYuval Mintz if (link.link_up && vf_info->tx_rate) { 4745733def6aSYuval Mintz struct qed_ptt *ptt; 4746733def6aSYuval Mintz int rate; 4747733def6aSYuval Mintz 4748733def6aSYuval Mintz rate = min_t(int, vf_info->tx_rate, link.speed); 4749733def6aSYuval Mintz 4750733def6aSYuval Mintz ptt = qed_ptt_acquire(hwfn); 4751733def6aSYuval Mintz if (!ptt) { 4752733def6aSYuval Mintz DP_NOTICE(hwfn, "Failed to acquire PTT\n"); 4753733def6aSYuval Mintz return; 4754733def6aSYuval Mintz } 4755733def6aSYuval Mintz 4756733def6aSYuval Mintz if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { 4757733def6aSYuval Mintz vf_info->tx_rate = rate; 4758733def6aSYuval Mintz link.speed = rate; 4759733def6aSYuval Mintz } 4760733def6aSYuval Mintz 4761733def6aSYuval Mintz qed_ptt_release(hwfn, ptt); 4762733def6aSYuval Mintz } 4763733def6aSYuval Mintz 476436558c3dSYuval Mintz qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); 476536558c3dSYuval Mintz } 476636558c3dSYuval Mintz 476736558c3dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 476836558c3dSYuval Mintz } 476936558c3dSYuval Mintz 4770733def6aSYuval Mintz static int qed_set_vf_link_state(struct qed_dev *cdev, 4771733def6aSYuval Mintz int vf_id, int link_state) 4772733def6aSYuval Mintz { 4773733def6aSYuval Mintz int i; 4774733def6aSYuval Mintz 4775733def6aSYuval Mintz /* Sanitize request */ 4776733def6aSYuval Mintz if (IS_VF(cdev)) 4777733def6aSYuval Mintz return -EINVAL; 4778733def6aSYuval Mintz 47797eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) { 4780733def6aSYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 4781733def6aSYuval Mintz "VF index [%d] isn't active\n", vf_id); 4782733def6aSYuval Mintz return -EINVAL; 4783733def6aSYuval Mintz } 4784733def6aSYuval 
Mintz 4785733def6aSYuval Mintz /* Handle configuration of link state */ 4786733def6aSYuval Mintz for_each_hwfn(cdev, i) { 4787733def6aSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4788733def6aSYuval Mintz struct qed_public_vf_info *vf; 4789733def6aSYuval Mintz 4790733def6aSYuval Mintz vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); 4791733def6aSYuval Mintz if (!vf) 4792733def6aSYuval Mintz continue; 4793733def6aSYuval Mintz 4794733def6aSYuval Mintz if (vf->link_state == link_state) 4795733def6aSYuval Mintz continue; 4796733def6aSYuval Mintz 4797733def6aSYuval Mintz vf->link_state = link_state; 4798733def6aSYuval Mintz qed_inform_vf_link_state(&cdev->hwfns[i]); 4799733def6aSYuval Mintz } 4800733def6aSYuval Mintz 4801733def6aSYuval Mintz return 0; 4802733def6aSYuval Mintz } 4803733def6aSYuval Mintz 48046ddc7608SYuval Mintz static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) 48056ddc7608SYuval Mintz { 48066ddc7608SYuval Mintz int i, rc = -EINVAL; 48076ddc7608SYuval Mintz 48086ddc7608SYuval Mintz for_each_hwfn(cdev, i) { 48096ddc7608SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 48106ddc7608SYuval Mintz 48116ddc7608SYuval Mintz rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); 48126ddc7608SYuval Mintz if (rc) 48136ddc7608SYuval Mintz break; 48146ddc7608SYuval Mintz } 48156ddc7608SYuval Mintz 48166ddc7608SYuval Mintz return rc; 48176ddc7608SYuval Mintz } 48186ddc7608SYuval Mintz 4819733def6aSYuval Mintz static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) 4820733def6aSYuval Mintz { 4821733def6aSYuval Mintz int i; 4822733def6aSYuval Mintz 4823733def6aSYuval Mintz for_each_hwfn(cdev, i) { 4824733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4825733def6aSYuval Mintz struct qed_public_vf_info *vf; 4826733def6aSYuval Mintz 4827733def6aSYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4828733def6aSYuval Mintz DP_NOTICE(p_hwfn, 4829733def6aSYuval Mintz "SR-IOV sanity check failed, can't set 
tx rate\n"); 4830733def6aSYuval Mintz return -EINVAL; 4831733def6aSYuval Mintz } 4832733def6aSYuval Mintz 4833733def6aSYuval Mintz vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); 4834733def6aSYuval Mintz 4835733def6aSYuval Mintz vf->tx_rate = rate; 4836733def6aSYuval Mintz 4837733def6aSYuval Mintz qed_inform_vf_link_state(p_hwfn); 4838733def6aSYuval Mintz } 4839733def6aSYuval Mintz 4840733def6aSYuval Mintz return 0; 4841733def6aSYuval Mintz } 4842733def6aSYuval Mintz 4843733def6aSYuval Mintz static int qed_set_vf_rate(struct qed_dev *cdev, 4844733def6aSYuval Mintz int vfid, u32 min_rate, u32 max_rate) 4845733def6aSYuval Mintz { 4846733def6aSYuval Mintz int rc_min = 0, rc_max = 0; 4847733def6aSYuval Mintz 4848733def6aSYuval Mintz if (max_rate) 4849733def6aSYuval Mintz rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); 4850733def6aSYuval Mintz 4851733def6aSYuval Mintz if (min_rate) 4852733def6aSYuval Mintz rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); 4853733def6aSYuval Mintz 4854733def6aSYuval Mintz if (rc_max | rc_min) 4855733def6aSYuval Mintz return -EINVAL; 4856733def6aSYuval Mintz 4857733def6aSYuval Mintz return 0; 4858733def6aSYuval Mintz } 4859733def6aSYuval Mintz 4860f990c82cSMintz, Yuval static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) 4861f990c82cSMintz, Yuval { 4862f990c82cSMintz, Yuval int i; 4863f990c82cSMintz, Yuval 4864f990c82cSMintz, Yuval for_each_hwfn(cdev, i) { 4865f990c82cSMintz, Yuval struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4866f990c82cSMintz, Yuval struct qed_public_vf_info *vf; 4867f990c82cSMintz, Yuval 4868f990c82cSMintz, Yuval if (!qed_iov_pf_sanity_check(hwfn, vfid)) { 4869f990c82cSMintz, Yuval DP_NOTICE(hwfn, 4870f990c82cSMintz, Yuval "SR-IOV sanity check failed, can't set trust\n"); 4871f990c82cSMintz, Yuval return -EINVAL; 4872f990c82cSMintz, Yuval } 4873f990c82cSMintz, Yuval 4874f990c82cSMintz, Yuval vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 4875f990c82cSMintz, Yuval 
4876f990c82cSMintz, Yuval if (vf->is_trusted_request == trust) 4877f990c82cSMintz, Yuval return 0; 4878f990c82cSMintz, Yuval vf->is_trusted_request = trust; 4879f990c82cSMintz, Yuval 4880f990c82cSMintz, Yuval qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); 4881f990c82cSMintz, Yuval } 4882f990c82cSMintz, Yuval 4883f990c82cSMintz, Yuval return 0; 4884f990c82cSMintz, Yuval } 4885f990c82cSMintz, Yuval 488637bff2b9SYuval Mintz static void qed_handle_vf_msg(struct qed_hwfn *hwfn) 488737bff2b9SYuval Mintz { 488837bff2b9SYuval Mintz u64 events[QED_VF_ARRAY_LENGTH]; 488937bff2b9SYuval Mintz struct qed_ptt *ptt; 489037bff2b9SYuval Mintz int i; 489137bff2b9SYuval Mintz 489237bff2b9SYuval Mintz ptt = qed_ptt_acquire(hwfn); 489337bff2b9SYuval Mintz if (!ptt) { 489437bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 489537bff2b9SYuval Mintz "Can't acquire PTT; re-scheduling\n"); 489637bff2b9SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); 489737bff2b9SYuval Mintz return; 489837bff2b9SYuval Mintz } 489937bff2b9SYuval Mintz 4900fd3c615aSMintz, Yuval qed_iov_pf_get_pending_events(hwfn, events); 490137bff2b9SYuval Mintz 490237bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 490337bff2b9SYuval Mintz "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", 490437bff2b9SYuval Mintz events[0], events[1], events[2]); 490537bff2b9SYuval Mintz 490637bff2b9SYuval Mintz qed_for_each_vf(hwfn, i) { 490737bff2b9SYuval Mintz /* Skip VFs with no pending messages */ 490837bff2b9SYuval Mintz if (!(events[i / 64] & (1ULL << (i % 64)))) 490937bff2b9SYuval Mintz continue; 491037bff2b9SYuval Mintz 491137bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 491237bff2b9SYuval Mintz "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 491337bff2b9SYuval Mintz i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); 491437bff2b9SYuval Mintz 491537bff2b9SYuval Mintz /* Copy VF's message to PF's request buffer for that VF */ 491637bff2b9SYuval Mintz if (qed_iov_copy_vf_msg(hwfn, ptt, i)) 491737bff2b9SYuval Mintz 
continue; 491837bff2b9SYuval Mintz 491937bff2b9SYuval Mintz qed_iov_process_mbx_req(hwfn, ptt, i); 492037bff2b9SYuval Mintz } 492137bff2b9SYuval Mintz 492237bff2b9SYuval Mintz qed_ptt_release(hwfn, ptt); 492337bff2b9SYuval Mintz } 492437bff2b9SYuval Mintz 49257425d822SShahed Shaikh static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn, 49267425d822SShahed Shaikh u8 *mac, 49277425d822SShahed Shaikh struct qed_public_vf_info *info) 49287425d822SShahed Shaikh { 49297425d822SShahed Shaikh if (info->is_trusted_configured) { 49307425d822SShahed Shaikh if (is_valid_ether_addr(info->mac) && 49317425d822SShahed Shaikh (!mac || !ether_addr_equal(mac, info->mac))) 49327425d822SShahed Shaikh return true; 49337425d822SShahed Shaikh } else { 49347425d822SShahed Shaikh if (is_valid_ether_addr(info->forced_mac) && 49357425d822SShahed Shaikh (!mac || !ether_addr_equal(mac, info->forced_mac))) 49367425d822SShahed Shaikh return true; 49377425d822SShahed Shaikh } 49387425d822SShahed Shaikh 49397425d822SShahed Shaikh return false; 49407425d822SShahed Shaikh } 49417425d822SShahed Shaikh 49427425d822SShahed Shaikh static void qed_set_bulletin_mac(struct qed_hwfn *hwfn, 49437425d822SShahed Shaikh struct qed_public_vf_info *info, 49447425d822SShahed Shaikh int vfid) 49457425d822SShahed Shaikh { 49467425d822SShahed Shaikh if (info->is_trusted_configured) 49477425d822SShahed Shaikh qed_iov_bulletin_set_mac(hwfn, info->mac, vfid); 49487425d822SShahed Shaikh else 49497425d822SShahed Shaikh qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid); 49507425d822SShahed Shaikh } 49517425d822SShahed Shaikh 495208feecd7SYuval Mintz static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) 495308feecd7SYuval Mintz { 495408feecd7SYuval Mintz int i; 495508feecd7SYuval Mintz 495608feecd7SYuval Mintz qed_for_each_vf(hwfn, i) { 495708feecd7SYuval Mintz struct qed_public_vf_info *info; 495808feecd7SYuval Mintz bool update = false; 4959eff16960SYuval Mintz u8 *mac; 496008feecd7SYuval Mintz 
496108feecd7SYuval Mintz info = qed_iov_get_public_vf_info(hwfn, i, true); 496208feecd7SYuval Mintz if (!info) 496308feecd7SYuval Mintz continue; 496408feecd7SYuval Mintz 496508feecd7SYuval Mintz /* Update data on bulletin board */ 49667425d822SShahed Shaikh if (info->is_trusted_configured) 49677425d822SShahed Shaikh mac = qed_iov_bulletin_get_mac(hwfn, i); 49687425d822SShahed Shaikh else 4969eff16960SYuval Mintz mac = qed_iov_bulletin_get_forced_mac(hwfn, i); 49707425d822SShahed Shaikh 49717425d822SShahed Shaikh if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) { 4972eff16960SYuval Mintz DP_VERBOSE(hwfn, 4973eff16960SYuval Mintz QED_MSG_IOV, 4974eff16960SYuval Mintz "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", 4975eff16960SYuval Mintz i, 4976eff16960SYuval Mintz hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4977eff16960SYuval Mintz 49787425d822SShahed Shaikh /* Update bulletin board with MAC */ 49797425d822SShahed Shaikh qed_set_bulletin_mac(hwfn, info, i); 4980eff16960SYuval Mintz update = true; 4981eff16960SYuval Mintz } 498208feecd7SYuval Mintz 498308feecd7SYuval Mintz if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ 498408feecd7SYuval Mintz info->forced_vlan) { 498508feecd7SYuval Mintz DP_VERBOSE(hwfn, 498608feecd7SYuval Mintz QED_MSG_IOV, 498708feecd7SYuval Mintz "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", 498808feecd7SYuval Mintz info->forced_vlan, 498908feecd7SYuval Mintz i, 499008feecd7SYuval Mintz hwfn->cdev->p_iov_info->first_vf_in_pf + i); 499108feecd7SYuval Mintz qed_iov_bulletin_set_forced_vlan(hwfn, 499208feecd7SYuval Mintz info->forced_vlan, i); 499308feecd7SYuval Mintz update = true; 499408feecd7SYuval Mintz } 499508feecd7SYuval Mintz 499608feecd7SYuval Mintz if (update) 499708feecd7SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 499808feecd7SYuval Mintz } 499908feecd7SYuval Mintz } 500008feecd7SYuval Mintz 500136558c3dSYuval Mintz static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) 
500236558c3dSYuval Mintz { 500336558c3dSYuval Mintz struct qed_ptt *ptt; 500436558c3dSYuval Mintz int i; 500536558c3dSYuval Mintz 500636558c3dSYuval Mintz ptt = qed_ptt_acquire(hwfn); 500736558c3dSYuval Mintz if (!ptt) { 500836558c3dSYuval Mintz DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); 500936558c3dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 501036558c3dSYuval Mintz return; 501136558c3dSYuval Mintz } 501236558c3dSYuval Mintz 501336558c3dSYuval Mintz qed_for_each_vf(hwfn, i) 501436558c3dSYuval Mintz qed_iov_post_vf_bulletin(hwfn, i, ptt); 501536558c3dSYuval Mintz 501636558c3dSYuval Mintz qed_ptt_release(hwfn, ptt); 501736558c3dSYuval Mintz } 501836558c3dSYuval Mintz 50197425d822SShahed Shaikh static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) 50207425d822SShahed Shaikh { 50217425d822SShahed Shaikh struct qed_public_vf_info *vf_info; 50227425d822SShahed Shaikh struct qed_vf_info *vf; 50237425d822SShahed Shaikh u8 *force_mac; 50247425d822SShahed Shaikh int i; 50257425d822SShahed Shaikh 50267425d822SShahed Shaikh vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); 50277425d822SShahed Shaikh vf = qed_iov_get_vf_info(hwfn, vf_id, true); 50287425d822SShahed Shaikh 50297425d822SShahed Shaikh if (!vf_info || !vf) 50307425d822SShahed Shaikh return; 50317425d822SShahed Shaikh 50327425d822SShahed Shaikh /* Force MAC converted to generic MAC in case of VF trust on */ 50337425d822SShahed Shaikh if (vf_info->is_trusted_configured && 50347425d822SShahed Shaikh (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) { 50357425d822SShahed Shaikh force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id); 50367425d822SShahed Shaikh 50377425d822SShahed Shaikh if (force_mac) { 50387425d822SShahed Shaikh /* Clear existing shadow copy of MAC to have a clean 50397425d822SShahed Shaikh * slate. 
50407425d822SShahed Shaikh */ 50417425d822SShahed Shaikh for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 50427425d822SShahed Shaikh if (ether_addr_equal(vf->shadow_config.macs[i], 50437425d822SShahed Shaikh vf_info->mac)) { 50447425d822SShahed Shaikh memset(vf->shadow_config.macs[i], 0, 50457425d822SShahed Shaikh ETH_ALEN); 50467425d822SShahed Shaikh DP_VERBOSE(hwfn, QED_MSG_IOV, 50477425d822SShahed Shaikh "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", 50487425d822SShahed Shaikh vf_info->mac, vf_id); 50497425d822SShahed Shaikh break; 50507425d822SShahed Shaikh } 50517425d822SShahed Shaikh } 50527425d822SShahed Shaikh 50537425d822SShahed Shaikh ether_addr_copy(vf_info->mac, force_mac); 50547425d822SShahed Shaikh memset(vf_info->forced_mac, 0, ETH_ALEN); 50557425d822SShahed Shaikh vf->bulletin.p_virt->valid_bitmap &= 50567425d822SShahed Shaikh ~BIT(MAC_ADDR_FORCED); 50577425d822SShahed Shaikh qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 50587425d822SShahed Shaikh } 50597425d822SShahed Shaikh } 50607425d822SShahed Shaikh 50617425d822SShahed Shaikh /* Update shadow copy with VF MAC when trust mode is turned off */ 50627425d822SShahed Shaikh if (!vf_info->is_trusted_configured) { 50637425d822SShahed Shaikh u8 empty_mac[ETH_ALEN]; 50647425d822SShahed Shaikh 50657425d822SShahed Shaikh memset(empty_mac, 0, ETH_ALEN); 50667425d822SShahed Shaikh for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { 50677425d822SShahed Shaikh if (ether_addr_equal(vf->shadow_config.macs[i], 50687425d822SShahed Shaikh empty_mac)) { 50697425d822SShahed Shaikh ether_addr_copy(vf->shadow_config.macs[i], 50707425d822SShahed Shaikh vf_info->mac); 50717425d822SShahed Shaikh DP_VERBOSE(hwfn, QED_MSG_IOV, 50727425d822SShahed Shaikh "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n", 50737425d822SShahed Shaikh vf_info->mac, vf_id); 50747425d822SShahed Shaikh break; 50757425d822SShahed Shaikh } 50767425d822SShahed Shaikh } 50777425d822SShahed Shaikh /* Clear 
bulletin when trust mode is turned off, 50787425d822SShahed Shaikh * to have a clean slate for next (normal) operations. 50797425d822SShahed Shaikh */ 50807425d822SShahed Shaikh qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id); 50817425d822SShahed Shaikh qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 50827425d822SShahed Shaikh } 50837425d822SShahed Shaikh } 50847425d822SShahed Shaikh 5085f990c82cSMintz, Yuval static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) 5086f990c82cSMintz, Yuval { 5087f990c82cSMintz, Yuval struct qed_sp_vport_update_params params; 5088f990c82cSMintz, Yuval struct qed_filter_accept_flags *flags; 5089f990c82cSMintz, Yuval struct qed_public_vf_info *vf_info; 5090f990c82cSMintz, Yuval struct qed_vf_info *vf; 5091f990c82cSMintz, Yuval u8 mask; 5092f990c82cSMintz, Yuval int i; 5093f990c82cSMintz, Yuval 5094f990c82cSMintz, Yuval mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; 5095f990c82cSMintz, Yuval flags = ¶ms.accept_flags; 5096f990c82cSMintz, Yuval 5097f990c82cSMintz, Yuval qed_for_each_vf(hwfn, i) { 5098f990c82cSMintz, Yuval /* Need to make sure current requested configuration didn't 5099f990c82cSMintz, Yuval * flip so that we'll end up configuring something that's not 5100f990c82cSMintz, Yuval * needed. 
5101f990c82cSMintz, Yuval */ 5102f990c82cSMintz, Yuval vf_info = qed_iov_get_public_vf_info(hwfn, i, true); 5103f990c82cSMintz, Yuval if (vf_info->is_trusted_configured == 5104f990c82cSMintz, Yuval vf_info->is_trusted_request) 5105f990c82cSMintz, Yuval continue; 5106f990c82cSMintz, Yuval vf_info->is_trusted_configured = vf_info->is_trusted_request; 5107f990c82cSMintz, Yuval 51087425d822SShahed Shaikh /* Handle forced MAC mode */ 51097425d822SShahed Shaikh qed_update_mac_for_vf_trust_change(hwfn, i); 51107425d822SShahed Shaikh 5111f990c82cSMintz, Yuval /* Validate that the VF has a configured vport */ 5112f990c82cSMintz, Yuval vf = qed_iov_get_vf_info(hwfn, i, true); 5113f990c82cSMintz, Yuval if (!vf->vport_instance) 5114f990c82cSMintz, Yuval continue; 5115f990c82cSMintz, Yuval 5116f990c82cSMintz, Yuval memset(¶ms, 0, sizeof(params)); 5117f990c82cSMintz, Yuval params.opaque_fid = vf->opaque_fid; 5118f990c82cSMintz, Yuval params.vport_id = vf->vport_id; 5119f990c82cSMintz, Yuval 5120ff929696SManish Chopra params.update_ctl_frame_check = 1; 5121ff929696SManish Chopra params.mac_chk_en = !vf_info->is_trusted_configured; 5122ff929696SManish Chopra 5123f990c82cSMintz, Yuval if (vf_info->rx_accept_mode & mask) { 5124f990c82cSMintz, Yuval flags->update_rx_mode_config = 1; 5125f990c82cSMintz, Yuval flags->rx_accept_filter = vf_info->rx_accept_mode; 5126f990c82cSMintz, Yuval } 5127f990c82cSMintz, Yuval 5128f990c82cSMintz, Yuval if (vf_info->tx_accept_mode & mask) { 5129f990c82cSMintz, Yuval flags->update_tx_mode_config = 1; 5130f990c82cSMintz, Yuval flags->tx_accept_filter = vf_info->tx_accept_mode; 5131f990c82cSMintz, Yuval } 5132f990c82cSMintz, Yuval 5133f990c82cSMintz, Yuval /* Remove if needed; Otherwise this would set the mask */ 5134f990c82cSMintz, Yuval if (!vf_info->is_trusted_configured) { 5135f990c82cSMintz, Yuval flags->rx_accept_filter &= ~mask; 5136f990c82cSMintz, Yuval flags->tx_accept_filter &= ~mask; 5137f990c82cSMintz, Yuval } 5138f990c82cSMintz, Yuval 
5139f990c82cSMintz, Yuval if (flags->update_rx_mode_config || 5140ff929696SManish Chopra flags->update_tx_mode_config || 5141ff929696SManish Chopra params.update_ctl_frame_check) 5142f990c82cSMintz, Yuval qed_sp_vport_update(hwfn, ¶ms, 5143f990c82cSMintz, Yuval QED_SPQ_MODE_EBLOCK, NULL); 5144f990c82cSMintz, Yuval } 5145f990c82cSMintz, Yuval } 5146f990c82cSMintz, Yuval 5147ba56947aSBaoyou Xie static void qed_iov_pf_task(struct work_struct *work) 5148ba56947aSBaoyou Xie 514937bff2b9SYuval Mintz { 515037bff2b9SYuval Mintz struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 515137bff2b9SYuval Mintz iov_task.work); 51520b55e27dSYuval Mintz int rc; 515337bff2b9SYuval Mintz 515437bff2b9SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) 515537bff2b9SYuval Mintz return; 515637bff2b9SYuval Mintz 51570b55e27dSYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { 51580b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 51590b55e27dSYuval Mintz 51600b55e27dSYuval Mintz if (!ptt) { 51610b55e27dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 51620b55e27dSYuval Mintz return; 51630b55e27dSYuval Mintz } 51640b55e27dSYuval Mintz 51650b55e27dSYuval Mintz rc = qed_iov_vf_flr_cleanup(hwfn, ptt); 51660b55e27dSYuval Mintz if (rc) 51670b55e27dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 51680b55e27dSYuval Mintz 51690b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 51700b55e27dSYuval Mintz } 51710b55e27dSYuval Mintz 517237bff2b9SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) 517337bff2b9SYuval Mintz qed_handle_vf_msg(hwfn); 517408feecd7SYuval Mintz 517508feecd7SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, 517608feecd7SYuval Mintz &hwfn->iov_task_flags)) 517708feecd7SYuval Mintz qed_handle_pf_set_vf_unicast(hwfn); 517808feecd7SYuval Mintz 517936558c3dSYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, 
518036558c3dSYuval Mintz &hwfn->iov_task_flags)) 518136558c3dSYuval Mintz qed_handle_bulletin_post(hwfn); 5182f990c82cSMintz, Yuval 5183f990c82cSMintz, Yuval if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) 5184f990c82cSMintz, Yuval qed_iov_handle_trust_change(hwfn); 518537bff2b9SYuval Mintz } 518637bff2b9SYuval Mintz 518737bff2b9SYuval Mintz void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) 518837bff2b9SYuval Mintz { 518937bff2b9SYuval Mintz int i; 519037bff2b9SYuval Mintz 519137bff2b9SYuval Mintz for_each_hwfn(cdev, i) { 519237bff2b9SYuval Mintz if (!cdev->hwfns[i].iov_wq) 519337bff2b9SYuval Mintz continue; 519437bff2b9SYuval Mintz 519537bff2b9SYuval Mintz if (schedule_first) { 519637bff2b9SYuval Mintz qed_schedule_iov(&cdev->hwfns[i], 519737bff2b9SYuval Mintz QED_IOV_WQ_STOP_WQ_FLAG); 519837bff2b9SYuval Mintz cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); 519937bff2b9SYuval Mintz } 520037bff2b9SYuval Mintz 520137bff2b9SYuval Mintz flush_workqueue(cdev->hwfns[i].iov_wq); 520237bff2b9SYuval Mintz destroy_workqueue(cdev->hwfns[i].iov_wq); 520337bff2b9SYuval Mintz } 520437bff2b9SYuval Mintz } 520537bff2b9SYuval Mintz 520637bff2b9SYuval Mintz int qed_iov_wq_start(struct qed_dev *cdev) 520737bff2b9SYuval Mintz { 520837bff2b9SYuval Mintz char name[NAME_SIZE]; 520937bff2b9SYuval Mintz int i; 521037bff2b9SYuval Mintz 521137bff2b9SYuval Mintz for_each_hwfn(cdev, i) { 521237bff2b9SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 521337bff2b9SYuval Mintz 521436558c3dSYuval Mintz /* PFs needs a dedicated workqueue only if they support IOV. 521536558c3dSYuval Mintz * VFs always require one. 
521636558c3dSYuval Mintz */ 521736558c3dSYuval Mintz if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) 521837bff2b9SYuval Mintz continue; 521937bff2b9SYuval Mintz 522037bff2b9SYuval Mintz snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", 522137bff2b9SYuval Mintz cdev->pdev->bus->number, 522237bff2b9SYuval Mintz PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); 522337bff2b9SYuval Mintz 522437bff2b9SYuval Mintz p_hwfn->iov_wq = create_singlethread_workqueue(name); 522537bff2b9SYuval Mintz if (!p_hwfn->iov_wq) { 522637bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); 522737bff2b9SYuval Mintz return -ENOMEM; 522837bff2b9SYuval Mintz } 522937bff2b9SYuval Mintz 523036558c3dSYuval Mintz if (IS_PF(cdev)) 523137bff2b9SYuval Mintz INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); 523236558c3dSYuval Mintz else 523336558c3dSYuval Mintz INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); 523437bff2b9SYuval Mintz } 523537bff2b9SYuval Mintz 523637bff2b9SYuval Mintz return 0; 523737bff2b9SYuval Mintz } 52380b55e27dSYuval Mintz 52390b55e27dSYuval Mintz const struct qed_iov_hv_ops qed_iov_ops_pass = { 52400b55e27dSYuval Mintz .configure = &qed_sriov_configure, 5241eff16960SYuval Mintz .set_mac = &qed_sriov_pf_set_mac, 524208feecd7SYuval Mintz .set_vlan = &qed_sriov_pf_set_vlan, 524373390ac9SYuval Mintz .get_config = &qed_get_vf_config, 5244733def6aSYuval Mintz .set_link_state = &qed_set_vf_link_state, 52456ddc7608SYuval Mintz .set_spoof = &qed_spoof_configure, 5246733def6aSYuval Mintz .set_rate = &qed_set_vf_rate, 5247f990c82cSMintz, Yuval .set_trust = &qed_set_vf_trust, 52480b55e27dSYuval Mintz }; 5249