132a47e72SYuval Mintz /* QLogic qed NIC Driver 2e8f1cb50SMintz, Yuval * Copyright (c) 2015-2017 QLogic Corporation 332a47e72SYuval Mintz * 4e8f1cb50SMintz, Yuval * This software is available to you under a choice of one of two 5e8f1cb50SMintz, Yuval * licenses. You may choose to be licensed under the terms of the GNU 6e8f1cb50SMintz, Yuval * General Public License (GPL) Version 2, available from the file 7e8f1cb50SMintz, Yuval * COPYING in the main directory of this source tree, or the 8e8f1cb50SMintz, Yuval * OpenIB.org BSD license below: 9e8f1cb50SMintz, Yuval * 10e8f1cb50SMintz, Yuval * Redistribution and use in source and binary forms, with or 11e8f1cb50SMintz, Yuval * without modification, are permitted provided that the following 12e8f1cb50SMintz, Yuval * conditions are met: 13e8f1cb50SMintz, Yuval * 14e8f1cb50SMintz, Yuval * - Redistributions of source code must retain the above 15e8f1cb50SMintz, Yuval * copyright notice, this list of conditions and the following 16e8f1cb50SMintz, Yuval * disclaimer. 17e8f1cb50SMintz, Yuval * 18e8f1cb50SMintz, Yuval * - Redistributions in binary form must reproduce the above 19e8f1cb50SMintz, Yuval * copyright notice, this list of conditions and the following 20e8f1cb50SMintz, Yuval * disclaimer in the documentation and /or other materials 21e8f1cb50SMintz, Yuval * provided with the distribution. 22e8f1cb50SMintz, Yuval * 23e8f1cb50SMintz, Yuval * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24e8f1cb50SMintz, Yuval * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25e8f1cb50SMintz, Yuval * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26e8f1cb50SMintz, Yuval * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27e8f1cb50SMintz, Yuval * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28e8f1cb50SMintz, Yuval * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29e8f1cb50SMintz, Yuval * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30e8f1cb50SMintz, Yuval * SOFTWARE. 3132a47e72SYuval Mintz */ 3232a47e72SYuval Mintz 33dacd88d6SYuval Mintz #include <linux/etherdevice.h> 3436558c3dSYuval Mintz #include <linux/crc32.h> 35f29ffdb6SMintz, Yuval #include <linux/vmalloc.h> 360b55e27dSYuval Mintz #include <linux/qed/qed_iov_if.h> 371408cc1fSYuval Mintz #include "qed_cxt.h" 381408cc1fSYuval Mintz #include "qed_hsi.h" 3932a47e72SYuval Mintz #include "qed_hw.h" 401408cc1fSYuval Mintz #include "qed_init_ops.h" 4132a47e72SYuval Mintz #include "qed_int.h" 421408cc1fSYuval Mintz #include "qed_mcp.h" 4332a47e72SYuval Mintz #include "qed_reg_addr.h" 441408cc1fSYuval Mintz #include "qed_sp.h" 4532a47e72SYuval Mintz #include "qed_sriov.h" 4632a47e72SYuval Mintz #include "qed_vf.h" 476c9e80eaSMichal Kalderon static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, 486c9e80eaSMichal Kalderon u8 opcode, 496c9e80eaSMichal Kalderon __le16 echo, 506c9e80eaSMichal Kalderon union event_ring_data *data, u8 fw_return_code); 516c9e80eaSMichal Kalderon 5232a47e72SYuval Mintz 533b19f478SMintz, Yuval static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) 543b19f478SMintz, Yuval { 5508bc8f15SMintz, Yuval u8 legacy = 0; 563b19f478SMintz, Yuval 573b19f478SMintz, Yuval if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 583b19f478SMintz, Yuval ETH_HSI_VER_NO_PKT_LEN_TUNN) 593b19f478SMintz, Yuval legacy |= QED_QCID_LEGACY_VF_RX_PROD; 603b19f478SMintz, Yuval 6108bc8f15SMintz, Yuval if (!(p_vf->acquire.vfdev_info.capabilities & 6208bc8f15SMintz, Yuval VFPF_ACQUIRE_CAP_QUEUE_QIDS)) 6308bc8f15SMintz, Yuval legacy |= QED_QCID_LEGACY_VF_CID; 6408bc8f15SMintz, Yuval 653b19f478SMintz, Yuval return legacy; 
663b19f478SMintz, Yuval } 673b19f478SMintz, Yuval 681408cc1fSYuval Mintz /* IOV ramrods */ 691fe614d1SYuval Mintz static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) 701408cc1fSYuval Mintz { 711408cc1fSYuval Mintz struct vf_start_ramrod_data *p_ramrod = NULL; 721408cc1fSYuval Mintz struct qed_spq_entry *p_ent = NULL; 731408cc1fSYuval Mintz struct qed_sp_init_data init_data; 741408cc1fSYuval Mintz int rc = -EINVAL; 751fe614d1SYuval Mintz u8 fp_minor; 761408cc1fSYuval Mintz 771408cc1fSYuval Mintz /* Get SPQ entry */ 781408cc1fSYuval Mintz memset(&init_data, 0, sizeof(init_data)); 791408cc1fSYuval Mintz init_data.cid = qed_spq_get_cid(p_hwfn); 801fe614d1SYuval Mintz init_data.opaque_fid = p_vf->opaque_fid; 811408cc1fSYuval Mintz init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 821408cc1fSYuval Mintz 831408cc1fSYuval Mintz rc = qed_sp_init_request(p_hwfn, &p_ent, 841408cc1fSYuval Mintz COMMON_RAMROD_VF_START, 851408cc1fSYuval Mintz PROTOCOLID_COMMON, &init_data); 861408cc1fSYuval Mintz if (rc) 871408cc1fSYuval Mintz return rc; 881408cc1fSYuval Mintz 891408cc1fSYuval Mintz p_ramrod = &p_ent->ramrod.vf_start; 901408cc1fSYuval Mintz 911fe614d1SYuval Mintz p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); 921fe614d1SYuval Mintz p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); 931408cc1fSYuval Mintz 941fe614d1SYuval Mintz switch (p_hwfn->hw_info.personality) { 951fe614d1SYuval Mintz case QED_PCI_ETH: 961408cc1fSYuval Mintz p_ramrod->personality = PERSONALITY_ETH; 971fe614d1SYuval Mintz break; 981fe614d1SYuval Mintz case QED_PCI_ETH_ROCE: 991fe614d1SYuval Mintz p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; 1001fe614d1SYuval Mintz break; 1011fe614d1SYuval Mintz default: 1021fe614d1SYuval Mintz DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", 1031fe614d1SYuval Mintz p_hwfn->hw_info.personality); 1041fe614d1SYuval Mintz return -EINVAL; 1051fe614d1SYuval Mintz } 1061fe614d1SYuval Mintz 1071fe614d1SYuval Mintz fp_minor = 
p_vf->acquire.vfdev_info.eth_fp_hsi_minor; 108a044df83SYuval Mintz if (fp_minor > ETH_HSI_VER_MINOR && 109a044df83SYuval Mintz fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { 1101fe614d1SYuval Mintz DP_VERBOSE(p_hwfn, 1111fe614d1SYuval Mintz QED_MSG_IOV, 1121fe614d1SYuval Mintz "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", 1131fe614d1SYuval Mintz p_vf->abs_vf_id, 1141fe614d1SYuval Mintz ETH_HSI_VER_MAJOR, 1151fe614d1SYuval Mintz fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 1161fe614d1SYuval Mintz fp_minor = ETH_HSI_VER_MINOR; 1171fe614d1SYuval Mintz } 1181fe614d1SYuval Mintz 119351a4dedSYuval Mintz p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; 1201fe614d1SYuval Mintz p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; 1211fe614d1SYuval Mintz 1221fe614d1SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1231fe614d1SYuval Mintz "VF[%d] - Starting using HSI %02x.%02x\n", 1241fe614d1SYuval Mintz p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); 1251408cc1fSYuval Mintz 1261408cc1fSYuval Mintz return qed_spq_post(p_hwfn, p_ent, NULL); 1271408cc1fSYuval Mintz } 1281408cc1fSYuval Mintz 1290b55e27dSYuval Mintz static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, 1300b55e27dSYuval Mintz u32 concrete_vfid, u16 opaque_vfid) 1310b55e27dSYuval Mintz { 1320b55e27dSYuval Mintz struct vf_stop_ramrod_data *p_ramrod = NULL; 1330b55e27dSYuval Mintz struct qed_spq_entry *p_ent = NULL; 1340b55e27dSYuval Mintz struct qed_sp_init_data init_data; 1350b55e27dSYuval Mintz int rc = -EINVAL; 1360b55e27dSYuval Mintz 1370b55e27dSYuval Mintz /* Get SPQ entry */ 1380b55e27dSYuval Mintz memset(&init_data, 0, sizeof(init_data)); 1390b55e27dSYuval Mintz init_data.cid = qed_spq_get_cid(p_hwfn); 1400b55e27dSYuval Mintz init_data.opaque_fid = opaque_vfid; 1410b55e27dSYuval Mintz init_data.comp_mode = QED_SPQ_MODE_EBLOCK; 1420b55e27dSYuval Mintz 1430b55e27dSYuval Mintz rc = qed_sp_init_request(p_hwfn, &p_ent, 
1440b55e27dSYuval Mintz COMMON_RAMROD_VF_STOP, 1450b55e27dSYuval Mintz PROTOCOLID_COMMON, &init_data); 1460b55e27dSYuval Mintz if (rc) 1470b55e27dSYuval Mintz return rc; 1480b55e27dSYuval Mintz 1490b55e27dSYuval Mintz p_ramrod = &p_ent->ramrod.vf_stop; 1500b55e27dSYuval Mintz 1510b55e27dSYuval Mintz p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); 1520b55e27dSYuval Mintz 1530b55e27dSYuval Mintz return qed_spq_post(p_hwfn, p_ent, NULL); 1540b55e27dSYuval Mintz } 1550b55e27dSYuval Mintz 156ba56947aSBaoyou Xie static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, 1577eff82b0SYuval Mintz int rel_vf_id, 1587eff82b0SYuval Mintz bool b_enabled_only, bool b_non_malicious) 15932a47e72SYuval Mintz { 16032a47e72SYuval Mintz if (!p_hwfn->pf_iov_info) { 16132a47e72SYuval Mintz DP_NOTICE(p_hwfn->cdev, "No iov info\n"); 16232a47e72SYuval Mintz return false; 16332a47e72SYuval Mintz } 16432a47e72SYuval Mintz 16532a47e72SYuval Mintz if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || 16632a47e72SYuval Mintz (rel_vf_id < 0)) 16732a47e72SYuval Mintz return false; 16832a47e72SYuval Mintz 16932a47e72SYuval Mintz if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && 17032a47e72SYuval Mintz b_enabled_only) 17132a47e72SYuval Mintz return false; 17232a47e72SYuval Mintz 1737eff82b0SYuval Mintz if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && 1747eff82b0SYuval Mintz b_non_malicious) 1757eff82b0SYuval Mintz return false; 1767eff82b0SYuval Mintz 17732a47e72SYuval Mintz return true; 17832a47e72SYuval Mintz } 17932a47e72SYuval Mintz 18037bff2b9SYuval Mintz static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, 18137bff2b9SYuval Mintz u16 relative_vf_id, 18237bff2b9SYuval Mintz bool b_enabled_only) 18337bff2b9SYuval Mintz { 18437bff2b9SYuval Mintz struct qed_vf_info *vf = NULL; 18537bff2b9SYuval Mintz 18637bff2b9SYuval Mintz if (!p_hwfn->pf_iov_info) { 18737bff2b9SYuval Mintz DP_NOTICE(p_hwfn->cdev, "No iov info\n"); 
18837bff2b9SYuval Mintz return NULL; 18937bff2b9SYuval Mintz } 19037bff2b9SYuval Mintz 1917eff82b0SYuval Mintz if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, 1927eff82b0SYuval Mintz b_enabled_only, false)) 19337bff2b9SYuval Mintz vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; 19437bff2b9SYuval Mintz else 19537bff2b9SYuval Mintz DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", 19637bff2b9SYuval Mintz relative_vf_id); 19737bff2b9SYuval Mintz 19837bff2b9SYuval Mintz return vf; 19937bff2b9SYuval Mintz } 20037bff2b9SYuval Mintz 201007bc371SMintz, Yuval static struct qed_queue_cid * 202007bc371SMintz, Yuval qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue) 203007bc371SMintz, Yuval { 204007bc371SMintz, Yuval int i; 205007bc371SMintz, Yuval 206007bc371SMintz, Yuval for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 207007bc371SMintz, Yuval if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx) 208007bc371SMintz, Yuval return p_queue->cids[i].p_cid; 209007bc371SMintz, Yuval } 210007bc371SMintz, Yuval 211007bc371SMintz, Yuval return NULL; 212007bc371SMintz, Yuval } 213007bc371SMintz, Yuval 214f109c240SMintz, Yuval enum qed_iov_validate_q_mode { 215f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_NA, 216f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_ENABLE, 217f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_DISABLE, 218f109c240SMintz, Yuval }; 219f109c240SMintz, Yuval 220f109c240SMintz, Yuval static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn, 221f109c240SMintz, Yuval struct qed_vf_info *p_vf, 222f109c240SMintz, Yuval u16 qid, 223f109c240SMintz, Yuval enum qed_iov_validate_q_mode mode, 224f109c240SMintz, Yuval bool b_is_tx) 22541086467SYuval Mintz { 226007bc371SMintz, Yuval int i; 227007bc371SMintz, Yuval 228f109c240SMintz, Yuval if (mode == QED_IOV_VALIDATE_Q_NA) 229f109c240SMintz, Yuval return true; 230f109c240SMintz, Yuval 231007bc371SMintz, Yuval for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { 232007bc371SMintz, Yuval struct qed_vf_queue_cid *p_qcid; 
233007bc371SMintz, Yuval 234007bc371SMintz, Yuval p_qcid = &p_vf->vf_queues[qid].cids[i]; 235007bc371SMintz, Yuval 236007bc371SMintz, Yuval if (!p_qcid->p_cid) 237007bc371SMintz, Yuval continue; 238007bc371SMintz, Yuval 239007bc371SMintz, Yuval if (p_qcid->b_is_tx != b_is_tx) 240007bc371SMintz, Yuval continue; 241007bc371SMintz, Yuval 242f109c240SMintz, Yuval return mode == QED_IOV_VALIDATE_Q_ENABLE; 243007bc371SMintz, Yuval } 244f109c240SMintz, Yuval 245f109c240SMintz, Yuval /* In case we haven't found any valid cid, then its disabled */ 246f109c240SMintz, Yuval return mode == QED_IOV_VALIDATE_Q_DISABLE; 247f109c240SMintz, Yuval } 248f109c240SMintz, Yuval 249f109c240SMintz, Yuval static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, 250f109c240SMintz, Yuval struct qed_vf_info *p_vf, 251f109c240SMintz, Yuval u16 rx_qid, 252f109c240SMintz, Yuval enum qed_iov_validate_q_mode mode) 253f109c240SMintz, Yuval { 254f109c240SMintz, Yuval if (rx_qid >= p_vf->num_rxqs) { 25541086467SYuval Mintz DP_VERBOSE(p_hwfn, 25641086467SYuval Mintz QED_MSG_IOV, 25741086467SYuval Mintz "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", 25841086467SYuval Mintz p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); 259f109c240SMintz, Yuval return false; 260f109c240SMintz, Yuval } 261f109c240SMintz, Yuval 262f109c240SMintz, Yuval return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false); 26341086467SYuval Mintz } 26441086467SYuval Mintz 26541086467SYuval Mintz static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, 266f109c240SMintz, Yuval struct qed_vf_info *p_vf, 267f109c240SMintz, Yuval u16 tx_qid, 268f109c240SMintz, Yuval enum qed_iov_validate_q_mode mode) 26941086467SYuval Mintz { 270f109c240SMintz, Yuval if (tx_qid >= p_vf->num_txqs) { 27141086467SYuval Mintz DP_VERBOSE(p_hwfn, 27241086467SYuval Mintz QED_MSG_IOV, 27341086467SYuval Mintz "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", 27441086467SYuval Mintz p_vf->abs_vf_id, tx_qid, 
p_vf->num_txqs); 275f109c240SMintz, Yuval return false; 276f109c240SMintz, Yuval } 277f109c240SMintz, Yuval 278f109c240SMintz, Yuval return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true); 27941086467SYuval Mintz } 28041086467SYuval Mintz 28141086467SYuval Mintz static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, 28241086467SYuval Mintz struct qed_vf_info *p_vf, u16 sb_idx) 28341086467SYuval Mintz { 28441086467SYuval Mintz int i; 28541086467SYuval Mintz 28641086467SYuval Mintz for (i = 0; i < p_vf->num_sbs; i++) 28741086467SYuval Mintz if (p_vf->igu_sbs[i] == sb_idx) 28841086467SYuval Mintz return true; 28941086467SYuval Mintz 29041086467SYuval Mintz DP_VERBOSE(p_hwfn, 29141086467SYuval Mintz QED_MSG_IOV, 29241086467SYuval Mintz "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", 29341086467SYuval Mintz p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); 29441086467SYuval Mintz 29541086467SYuval Mintz return false; 29641086467SYuval Mintz } 29741086467SYuval Mintz 298f109c240SMintz, Yuval static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn, 299f109c240SMintz, Yuval struct qed_vf_info *p_vf) 300f109c240SMintz, Yuval { 301f109c240SMintz, Yuval u8 i; 302f109c240SMintz, Yuval 303f109c240SMintz, Yuval for (i = 0; i < p_vf->num_rxqs; i++) 304f109c240SMintz, Yuval if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, 305f109c240SMintz, Yuval QED_IOV_VALIDATE_Q_ENABLE, 306f109c240SMintz, Yuval false)) 307f109c240SMintz, Yuval return true; 308f109c240SMintz, Yuval 309f109c240SMintz, Yuval return false; 310f109c240SMintz, Yuval } 311f109c240SMintz, Yuval 312f109c240SMintz, Yuval static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn, 313f109c240SMintz, Yuval struct qed_vf_info *p_vf) 314f109c240SMintz, Yuval { 315f109c240SMintz, Yuval u8 i; 316f109c240SMintz, Yuval 317f109c240SMintz, Yuval for (i = 0; i < p_vf->num_txqs; i++) 318f109c240SMintz, Yuval if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, 319f109c240SMintz, 
Yuval QED_IOV_VALIDATE_Q_ENABLE, 320f109c240SMintz, Yuval true)) 321f109c240SMintz, Yuval return true; 322f109c240SMintz, Yuval 323f109c240SMintz, Yuval return false; 324f109c240SMintz, Yuval } 325f109c240SMintz, Yuval 326ba56947aSBaoyou Xie static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, 32736558c3dSYuval Mintz int vfid, struct qed_ptt *p_ptt) 32836558c3dSYuval Mintz { 32936558c3dSYuval Mintz struct qed_bulletin_content *p_bulletin; 33036558c3dSYuval Mintz int crc_size = sizeof(p_bulletin->crc); 33136558c3dSYuval Mintz struct qed_dmae_params params; 33236558c3dSYuval Mintz struct qed_vf_info *p_vf; 33336558c3dSYuval Mintz 33436558c3dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 33536558c3dSYuval Mintz if (!p_vf) 33636558c3dSYuval Mintz return -EINVAL; 33736558c3dSYuval Mintz 33836558c3dSYuval Mintz if (!p_vf->vf_bulletin) 33936558c3dSYuval Mintz return -EINVAL; 34036558c3dSYuval Mintz 34136558c3dSYuval Mintz p_bulletin = p_vf->bulletin.p_virt; 34236558c3dSYuval Mintz 34336558c3dSYuval Mintz /* Increment bulletin board version and compute crc */ 34436558c3dSYuval Mintz p_bulletin->version++; 34536558c3dSYuval Mintz p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, 34636558c3dSYuval Mintz p_vf->bulletin.size - crc_size); 34736558c3dSYuval Mintz 34836558c3dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 34936558c3dSYuval Mintz "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", 35036558c3dSYuval Mintz p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); 35136558c3dSYuval Mintz 35236558c3dSYuval Mintz /* propagate bulletin board via dmae to vm memory */ 35336558c3dSYuval Mintz memset(¶ms, 0, sizeof(params)); 35436558c3dSYuval Mintz params.flags = QED_DMAE_FLAG_VF_DST; 35536558c3dSYuval Mintz params.dst_vfid = p_vf->abs_vf_id; 35636558c3dSYuval Mintz return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, 35736558c3dSYuval Mintz p_vf->vf_bulletin, p_vf->bulletin.size / 4, 35836558c3dSYuval Mintz ¶ms); 35936558c3dSYuval 
Mintz } 36036558c3dSYuval Mintz 36132a47e72SYuval Mintz static int qed_iov_pci_cfg_info(struct qed_dev *cdev) 36232a47e72SYuval Mintz { 36332a47e72SYuval Mintz struct qed_hw_sriov_info *iov = cdev->p_iov_info; 36432a47e72SYuval Mintz int pos = iov->pos; 36532a47e72SYuval Mintz 36632a47e72SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); 36732a47e72SYuval Mintz pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 36832a47e72SYuval Mintz 36932a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 37032a47e72SYuval Mintz pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); 37132a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 37232a47e72SYuval Mintz pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); 37332a47e72SYuval Mintz 37432a47e72SYuval Mintz pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); 37532a47e72SYuval Mintz if (iov->num_vfs) { 37632a47e72SYuval Mintz DP_VERBOSE(cdev, 37732a47e72SYuval Mintz QED_MSG_IOV, 37832a47e72SYuval Mintz "Number of VFs are already set to non-zero value. 
Ignoring PCI configuration value\n"); 37932a47e72SYuval Mintz iov->num_vfs = 0; 38032a47e72SYuval Mintz } 38132a47e72SYuval Mintz 38232a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 38332a47e72SYuval Mintz pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 38432a47e72SYuval Mintz 38532a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 38632a47e72SYuval Mintz pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 38732a47e72SYuval Mintz 38832a47e72SYuval Mintz pci_read_config_word(cdev->pdev, 38932a47e72SYuval Mintz pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); 39032a47e72SYuval Mintz 39132a47e72SYuval Mintz pci_read_config_dword(cdev->pdev, 39232a47e72SYuval Mintz pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 39332a47e72SYuval Mintz 39432a47e72SYuval Mintz pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); 39532a47e72SYuval Mintz 39632a47e72SYuval Mintz pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 39732a47e72SYuval Mintz 39832a47e72SYuval Mintz DP_VERBOSE(cdev, 39932a47e72SYuval Mintz QED_MSG_IOV, 40032a47e72SYuval Mintz "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 40132a47e72SYuval Mintz iov->nres, 40232a47e72SYuval Mintz iov->cap, 40332a47e72SYuval Mintz iov->ctrl, 40432a47e72SYuval Mintz iov->total_vfs, 40532a47e72SYuval Mintz iov->initial_vfs, 40632a47e72SYuval Mintz iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 40732a47e72SYuval Mintz 40832a47e72SYuval Mintz /* Some sanity checks */ 40932a47e72SYuval Mintz if (iov->num_vfs > NUM_OF_VFS(cdev) || 41032a47e72SYuval Mintz iov->total_vfs > NUM_OF_VFS(cdev)) { 41132a47e72SYuval Mintz /* This can happen only due to a bug. 
In this case we set 41232a47e72SYuval Mintz * num_vfs to zero to avoid memory corruption in the code that 41332a47e72SYuval Mintz * assumes max number of vfs 41432a47e72SYuval Mintz */ 41532a47e72SYuval Mintz DP_NOTICE(cdev, 41632a47e72SYuval Mintz "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", 41732a47e72SYuval Mintz iov->num_vfs); 41832a47e72SYuval Mintz 41932a47e72SYuval Mintz iov->num_vfs = 0; 42032a47e72SYuval Mintz iov->total_vfs = 0; 42132a47e72SYuval Mintz } 42232a47e72SYuval Mintz 42332a47e72SYuval Mintz return 0; 42432a47e72SYuval Mintz } 42532a47e72SYuval Mintz 42632a47e72SYuval Mintz static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) 42732a47e72SYuval Mintz { 42832a47e72SYuval Mintz struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 42932a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 43032a47e72SYuval Mintz struct qed_bulletin_content *p_bulletin_virt; 43132a47e72SYuval Mintz dma_addr_t req_p, rply_p, bulletin_p; 43232a47e72SYuval Mintz union pfvf_tlvs *p_reply_virt_addr; 43332a47e72SYuval Mintz union vfpf_tlvs *p_req_virt_addr; 43432a47e72SYuval Mintz u8 idx = 0; 43532a47e72SYuval Mintz 43632a47e72SYuval Mintz memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); 43732a47e72SYuval Mintz 43832a47e72SYuval Mintz p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; 43932a47e72SYuval Mintz req_p = p_iov_info->mbx_msg_phys_addr; 44032a47e72SYuval Mintz p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; 44132a47e72SYuval Mintz rply_p = p_iov_info->mbx_reply_phys_addr; 44232a47e72SYuval Mintz p_bulletin_virt = p_iov_info->p_bulletins; 44332a47e72SYuval Mintz bulletin_p = p_iov_info->bulletins_phys; 44432a47e72SYuval Mintz if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { 44532a47e72SYuval Mintz DP_ERR(p_hwfn, 44632a47e72SYuval Mintz "qed_iov_setup_vfdb called without allocating mem first\n"); 44732a47e72SYuval Mintz return; 44832a47e72SYuval Mintz } 44932a47e72SYuval Mintz 
45032a47e72SYuval Mintz for (idx = 0; idx < p_iov->total_vfs; idx++) { 45132a47e72SYuval Mintz struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; 45232a47e72SYuval Mintz u32 concrete; 45332a47e72SYuval Mintz 45432a47e72SYuval Mintz vf->vf_mbx.req_virt = p_req_virt_addr + idx; 45532a47e72SYuval Mintz vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); 45632a47e72SYuval Mintz vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; 45732a47e72SYuval Mintz vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); 45832a47e72SYuval Mintz 45932a47e72SYuval Mintz vf->state = VF_STOPPED; 46032a47e72SYuval Mintz vf->b_init = false; 46132a47e72SYuval Mintz 46232a47e72SYuval Mintz vf->bulletin.phys = idx * 46332a47e72SYuval Mintz sizeof(struct qed_bulletin_content) + 46432a47e72SYuval Mintz bulletin_p; 46532a47e72SYuval Mintz vf->bulletin.p_virt = p_bulletin_virt + idx; 46632a47e72SYuval Mintz vf->bulletin.size = sizeof(struct qed_bulletin_content); 46732a47e72SYuval Mintz 46832a47e72SYuval Mintz vf->relative_vf_id = idx; 46932a47e72SYuval Mintz vf->abs_vf_id = idx + p_iov->first_vf_in_pf; 47032a47e72SYuval Mintz concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); 47132a47e72SYuval Mintz vf->concrete_fid = concrete; 47232a47e72SYuval Mintz vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | 47332a47e72SYuval Mintz (vf->abs_vf_id << 8); 47432a47e72SYuval Mintz vf->vport_id = idx + 1; 4751cf2b1a9SYuval Mintz 4761cf2b1a9SYuval Mintz vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; 4771cf2b1a9SYuval Mintz vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; 47832a47e72SYuval Mintz } 47932a47e72SYuval Mintz } 48032a47e72SYuval Mintz 48132a47e72SYuval Mintz static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) 48232a47e72SYuval Mintz { 48332a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 48432a47e72SYuval Mintz void **p_v_addr; 48532a47e72SYuval Mintz u16 num_vfs = 0; 48632a47e72SYuval Mintz 48732a47e72SYuval Mintz num_vfs = 
p_hwfn->cdev->p_iov_info->total_vfs; 48832a47e72SYuval Mintz 48932a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 49032a47e72SYuval Mintz "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); 49132a47e72SYuval Mintz 49232a47e72SYuval Mintz /* Allocate PF Mailbox buffer (per-VF) */ 49332a47e72SYuval Mintz p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; 49432a47e72SYuval Mintz p_v_addr = &p_iov_info->mbx_msg_virt_addr; 49532a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 49632a47e72SYuval Mintz p_iov_info->mbx_msg_size, 49732a47e72SYuval Mintz &p_iov_info->mbx_msg_phys_addr, 49832a47e72SYuval Mintz GFP_KERNEL); 49932a47e72SYuval Mintz if (!*p_v_addr) 50032a47e72SYuval Mintz return -ENOMEM; 50132a47e72SYuval Mintz 50232a47e72SYuval Mintz /* Allocate PF Mailbox Reply buffer (per-VF) */ 50332a47e72SYuval Mintz p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; 50432a47e72SYuval Mintz p_v_addr = &p_iov_info->mbx_reply_virt_addr; 50532a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 50632a47e72SYuval Mintz p_iov_info->mbx_reply_size, 50732a47e72SYuval Mintz &p_iov_info->mbx_reply_phys_addr, 50832a47e72SYuval Mintz GFP_KERNEL); 50932a47e72SYuval Mintz if (!*p_v_addr) 51032a47e72SYuval Mintz return -ENOMEM; 51132a47e72SYuval Mintz 51232a47e72SYuval Mintz p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * 51332a47e72SYuval Mintz num_vfs; 51432a47e72SYuval Mintz p_v_addr = &p_iov_info->p_bulletins; 51532a47e72SYuval Mintz *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 51632a47e72SYuval Mintz p_iov_info->bulletins_size, 51732a47e72SYuval Mintz &p_iov_info->bulletins_phys, 51832a47e72SYuval Mintz GFP_KERNEL); 51932a47e72SYuval Mintz if (!*p_v_addr) 52032a47e72SYuval Mintz return -ENOMEM; 52132a47e72SYuval Mintz 52232a47e72SYuval Mintz DP_VERBOSE(p_hwfn, 52332a47e72SYuval Mintz QED_MSG_IOV, 52432a47e72SYuval Mintz "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox 
[%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", 52532a47e72SYuval Mintz p_iov_info->mbx_msg_virt_addr, 52632a47e72SYuval Mintz (u64) p_iov_info->mbx_msg_phys_addr, 52732a47e72SYuval Mintz p_iov_info->mbx_reply_virt_addr, 52832a47e72SYuval Mintz (u64) p_iov_info->mbx_reply_phys_addr, 52932a47e72SYuval Mintz p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); 53032a47e72SYuval Mintz 53132a47e72SYuval Mintz return 0; 53232a47e72SYuval Mintz } 53332a47e72SYuval Mintz 53432a47e72SYuval Mintz static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) 53532a47e72SYuval Mintz { 53632a47e72SYuval Mintz struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; 53732a47e72SYuval Mintz 53832a47e72SYuval Mintz if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) 53932a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 54032a47e72SYuval Mintz p_iov_info->mbx_msg_size, 54132a47e72SYuval Mintz p_iov_info->mbx_msg_virt_addr, 54232a47e72SYuval Mintz p_iov_info->mbx_msg_phys_addr); 54332a47e72SYuval Mintz 54432a47e72SYuval Mintz if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) 54532a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 54632a47e72SYuval Mintz p_iov_info->mbx_reply_size, 54732a47e72SYuval Mintz p_iov_info->mbx_reply_virt_addr, 54832a47e72SYuval Mintz p_iov_info->mbx_reply_phys_addr); 54932a47e72SYuval Mintz 55032a47e72SYuval Mintz if (p_iov_info->p_bulletins) 55132a47e72SYuval Mintz dma_free_coherent(&p_hwfn->cdev->pdev->dev, 55232a47e72SYuval Mintz p_iov_info->bulletins_size, 55332a47e72SYuval Mintz p_iov_info->p_bulletins, 55432a47e72SYuval Mintz p_iov_info->bulletins_phys); 55532a47e72SYuval Mintz } 55632a47e72SYuval Mintz 55732a47e72SYuval Mintz int qed_iov_alloc(struct qed_hwfn *p_hwfn) 55832a47e72SYuval Mintz { 55932a47e72SYuval Mintz struct qed_pf_iov *p_sriov; 56032a47e72SYuval Mintz 56132a47e72SYuval Mintz if (!IS_PF_SRIOV(p_hwfn)) { 56232a47e72SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 56332a47e72SYuval Mintz "No SR-IOV - no need for IOV 
db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	/* PF handles SR-IOV related EQ events on the COMMON protocol ring */
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);

	return qed_iov_allocate_vfdb(p_hwfn);
}

/* Late setup of the VF database; no-op unless SR-IOV was allocated. */
void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
}

/* Release per-hwfn IOV resources and the async-event registration. */
void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

/* Release the device-wide IOV information struct. */
void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

/* Learn the device's SR-IOV capability from PCI config space and compute
 * the index of the first VF belonging to this PF. Returns 0 on success
 * (including "no IOV support"), negative errno on allocation/cfg failure.
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	/* VFs have no view of the PF's SR-IOV capability */
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine Vfs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by later
	 * to differentiate between the two.
	 */

	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		/* 2nd engine starts after the first engine's VF range */
		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

/* Validate that the PF runs SR-IOV and that @vfid denotes a valid VF;
 * @b_fail_malicious additionally rejects VFs flagged as malicious.
 */
bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
			      int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

/* Strict variant - malicious VFs fail the check. */
bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

/* Mark a relative VF as to-be-disabled on every hwfn of the device. */
static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

/* Apply the to-disable marking to every VF published by the device. */
static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

/* Clear the VF's 'was_error' indication in the PGLUE_B block;
 * each 32-bit register covers 32 VFs, one bit per VF.
 */
static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;
	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

/* Enable or disable MSI/MSI-X delivery for the VF by toggling the
 * enable bit in its IGU configuration register (under pretend).
 */
static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

/* Request the MFW to configure MSI-X for the VF. Only issues the
 * request when @num_sbs exceeds what is already configured.
 */
static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = max_t(u8, current_max, p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);

	return 0;
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	/* Configure the VF's IGU under pretend of its concrete fid */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *        zone table.
 *        In E4, queue zone permission table size is 320x9. There
 *        are 320 VF queues for single engine device (256 for dual
 *        engine device), and each entry has the following format:
 *        {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable - set entries valid (owning VF id) or clear them
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		/* {Valid=BIT(8), VF[7:0]=abs_vf_id} or 0 to invalidate */
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

/* Re-enable a VF's traffic path: IGU reset, interrupts on,
 * permission table entries made valid.
 */
static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

/* Allocate IGU status blocks to the VF, capped by the free IOV SB
 * count. Returns the number of SBs actually assigned.
 */
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = qed_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~QED_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		qed_wr(p_hwfn, p_ptt,
		       IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, 0);
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

/* Return the VF's IGU status blocks to the free pool, invalidating
 * the corresponding IGU CAM lines.
 */
static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}

/* Copy the requested and current link configuration into the VF's
 * bulletin board so the VF can read it.
 */
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

/* Initialize a VF in hardware: validate the requested queue-zone ids,
 * allocate IGU SBs, assign Rx/Tx queue ids, publish the link state to
 * the bulletin and enable HW access. Returns 0 on success, negative
 * errno otherwise.
 */
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
		    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf, num_irqs);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

/* Tear down a VF's HW state: clear its bulletin, restore the current
 * link configuration, disable interrupts, reset the permission table
 * and free its IGU SBs.
 */
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
1125079d20a6SManish Chopra */ 1126079d20a6SManish Chopra memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params)); 1127079d20a6SManish Chopra memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); 1128079d20a6SManish Chopra memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 1129079d20a6SManish Chopra qed_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps); 1130079d20a6SManish Chopra 11311fe614d1SYuval Mintz /* Forget the VF's acquisition message */ 11321fe614d1SYuval Mintz memset(&vf->acquire, 0, sizeof(vf->acquire)); 11330b55e27dSYuval Mintz 11340b55e27dSYuval Mintz /* disablng interrupts and resetting permission table was done during 11350b55e27dSYuval Mintz * vf-close, however, we could get here without going through vf_close 11360b55e27dSYuval Mintz */ 11370b55e27dSYuval Mintz /* Disable Interrupts for VF */ 11380b55e27dSYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 11390b55e27dSYuval Mintz 11400b55e27dSYuval Mintz /* Reset Permission table */ 11410b55e27dSYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 11420b55e27dSYuval Mintz 11430b55e27dSYuval Mintz vf->num_rxqs = 0; 11440b55e27dSYuval Mintz vf->num_txqs = 0; 11450b55e27dSYuval Mintz qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); 11460b55e27dSYuval Mintz 11470b55e27dSYuval Mintz if (vf->b_init) { 11480b55e27dSYuval Mintz vf->b_init = false; 11490b55e27dSYuval Mintz 11500b55e27dSYuval Mintz if (IS_LEAD_HWFN(p_hwfn)) 11510b55e27dSYuval Mintz p_hwfn->cdev->p_iov_info->num_vfs--; 11520b55e27dSYuval Mintz } 11530b55e27dSYuval Mintz 11540b55e27dSYuval Mintz return 0; 11550b55e27dSYuval Mintz } 11560b55e27dSYuval Mintz 115737bff2b9SYuval Mintz static bool qed_iov_tlv_supported(u16 tlvtype) 115837bff2b9SYuval Mintz { 115937bff2b9SYuval Mintz return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; 116037bff2b9SYuval Mintz } 116137bff2b9SYuval Mintz 116237bff2b9SYuval Mintz /* place a given tlv on the tlv buffer, continuing current tlv list */ 116337bff2b9SYuval 
Mintz void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) 116437bff2b9SYuval Mintz { 116537bff2b9SYuval Mintz struct channel_tlv *tl = (struct channel_tlv *)*offset; 116637bff2b9SYuval Mintz 116737bff2b9SYuval Mintz tl->type = type; 116837bff2b9SYuval Mintz tl->length = length; 116937bff2b9SYuval Mintz 117037bff2b9SYuval Mintz /* Offset should keep pointing to next TLV (the end of the last) */ 117137bff2b9SYuval Mintz *offset += length; 117237bff2b9SYuval Mintz 117337bff2b9SYuval Mintz /* Return a pointer to the start of the added tlv */ 117437bff2b9SYuval Mintz return *offset - length; 117537bff2b9SYuval Mintz } 117637bff2b9SYuval Mintz 117737bff2b9SYuval Mintz /* list the types and lengths of the tlvs on the buffer */ 117837bff2b9SYuval Mintz void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) 117937bff2b9SYuval Mintz { 118037bff2b9SYuval Mintz u16 i = 1, total_length = 0; 118137bff2b9SYuval Mintz struct channel_tlv *tlv; 118237bff2b9SYuval Mintz 118337bff2b9SYuval Mintz do { 118437bff2b9SYuval Mintz tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); 118537bff2b9SYuval Mintz 118637bff2b9SYuval Mintz /* output tlv */ 118737bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 118837bff2b9SYuval Mintz "TLV number %d: type %d, length %d\n", 118937bff2b9SYuval Mintz i, tlv->type, tlv->length); 119037bff2b9SYuval Mintz 119137bff2b9SYuval Mintz if (tlv->type == CHANNEL_TLV_LIST_END) 119237bff2b9SYuval Mintz return; 119337bff2b9SYuval Mintz 119437bff2b9SYuval Mintz /* Validate entry - protect against malicious VFs */ 119537bff2b9SYuval Mintz if (!tlv->length) { 119637bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); 119737bff2b9SYuval Mintz return; 119837bff2b9SYuval Mintz } 119937bff2b9SYuval Mintz 120037bff2b9SYuval Mintz total_length += tlv->length; 120137bff2b9SYuval Mintz 120237bff2b9SYuval Mintz if (total_length >= sizeof(struct tlv_buffer_size)) { 120337bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "TLV ==> 
Buffer overflow\n"); 120437bff2b9SYuval Mintz return; 120537bff2b9SYuval Mintz } 120637bff2b9SYuval Mintz 120737bff2b9SYuval Mintz i++; 120837bff2b9SYuval Mintz } while (1); 120937bff2b9SYuval Mintz } 121037bff2b9SYuval Mintz 121137bff2b9SYuval Mintz static void qed_iov_send_response(struct qed_hwfn *p_hwfn, 121237bff2b9SYuval Mintz struct qed_ptt *p_ptt, 121337bff2b9SYuval Mintz struct qed_vf_info *p_vf, 121437bff2b9SYuval Mintz u16 length, u8 status) 121537bff2b9SYuval Mintz { 121637bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 121737bff2b9SYuval Mintz struct qed_dmae_params params; 121837bff2b9SYuval Mintz u8 eng_vf_id; 121937bff2b9SYuval Mintz 122037bff2b9SYuval Mintz mbx->reply_virt->default_resp.hdr.status = status; 122137bff2b9SYuval Mintz 122237bff2b9SYuval Mintz qed_dp_tlv_list(p_hwfn, mbx->reply_virt); 122337bff2b9SYuval Mintz 122437bff2b9SYuval Mintz eng_vf_id = p_vf->abs_vf_id; 122537bff2b9SYuval Mintz 122637bff2b9SYuval Mintz memset(¶ms, 0, sizeof(struct qed_dmae_params)); 122737bff2b9SYuval Mintz params.flags = QED_DMAE_FLAG_VF_DST; 122837bff2b9SYuval Mintz params.dst_vfid = eng_vf_id; 122937bff2b9SYuval Mintz 123037bff2b9SYuval Mintz qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), 123137bff2b9SYuval Mintz mbx->req_virt->first_tlv.reply_address + 123237bff2b9SYuval Mintz sizeof(u64), 123337bff2b9SYuval Mintz (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, 123437bff2b9SYuval Mintz ¶ms); 123537bff2b9SYuval Mintz 1236d9194081SMintz, Yuval /* Once PF copies the rc to the VF, the latter can continue 1237d9194081SMintz, Yuval * and send an additional message. So we have to make sure the 1238d9194081SMintz, Yuval * channel would be re-set to ready prior to that. 
1239d9194081SMintz, Yuval */ 124037bff2b9SYuval Mintz REG_WR(p_hwfn, 124137bff2b9SYuval Mintz GTT_BAR0_MAP_REG_USDM_RAM + 124237bff2b9SYuval Mintz USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); 1243d9194081SMintz, Yuval 1244d9194081SMintz, Yuval qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, 1245d9194081SMintz, Yuval mbx->req_virt->first_tlv.reply_address, 1246d9194081SMintz, Yuval sizeof(u64) / 4, ¶ms); 124737bff2b9SYuval Mintz } 124837bff2b9SYuval Mintz 1249dacd88d6SYuval Mintz static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, 1250dacd88d6SYuval Mintz enum qed_iov_vport_update_flag flag) 1251dacd88d6SYuval Mintz { 1252dacd88d6SYuval Mintz switch (flag) { 1253dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_ACTIVATE: 1254dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 125517b235c1SYuval Mintz case QED_IOV_VP_UPDATE_VLAN_STRIP: 125617b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; 125717b235c1SYuval Mintz case QED_IOV_VP_UPDATE_TX_SWITCH: 125817b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 1259dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_MCAST: 1260dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_MCAST; 1261dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_ACCEPT_PARAM: 1262dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1263dacd88d6SYuval Mintz case QED_IOV_VP_UPDATE_RSS: 1264dacd88d6SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_RSS; 126517b235c1SYuval Mintz case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: 126617b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 126717b235c1SYuval Mintz case QED_IOV_VP_UPDATE_SGE_TPA: 126817b235c1SYuval Mintz return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; 1269dacd88d6SYuval Mintz default: 1270dacd88d6SYuval Mintz return 0; 1271dacd88d6SYuval Mintz } 1272dacd88d6SYuval Mintz } 1273dacd88d6SYuval Mintz 1274dacd88d6SYuval Mintz static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, 1275dacd88d6SYuval Mintz struct qed_vf_info *p_vf, 
1276dacd88d6SYuval Mintz struct qed_iov_vf_mbx *p_mbx, 1277dacd88d6SYuval Mintz u8 status, 1278dacd88d6SYuval Mintz u16 tlvs_mask, u16 tlvs_accepted) 1279dacd88d6SYuval Mintz { 1280dacd88d6SYuval Mintz struct pfvf_def_resp_tlv *resp; 1281dacd88d6SYuval Mintz u16 size, total_len, i; 1282dacd88d6SYuval Mintz 1283dacd88d6SYuval Mintz memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); 1284dacd88d6SYuval Mintz p_mbx->offset = (u8 *)p_mbx->reply_virt; 1285dacd88d6SYuval Mintz size = sizeof(struct pfvf_def_resp_tlv); 1286dacd88d6SYuval Mintz total_len = size; 1287dacd88d6SYuval Mintz 1288dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); 1289dacd88d6SYuval Mintz 1290dacd88d6SYuval Mintz /* Prepare response for all extended tlvs if they are found by PF */ 1291dacd88d6SYuval Mintz for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { 12921a635e48SYuval Mintz if (!(tlvs_mask & BIT(i))) 1293dacd88d6SYuval Mintz continue; 1294dacd88d6SYuval Mintz 1295dacd88d6SYuval Mintz resp = qed_add_tlv(p_hwfn, &p_mbx->offset, 1296dacd88d6SYuval Mintz qed_iov_vport_to_tlv(p_hwfn, i), size); 1297dacd88d6SYuval Mintz 12981a635e48SYuval Mintz if (tlvs_accepted & BIT(i)) 1299dacd88d6SYuval Mintz resp->hdr.status = status; 1300dacd88d6SYuval Mintz else 1301dacd88d6SYuval Mintz resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; 1302dacd88d6SYuval Mintz 1303dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 1304dacd88d6SYuval Mintz QED_MSG_IOV, 1305dacd88d6SYuval Mintz "VF[%d] - vport_update response: TLV %d, status %02x\n", 1306dacd88d6SYuval Mintz p_vf->relative_vf_id, 1307dacd88d6SYuval Mintz qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); 1308dacd88d6SYuval Mintz 1309dacd88d6SYuval Mintz total_len += size; 1310dacd88d6SYuval Mintz } 1311dacd88d6SYuval Mintz 1312dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, 1313dacd88d6SYuval Mintz sizeof(struct channel_list_end_tlv)); 1314dacd88d6SYuval Mintz 1315dacd88d6SYuval Mintz return total_len; 
1316dacd88d6SYuval Mintz } 1317dacd88d6SYuval Mintz 131837bff2b9SYuval Mintz static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, 131937bff2b9SYuval Mintz struct qed_ptt *p_ptt, 132037bff2b9SYuval Mintz struct qed_vf_info *vf_info, 132137bff2b9SYuval Mintz u16 type, u16 length, u8 status) 132237bff2b9SYuval Mintz { 132337bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; 132437bff2b9SYuval Mintz 132537bff2b9SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 132637bff2b9SYuval Mintz 132737bff2b9SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, type, length); 132837bff2b9SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 132937bff2b9SYuval Mintz sizeof(struct channel_list_end_tlv)); 133037bff2b9SYuval Mintz 133137bff2b9SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); 133237bff2b9SYuval Mintz } 133337bff2b9SYuval Mintz 1334ba56947aSBaoyou Xie static struct 1335ba56947aSBaoyou Xie qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, 13360b55e27dSYuval Mintz u16 relative_vf_id, 13370b55e27dSYuval Mintz bool b_enabled_only) 13380b55e27dSYuval Mintz { 13390b55e27dSYuval Mintz struct qed_vf_info *vf = NULL; 13400b55e27dSYuval Mintz 13410b55e27dSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); 13420b55e27dSYuval Mintz if (!vf) 13430b55e27dSYuval Mintz return NULL; 13440b55e27dSYuval Mintz 13450b55e27dSYuval Mintz return &vf->p_vf_info; 13460b55e27dSYuval Mintz } 13470b55e27dSYuval Mintz 1348ba56947aSBaoyou Xie static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) 13490b55e27dSYuval Mintz { 13500b55e27dSYuval Mintz struct qed_public_vf_info *vf_info; 13510b55e27dSYuval Mintz 13520b55e27dSYuval Mintz vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); 13530b55e27dSYuval Mintz 13540b55e27dSYuval Mintz if (!vf_info) 13550b55e27dSYuval Mintz return; 13560b55e27dSYuval Mintz 13570b55e27dSYuval Mintz /* Clear the VF mac */ 13580ee28e31SShyam Saini 
eth_zero_addr(vf_info->mac); 1359f990c82cSMintz, Yuval 1360f990c82cSMintz, Yuval vf_info->rx_accept_mode = 0; 1361f990c82cSMintz, Yuval vf_info->tx_accept_mode = 0; 13620b55e27dSYuval Mintz } 13630b55e27dSYuval Mintz 13640b55e27dSYuval Mintz static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, 13650b55e27dSYuval Mintz struct qed_vf_info *p_vf) 13660b55e27dSYuval Mintz { 1367007bc371SMintz, Yuval u32 i, j; 13680b55e27dSYuval Mintz 13690b55e27dSYuval Mintz p_vf->vf_bulletin = 0; 1370dacd88d6SYuval Mintz p_vf->vport_instance = 0; 137108feecd7SYuval Mintz p_vf->configured_features = 0; 13720b55e27dSYuval Mintz 13730b55e27dSYuval Mintz /* If VF previously requested less resources, go back to default */ 13740b55e27dSYuval Mintz p_vf->num_rxqs = p_vf->num_sbs; 13750b55e27dSYuval Mintz p_vf->num_txqs = p_vf->num_sbs; 13760b55e27dSYuval Mintz 1377dacd88d6SYuval Mintz p_vf->num_active_rxqs = 0; 1378dacd88d6SYuval Mintz 13793da7a37aSMintz, Yuval for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { 1380007bc371SMintz, Yuval struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; 13813da7a37aSMintz, Yuval 1382007bc371SMintz, Yuval for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) { 1383007bc371SMintz, Yuval if (!p_queue->cids[j].p_cid) 1384007bc371SMintz, Yuval continue; 13853da7a37aSMintz, Yuval 1386007bc371SMintz, Yuval qed_eth_queue_cid_release(p_hwfn, 1387007bc371SMintz, Yuval p_queue->cids[j].p_cid); 1388007bc371SMintz, Yuval p_queue->cids[j].p_cid = NULL; 13893da7a37aSMintz, Yuval } 13903da7a37aSMintz, Yuval } 13910b55e27dSYuval Mintz 139208feecd7SYuval Mintz memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); 13931fe614d1SYuval Mintz memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); 13940b55e27dSYuval Mintz qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); 13950b55e27dSYuval Mintz } 13960b55e27dSYuval Mintz 13971a850bfcSMintz, Yuval /* Returns either 0, or log(size) */ 13981a850bfcSMintz, Yuval static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn, 13991a850bfcSMintz, 
Yuval struct qed_ptt *p_ptt) 14001a850bfcSMintz, Yuval { 14011a850bfcSMintz, Yuval u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE); 14021a850bfcSMintz, Yuval 14031a850bfcSMintz, Yuval if (val) 14041a850bfcSMintz, Yuval return val + 11; 14051a850bfcSMintz, Yuval return 0; 14061a850bfcSMintz, Yuval } 14071a850bfcSMintz, Yuval 14081a850bfcSMintz, Yuval static void 14091a850bfcSMintz, Yuval qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn, 14101a850bfcSMintz, Yuval struct qed_ptt *p_ptt, 14111a850bfcSMintz, Yuval struct qed_vf_info *p_vf, 14121a850bfcSMintz, Yuval struct vf_pf_resc_request *p_req, 14131a850bfcSMintz, Yuval struct pf_vf_resc *p_resp) 14141a850bfcSMintz, Yuval { 14151a850bfcSMintz, Yuval u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; 14161a850bfcSMintz, Yuval u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) - 14171a850bfcSMintz, Yuval qed_db_addr_vf(0, DQ_DEMS_LEGACY); 14181a850bfcSMintz, Yuval u32 bar_size; 14191a850bfcSMintz, Yuval 14201a850bfcSMintz, Yuval p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons); 14211a850bfcSMintz, Yuval 14221a850bfcSMintz, Yuval /* If VF didn't bother asking for QIDs than don't bother limiting 14231a850bfcSMintz, Yuval * number of CIDs. The VF doesn't care about the number, and this 14241a850bfcSMintz, Yuval * has the likely result of causing an additional acquisition. 14251a850bfcSMintz, Yuval */ 14261a850bfcSMintz, Yuval if (!(p_vf->acquire.vfdev_info.capabilities & 14271a850bfcSMintz, Yuval VFPF_ACQUIRE_CAP_QUEUE_QIDS)) 14281a850bfcSMintz, Yuval return; 14291a850bfcSMintz, Yuval 14301a850bfcSMintz, Yuval /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount 14311a850bfcSMintz, Yuval * that would make sure doorbells for all CIDs fall within the bar. 14321a850bfcSMintz, Yuval * If it doesn't, make sure regview window is sufficient. 
14331a850bfcSMintz, Yuval */ 14341a850bfcSMintz, Yuval if (p_vf->acquire.vfdev_info.capabilities & 14351a850bfcSMintz, Yuval VFPF_ACQUIRE_CAP_PHYSICAL_BAR) { 14361a850bfcSMintz, Yuval bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); 14371a850bfcSMintz, Yuval if (bar_size) 14381a850bfcSMintz, Yuval bar_size = 1 << bar_size; 14391a850bfcSMintz, Yuval 14401a850bfcSMintz, Yuval if (p_hwfn->cdev->num_hwfns > 1) 14411a850bfcSMintz, Yuval bar_size /= 2; 14421a850bfcSMintz, Yuval } else { 14431a850bfcSMintz, Yuval bar_size = PXP_VF_BAR0_DQ_LENGTH; 14441a850bfcSMintz, Yuval } 14451a850bfcSMintz, Yuval 14461a850bfcSMintz, Yuval if (bar_size / db_size < 256) 14471a850bfcSMintz, Yuval p_resp->num_cids = min_t(u8, p_resp->num_cids, 14481a850bfcSMintz, Yuval (u8)(bar_size / db_size)); 14491a850bfcSMintz, Yuval } 14501a850bfcSMintz, Yuval 14511cf2b1a9SYuval Mintz static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, 14521cf2b1a9SYuval Mintz struct qed_ptt *p_ptt, 14531cf2b1a9SYuval Mintz struct qed_vf_info *p_vf, 14541cf2b1a9SYuval Mintz struct vf_pf_resc_request *p_req, 14551cf2b1a9SYuval Mintz struct pf_vf_resc *p_resp) 14561cf2b1a9SYuval Mintz { 1457007bc371SMintz, Yuval u8 i; 14581cf2b1a9SYuval Mintz 14591cf2b1a9SYuval Mintz /* Queue related information */ 14601cf2b1a9SYuval Mintz p_resp->num_rxqs = p_vf->num_rxqs; 14611cf2b1a9SYuval Mintz p_resp->num_txqs = p_vf->num_txqs; 14621cf2b1a9SYuval Mintz p_resp->num_sbs = p_vf->num_sbs; 14631cf2b1a9SYuval Mintz 14641cf2b1a9SYuval Mintz for (i = 0; i < p_resp->num_sbs; i++) { 14651cf2b1a9SYuval Mintz p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; 14661cf2b1a9SYuval Mintz p_resp->hw_sbs[i].sb_qid = 0; 14671cf2b1a9SYuval Mintz } 14681cf2b1a9SYuval Mintz 14691cf2b1a9SYuval Mintz /* These fields are filled for backward compatibility. 14701cf2b1a9SYuval Mintz * Unused by modern vfs. 
14711cf2b1a9SYuval Mintz */ 14721cf2b1a9SYuval Mintz for (i = 0; i < p_resp->num_rxqs; i++) { 14731cf2b1a9SYuval Mintz qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, 14741cf2b1a9SYuval Mintz (u16 *)&p_resp->hw_qid[i]); 1475007bc371SMintz, Yuval p_resp->cid[i] = i; 14761cf2b1a9SYuval Mintz } 14771cf2b1a9SYuval Mintz 14781cf2b1a9SYuval Mintz /* Filter related information */ 14791cf2b1a9SYuval Mintz p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, 14801cf2b1a9SYuval Mintz p_req->num_mac_filters); 14811cf2b1a9SYuval Mintz p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, 14821cf2b1a9SYuval Mintz p_req->num_vlan_filters); 14831cf2b1a9SYuval Mintz 14841a850bfcSMintz, Yuval qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); 148508bc8f15SMintz, Yuval 14861cf2b1a9SYuval Mintz /* This isn't really needed/enforced, but some legacy VFs might depend 14871cf2b1a9SYuval Mintz * on the correct filling of this field. 14881cf2b1a9SYuval Mintz */ 14891cf2b1a9SYuval Mintz p_resp->num_mc_filters = QED_MAX_MC_ADDRS; 14901cf2b1a9SYuval Mintz 14911cf2b1a9SYuval Mintz /* Validate sufficient resources for VF */ 14921cf2b1a9SYuval Mintz if (p_resp->num_rxqs < p_req->num_rxqs || 14931cf2b1a9SYuval Mintz p_resp->num_txqs < p_req->num_txqs || 14941cf2b1a9SYuval Mintz p_resp->num_sbs < p_req->num_sbs || 14951cf2b1a9SYuval Mintz p_resp->num_mac_filters < p_req->num_mac_filters || 14961cf2b1a9SYuval Mintz p_resp->num_vlan_filters < p_req->num_vlan_filters || 149708bc8f15SMintz, Yuval p_resp->num_mc_filters < p_req->num_mc_filters || 149808bc8f15SMintz, Yuval p_resp->num_cids < p_req->num_cids) { 14991cf2b1a9SYuval Mintz DP_VERBOSE(p_hwfn, 15001cf2b1a9SYuval Mintz QED_MSG_IOV, 150108bc8f15SMintz, Yuval "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n", 15021cf2b1a9SYuval Mintz p_vf->abs_vf_id, 15031cf2b1a9SYuval Mintz p_req->num_rxqs, 15041cf2b1a9SYuval 
Mintz p_resp->num_rxqs, 15051cf2b1a9SYuval Mintz p_req->num_rxqs, 15061cf2b1a9SYuval Mintz p_resp->num_txqs, 15071cf2b1a9SYuval Mintz p_req->num_sbs, 15081cf2b1a9SYuval Mintz p_resp->num_sbs, 15091cf2b1a9SYuval Mintz p_req->num_mac_filters, 15101cf2b1a9SYuval Mintz p_resp->num_mac_filters, 15111cf2b1a9SYuval Mintz p_req->num_vlan_filters, 15121cf2b1a9SYuval Mintz p_resp->num_vlan_filters, 151308bc8f15SMintz, Yuval p_req->num_mc_filters, 151408bc8f15SMintz, Yuval p_resp->num_mc_filters, 151508bc8f15SMintz, Yuval p_req->num_cids, p_resp->num_cids); 1516a044df83SYuval Mintz 1517a044df83SYuval Mintz /* Some legacy OSes are incapable of correctly handling this 1518a044df83SYuval Mintz * failure. 1519a044df83SYuval Mintz */ 1520a044df83SYuval Mintz if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 1521a044df83SYuval Mintz ETH_HSI_VER_NO_PKT_LEN_TUNN) && 1522a044df83SYuval Mintz (p_vf->acquire.vfdev_info.os_type == 1523a044df83SYuval Mintz VFPF_ACQUIRE_OS_WINDOWS)) 1524a044df83SYuval Mintz return PFVF_STATUS_SUCCESS; 1525a044df83SYuval Mintz 15261cf2b1a9SYuval Mintz return PFVF_STATUS_NO_RESOURCE; 15271cf2b1a9SYuval Mintz } 15281cf2b1a9SYuval Mintz 15291cf2b1a9SYuval Mintz return PFVF_STATUS_SUCCESS; 15301cf2b1a9SYuval Mintz } 15311cf2b1a9SYuval Mintz 15321cf2b1a9SYuval Mintz static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, 15331cf2b1a9SYuval Mintz struct pfvf_stats_info *p_stats) 15341cf2b1a9SYuval Mintz { 15351cf2b1a9SYuval Mintz p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + 15361cf2b1a9SYuval Mintz offsetof(struct mstorm_vf_zone, 15371cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 15381cf2b1a9SYuval Mintz p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); 15391cf2b1a9SYuval Mintz p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + 15401cf2b1a9SYuval Mintz offsetof(struct ustorm_vf_zone, 15411cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 15421cf2b1a9SYuval Mintz p_stats->ustats.len = sizeof(struct 
eth_ustorm_per_queue_stat); 15431cf2b1a9SYuval Mintz p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + 15441cf2b1a9SYuval Mintz offsetof(struct pstorm_vf_zone, 15451cf2b1a9SYuval Mintz non_trigger.eth_queue_stat); 15461cf2b1a9SYuval Mintz p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); 15471cf2b1a9SYuval Mintz p_stats->tstats.address = 0; 15481cf2b1a9SYuval Mintz p_stats->tstats.len = 0; 15491cf2b1a9SYuval Mintz } 15501cf2b1a9SYuval Mintz 15511408cc1fSYuval Mintz static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, 155237bff2b9SYuval Mintz struct qed_ptt *p_ptt, 15531408cc1fSYuval Mintz struct qed_vf_info *vf) 155437bff2b9SYuval Mintz { 15551408cc1fSYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 15561408cc1fSYuval Mintz struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; 15571408cc1fSYuval Mintz struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 15581408cc1fSYuval Mintz struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; 15591cf2b1a9SYuval Mintz u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; 15601408cc1fSYuval Mintz struct pf_vf_resc *resc = &resp->resc; 15611fe614d1SYuval Mintz int rc; 15621fe614d1SYuval Mintz 15631fe614d1SYuval Mintz memset(resp, 0, sizeof(*resp)); 15641408cc1fSYuval Mintz 156505fafbfbSYuval Mintz /* Write the PF version so that VF would know which version 156605fafbfbSYuval Mintz * is supported - might be later overriden. This guarantees that 156705fafbfbSYuval Mintz * VF could recognize legacy PF based on lack of versions in reply. 
156805fafbfbSYuval Mintz */ 156905fafbfbSYuval Mintz pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; 157005fafbfbSYuval Mintz pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; 157105fafbfbSYuval Mintz 1572a044df83SYuval Mintz if (vf->state != VF_FREE && vf->state != VF_STOPPED) { 1573a044df83SYuval Mintz DP_VERBOSE(p_hwfn, 1574a044df83SYuval Mintz QED_MSG_IOV, 1575a044df83SYuval Mintz "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", 1576a044df83SYuval Mintz vf->abs_vf_id, vf->state); 1577a044df83SYuval Mintz goto out; 1578a044df83SYuval Mintz } 1579a044df83SYuval Mintz 15801408cc1fSYuval Mintz /* Validate FW compatibility */ 15811fe614d1SYuval Mintz if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { 1582a044df83SYuval Mintz if (req->vfdev_info.capabilities & 1583a044df83SYuval Mintz VFPF_ACQUIRE_CAP_PRE_FP_HSI) { 1584a044df83SYuval Mintz struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; 1585a044df83SYuval Mintz 1586a044df83SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1587a044df83SYuval Mintz "VF[%d] is pre-fastpath HSI\n", 1588a044df83SYuval Mintz vf->abs_vf_id); 1589a044df83SYuval Mintz p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; 1590a044df83SYuval Mintz p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; 1591a044df83SYuval Mintz } else { 15921408cc1fSYuval Mintz DP_INFO(p_hwfn, 15931fe614d1SYuval Mintz "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", 15941408cc1fSYuval Mintz vf->abs_vf_id, 15951fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_major, 15961fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_minor, 15971fe614d1SYuval Mintz ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); 15981fe614d1SYuval Mintz 15991408cc1fSYuval Mintz goto out; 16001408cc1fSYuval Mintz } 1601a044df83SYuval Mintz } 16021408cc1fSYuval Mintz 16031408cc1fSYuval Mintz /* On 100g PFs, prevent old VFs from loading */ 16041408cc1fSYuval Mintz if ((p_hwfn->cdev->num_hwfns > 1) && 16051408cc1fSYuval Mintz 
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { 16061408cc1fSYuval Mintz DP_INFO(p_hwfn, 16071408cc1fSYuval Mintz "VF[%d] is running an old driver that doesn't support 100g\n", 16081408cc1fSYuval Mintz vf->abs_vf_id); 16091408cc1fSYuval Mintz goto out; 16101408cc1fSYuval Mintz } 16111408cc1fSYuval Mintz 16121fe614d1SYuval Mintz /* Store the acquire message */ 16131fe614d1SYuval Mintz memcpy(&vf->acquire, req, sizeof(vf->acquire)); 16141408cc1fSYuval Mintz 16151408cc1fSYuval Mintz vf->opaque_fid = req->vfdev_info.opaque_fid; 16161408cc1fSYuval Mintz 16171408cc1fSYuval Mintz vf->vf_bulletin = req->bulletin_addr; 16181408cc1fSYuval Mintz vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? 16191408cc1fSYuval Mintz vf->bulletin.size : req->bulletin_size; 16201408cc1fSYuval Mintz 16211408cc1fSYuval Mintz /* fill in pfdev info */ 16221408cc1fSYuval Mintz pfdev_info->chip_num = p_hwfn->cdev->chip_num; 16231408cc1fSYuval Mintz pfdev_info->db_size = 0; 16241408cc1fSYuval Mintz pfdev_info->indices_per_sb = PIS_PER_SB; 16251408cc1fSYuval Mintz 16261408cc1fSYuval Mintz pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | 16271408cc1fSYuval Mintz PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; 16281408cc1fSYuval Mintz if (p_hwfn->cdev->num_hwfns > 1) 16291408cc1fSYuval Mintz pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; 16301408cc1fSYuval Mintz 163108bc8f15SMintz, Yuval /* Share our ability to use multiple queue-ids only with VFs 163208bc8f15SMintz, Yuval * that request it. 
163308bc8f15SMintz, Yuval */ 163408bc8f15SMintz, Yuval if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) 163508bc8f15SMintz, Yuval pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; 163608bc8f15SMintz, Yuval 16371a850bfcSMintz, Yuval /* Share the sizes of the bars with VF */ 16381a850bfcSMintz, Yuval resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); 16391a850bfcSMintz, Yuval 16401cf2b1a9SYuval Mintz qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); 16411408cc1fSYuval Mintz 16421408cc1fSYuval Mintz memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); 16431408cc1fSYuval Mintz 16441408cc1fSYuval Mintz pfdev_info->fw_major = FW_MAJOR_VERSION; 16451408cc1fSYuval Mintz pfdev_info->fw_minor = FW_MINOR_VERSION; 16461408cc1fSYuval Mintz pfdev_info->fw_rev = FW_REVISION_VERSION; 16471408cc1fSYuval Mintz pfdev_info->fw_eng = FW_ENGINEERING_VERSION; 1648a044df83SYuval Mintz 1649a044df83SYuval Mintz /* Incorrect when legacy, but doesn't matter as legacy isn't reading 1650a044df83SYuval Mintz * this field. 1651a044df83SYuval Mintz */ 16521a635e48SYuval Mintz pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, 16531fe614d1SYuval Mintz req->vfdev_info.eth_fp_hsi_minor); 16541408cc1fSYuval Mintz pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; 16551408cc1fSYuval Mintz qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); 16561408cc1fSYuval Mintz 16571408cc1fSYuval Mintz pfdev_info->dev_type = p_hwfn->cdev->type; 16581408cc1fSYuval Mintz pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; 16591408cc1fSYuval Mintz 16601cf2b1a9SYuval Mintz /* Fill resources available to VF; Make sure there are enough to 16611cf2b1a9SYuval Mintz * satisfy the VF's request. 
16621408cc1fSYuval Mintz */ 16631cf2b1a9SYuval Mintz vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, 16641cf2b1a9SYuval Mintz &req->resc_request, resc); 16651cf2b1a9SYuval Mintz if (vfpf_status != PFVF_STATUS_SUCCESS) 16661cf2b1a9SYuval Mintz goto out; 16671408cc1fSYuval Mintz 16681fe614d1SYuval Mintz /* Start the VF in FW */ 16691fe614d1SYuval Mintz rc = qed_sp_vf_start(p_hwfn, vf); 16701fe614d1SYuval Mintz if (rc) { 16711fe614d1SYuval Mintz DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); 16721fe614d1SYuval Mintz vfpf_status = PFVF_STATUS_FAILURE; 16731fe614d1SYuval Mintz goto out; 16741fe614d1SYuval Mintz } 16751fe614d1SYuval Mintz 16761408cc1fSYuval Mintz /* Fill agreed size of bulletin board in response */ 16771408cc1fSYuval Mintz resp->bulletin_size = vf->bulletin.size; 167836558c3dSYuval Mintz qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); 16791408cc1fSYuval Mintz 16801408cc1fSYuval Mintz DP_VERBOSE(p_hwfn, 16811408cc1fSYuval Mintz QED_MSG_IOV, 16821408cc1fSYuval Mintz "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" 16831408cc1fSYuval Mintz "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", 16841408cc1fSYuval Mintz vf->abs_vf_id, 16851408cc1fSYuval Mintz resp->pfdev_info.chip_num, 16861408cc1fSYuval Mintz resp->pfdev_info.db_size, 16871408cc1fSYuval Mintz resp->pfdev_info.indices_per_sb, 16881408cc1fSYuval Mintz resp->pfdev_info.capabilities, 16891408cc1fSYuval Mintz resc->num_rxqs, 16901408cc1fSYuval Mintz resc->num_txqs, 16911408cc1fSYuval Mintz resc->num_sbs, 16921408cc1fSYuval Mintz resc->num_mac_filters, 16931408cc1fSYuval Mintz resc->num_vlan_filters); 16941408cc1fSYuval Mintz vf->state = VF_ACQUIRED; 16951408cc1fSYuval Mintz 16961408cc1fSYuval Mintz /* Prepare Response */ 16971408cc1fSYuval Mintz out: 16981408cc1fSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, 16991408cc1fSYuval Mintz sizeof(struct 
pfvf_acquire_resp_tlv), vfpf_status); 170037bff2b9SYuval Mintz } 170137bff2b9SYuval Mintz 17026ddc7608SYuval Mintz static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, 17036ddc7608SYuval Mintz struct qed_vf_info *p_vf, bool val) 17046ddc7608SYuval Mintz { 17056ddc7608SYuval Mintz struct qed_sp_vport_update_params params; 17066ddc7608SYuval Mintz int rc; 17076ddc7608SYuval Mintz 17086ddc7608SYuval Mintz if (val == p_vf->spoof_chk) { 17096ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 17106ddc7608SYuval Mintz "Spoofchk value[%d] is already configured\n", val); 17116ddc7608SYuval Mintz return 0; 17126ddc7608SYuval Mintz } 17136ddc7608SYuval Mintz 17146ddc7608SYuval Mintz memset(¶ms, 0, sizeof(struct qed_sp_vport_update_params)); 17156ddc7608SYuval Mintz params.opaque_fid = p_vf->opaque_fid; 17166ddc7608SYuval Mintz params.vport_id = p_vf->vport_id; 17176ddc7608SYuval Mintz params.update_anti_spoofing_en_flg = 1; 17186ddc7608SYuval Mintz params.anti_spoofing_en = val; 17196ddc7608SYuval Mintz 17206ddc7608SYuval Mintz rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); 1721cb1fa088SYuval Mintz if (!rc) { 17226ddc7608SYuval Mintz p_vf->spoof_chk = val; 17236ddc7608SYuval Mintz p_vf->req_spoofchk_val = p_vf->spoof_chk; 17246ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 17256ddc7608SYuval Mintz "Spoofchk val[%d] configured\n", val); 17266ddc7608SYuval Mintz } else { 17276ddc7608SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 17286ddc7608SYuval Mintz "Spoofchk configuration[val:%d] failed for VF[%d]\n", 17296ddc7608SYuval Mintz val, p_vf->relative_vf_id); 17306ddc7608SYuval Mintz } 17316ddc7608SYuval Mintz 17326ddc7608SYuval Mintz return rc; 17336ddc7608SYuval Mintz } 17346ddc7608SYuval Mintz 173508feecd7SYuval Mintz static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, 173608feecd7SYuval Mintz struct qed_vf_info *p_vf) 173708feecd7SYuval Mintz { 173808feecd7SYuval Mintz struct qed_filter_ucast filter; 173908feecd7SYuval Mintz int 
rc = 0; 174008feecd7SYuval Mintz int i; 174108feecd7SYuval Mintz 174208feecd7SYuval Mintz memset(&filter, 0, sizeof(filter)); 174308feecd7SYuval Mintz filter.is_rx_filter = 1; 174408feecd7SYuval Mintz filter.is_tx_filter = 1; 174508feecd7SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 174608feecd7SYuval Mintz filter.opcode = QED_FILTER_ADD; 174708feecd7SYuval Mintz 174808feecd7SYuval Mintz /* Reconfigure vlans */ 174908feecd7SYuval Mintz for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { 175008feecd7SYuval Mintz if (!p_vf->shadow_config.vlans[i].used) 175108feecd7SYuval Mintz continue; 175208feecd7SYuval Mintz 175308feecd7SYuval Mintz filter.type = QED_FILTER_VLAN; 175408feecd7SYuval Mintz filter.vlan = p_vf->shadow_config.vlans[i].vid; 17551a635e48SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 175608feecd7SYuval Mintz "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", 175708feecd7SYuval Mintz filter.vlan, p_vf->relative_vf_id); 17581a635e48SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 17591a635e48SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 176008feecd7SYuval Mintz if (rc) { 176108feecd7SYuval Mintz DP_NOTICE(p_hwfn, 176208feecd7SYuval Mintz "Failed to configure VLAN [%04x] to VF [%04x]\n", 176308feecd7SYuval Mintz filter.vlan, p_vf->relative_vf_id); 176408feecd7SYuval Mintz break; 176508feecd7SYuval Mintz } 176608feecd7SYuval Mintz } 176708feecd7SYuval Mintz 176808feecd7SYuval Mintz return rc; 176908feecd7SYuval Mintz } 177008feecd7SYuval Mintz 177108feecd7SYuval Mintz static int 177208feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, 177308feecd7SYuval Mintz struct qed_vf_info *p_vf, u64 events) 177408feecd7SYuval Mintz { 177508feecd7SYuval Mintz int rc = 0; 177608feecd7SYuval Mintz 17771a635e48SYuval Mintz if ((events & BIT(VLAN_ADDR_FORCED)) && 177808feecd7SYuval Mintz !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) 177908feecd7SYuval Mintz rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); 
178008feecd7SYuval Mintz 178108feecd7SYuval Mintz return rc; 178208feecd7SYuval Mintz } 178308feecd7SYuval Mintz 178408feecd7SYuval Mintz static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, 178508feecd7SYuval Mintz struct qed_vf_info *p_vf, u64 events) 178608feecd7SYuval Mintz { 178708feecd7SYuval Mintz int rc = 0; 178808feecd7SYuval Mintz struct qed_filter_ucast filter; 178908feecd7SYuval Mintz 179008feecd7SYuval Mintz if (!p_vf->vport_instance) 179108feecd7SYuval Mintz return -EINVAL; 179208feecd7SYuval Mintz 17931a635e48SYuval Mintz if (events & BIT(MAC_ADDR_FORCED)) { 1794eff16960SYuval Mintz /* Since there's no way [currently] of removing the MAC, 1795eff16960SYuval Mintz * we can always assume this means we need to force it. 1796eff16960SYuval Mintz */ 1797eff16960SYuval Mintz memset(&filter, 0, sizeof(filter)); 1798eff16960SYuval Mintz filter.type = QED_FILTER_MAC; 1799eff16960SYuval Mintz filter.opcode = QED_FILTER_REPLACE; 1800eff16960SYuval Mintz filter.is_rx_filter = 1; 1801eff16960SYuval Mintz filter.is_tx_filter = 1; 1802eff16960SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 1803eff16960SYuval Mintz ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); 1804eff16960SYuval Mintz 1805eff16960SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 1806eff16960SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 1807eff16960SYuval Mintz if (rc) { 1808eff16960SYuval Mintz DP_NOTICE(p_hwfn, 1809eff16960SYuval Mintz "PF failed to configure MAC for VF\n"); 1810eff16960SYuval Mintz return rc; 1811eff16960SYuval Mintz } 1812eff16960SYuval Mintz 1813eff16960SYuval Mintz p_vf->configured_features |= 1 << MAC_ADDR_FORCED; 1814eff16960SYuval Mintz } 1815eff16960SYuval Mintz 18161a635e48SYuval Mintz if (events & BIT(VLAN_ADDR_FORCED)) { 181708feecd7SYuval Mintz struct qed_sp_vport_update_params vport_update; 181808feecd7SYuval Mintz u8 removal; 181908feecd7SYuval Mintz int i; 182008feecd7SYuval Mintz 182108feecd7SYuval Mintz 
memset(&filter, 0, sizeof(filter)); 182208feecd7SYuval Mintz filter.type = QED_FILTER_VLAN; 182308feecd7SYuval Mintz filter.is_rx_filter = 1; 182408feecd7SYuval Mintz filter.is_tx_filter = 1; 182508feecd7SYuval Mintz filter.vport_to_add_to = p_vf->vport_id; 182608feecd7SYuval Mintz filter.vlan = p_vf->bulletin.p_virt->pvid; 182708feecd7SYuval Mintz filter.opcode = filter.vlan ? QED_FILTER_REPLACE : 182808feecd7SYuval Mintz QED_FILTER_FLUSH; 182908feecd7SYuval Mintz 183008feecd7SYuval Mintz /* Send the ramrod */ 183108feecd7SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, 183208feecd7SYuval Mintz &filter, QED_SPQ_MODE_CB, NULL); 183308feecd7SYuval Mintz if (rc) { 183408feecd7SYuval Mintz DP_NOTICE(p_hwfn, 183508feecd7SYuval Mintz "PF failed to configure VLAN for VF\n"); 183608feecd7SYuval Mintz return rc; 183708feecd7SYuval Mintz } 183808feecd7SYuval Mintz 183908feecd7SYuval Mintz /* Update the default-vlan & silent vlan stripping */ 184008feecd7SYuval Mintz memset(&vport_update, 0, sizeof(vport_update)); 184108feecd7SYuval Mintz vport_update.opaque_fid = p_vf->opaque_fid; 184208feecd7SYuval Mintz vport_update.vport_id = p_vf->vport_id; 184308feecd7SYuval Mintz vport_update.update_default_vlan_enable_flg = 1; 184408feecd7SYuval Mintz vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; 184508feecd7SYuval Mintz vport_update.update_default_vlan_flg = 1; 184608feecd7SYuval Mintz vport_update.default_vlan = filter.vlan; 184708feecd7SYuval Mintz 184808feecd7SYuval Mintz vport_update.update_inner_vlan_removal_flg = 1; 184908feecd7SYuval Mintz removal = filter.vlan ? 1 185008feecd7SYuval Mintz : p_vf->shadow_config.inner_vlan_removal; 185108feecd7SYuval Mintz vport_update.inner_vlan_removal_flg = removal; 185208feecd7SYuval Mintz vport_update.silent_vlan_removal_flg = filter.vlan ? 
1 : 0; 185308feecd7SYuval Mintz rc = qed_sp_vport_update(p_hwfn, 185408feecd7SYuval Mintz &vport_update, 185508feecd7SYuval Mintz QED_SPQ_MODE_EBLOCK, NULL); 185608feecd7SYuval Mintz if (rc) { 185708feecd7SYuval Mintz DP_NOTICE(p_hwfn, 185808feecd7SYuval Mintz "PF failed to configure VF vport for vlan\n"); 185908feecd7SYuval Mintz return rc; 186008feecd7SYuval Mintz } 186108feecd7SYuval Mintz 186208feecd7SYuval Mintz /* Update all the Rx queues */ 186308feecd7SYuval Mintz for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { 1864007bc371SMintz, Yuval struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; 1865007bc371SMintz, Yuval struct qed_queue_cid *p_cid = NULL; 186608feecd7SYuval Mintz 1867007bc371SMintz, Yuval /* There can be at most 1 Rx queue on qzone. Find it */ 1868007bc371SMintz, Yuval p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); 18693da7a37aSMintz, Yuval if (!p_cid) 187008feecd7SYuval Mintz continue; 187108feecd7SYuval Mintz 18723da7a37aSMintz, Yuval rc = qed_sp_eth_rx_queues_update(p_hwfn, 18733da7a37aSMintz, Yuval (void **)&p_cid, 187408feecd7SYuval Mintz 1, 0, 1, 187508feecd7SYuval Mintz QED_SPQ_MODE_EBLOCK, 187608feecd7SYuval Mintz NULL); 187708feecd7SYuval Mintz if (rc) { 187808feecd7SYuval Mintz DP_NOTICE(p_hwfn, 187908feecd7SYuval Mintz "Failed to send Rx update fo queue[0x%04x]\n", 18803da7a37aSMintz, Yuval p_cid->rel.queue_id); 188108feecd7SYuval Mintz return rc; 188208feecd7SYuval Mintz } 188308feecd7SYuval Mintz } 188408feecd7SYuval Mintz 188508feecd7SYuval Mintz if (filter.vlan) 188608feecd7SYuval Mintz p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; 188708feecd7SYuval Mintz else 18881a635e48SYuval Mintz p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); 188908feecd7SYuval Mintz } 189008feecd7SYuval Mintz 189108feecd7SYuval Mintz /* If forced features are terminated, we need to configure the shadow 189208feecd7SYuval Mintz * configuration back again. 
189308feecd7SYuval Mintz */ 189408feecd7SYuval Mintz if (events) 189508feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); 189608feecd7SYuval Mintz 189708feecd7SYuval Mintz return rc; 189808feecd7SYuval Mintz } 189908feecd7SYuval Mintz 1900dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, 1901dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1902dacd88d6SYuval Mintz struct qed_vf_info *vf) 1903dacd88d6SYuval Mintz { 1904dacd88d6SYuval Mintz struct qed_sp_vport_start_params params = { 0 }; 1905dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 1906dacd88d6SYuval Mintz struct vfpf_vport_start_tlv *start; 1907dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1908dacd88d6SYuval Mintz struct qed_vf_info *vf_info; 190908feecd7SYuval Mintz u64 *p_bitmap; 1910dacd88d6SYuval Mintz int sb_id; 1911dacd88d6SYuval Mintz int rc; 1912dacd88d6SYuval Mintz 1913dacd88d6SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); 1914dacd88d6SYuval Mintz if (!vf_info) { 1915dacd88d6SYuval Mintz DP_NOTICE(p_hwfn->cdev, 1916dacd88d6SYuval Mintz "Failed to get VF info, invalid vfid [%d]\n", 1917dacd88d6SYuval Mintz vf->relative_vf_id); 1918dacd88d6SYuval Mintz return; 1919dacd88d6SYuval Mintz } 1920dacd88d6SYuval Mintz 1921dacd88d6SYuval Mintz vf->state = VF_ENABLED; 1922dacd88d6SYuval Mintz start = &mbx->req_virt->start_vport; 1923dacd88d6SYuval Mintz 1924b801b159SMintz, Yuval qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); 1925b801b159SMintz, Yuval 1926dacd88d6SYuval Mintz /* Initialize Status block in CAU */ 1927dacd88d6SYuval Mintz for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { 1928dacd88d6SYuval Mintz if (!start->sb_addr[sb_id]) { 1929dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 1930dacd88d6SYuval Mintz "VF[%d] did not fill the address of SB %d\n", 1931dacd88d6SYuval Mintz vf->relative_vf_id, sb_id); 1932dacd88d6SYuval Mintz break; 1933dacd88d6SYuval Mintz } 1934dacd88d6SYuval Mintz 
1935dacd88d6SYuval Mintz qed_int_cau_conf_sb(p_hwfn, p_ptt, 1936dacd88d6SYuval Mintz start->sb_addr[sb_id], 19371a635e48SYuval Mintz vf->igu_sbs[sb_id], vf->abs_vf_id, 1); 1938dacd88d6SYuval Mintz } 1939dacd88d6SYuval Mintz 1940dacd88d6SYuval Mintz vf->mtu = start->mtu; 194108feecd7SYuval Mintz vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; 194208feecd7SYuval Mintz 194308feecd7SYuval Mintz /* Take into consideration configuration forced by hypervisor; 194408feecd7SYuval Mintz * If none is configured, use the supplied VF values [for old 194508feecd7SYuval Mintz * vfs that would still be fine, since they passed '0' as padding]. 194608feecd7SYuval Mintz */ 194708feecd7SYuval Mintz p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; 19481a635e48SYuval Mintz if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { 194908feecd7SYuval Mintz u8 vf_req = start->only_untagged; 195008feecd7SYuval Mintz 195108feecd7SYuval Mintz vf_info->bulletin.p_virt->default_only_untagged = vf_req; 195208feecd7SYuval Mintz *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; 195308feecd7SYuval Mintz } 1954dacd88d6SYuval Mintz 1955dacd88d6SYuval Mintz params.tpa_mode = start->tpa_mode; 1956dacd88d6SYuval Mintz params.remove_inner_vlan = start->inner_vlan_removal; 1957831bfb0eSYuval Mintz params.tx_switching = true; 1958dacd88d6SYuval Mintz 195908feecd7SYuval Mintz params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; 1960dacd88d6SYuval Mintz params.drop_ttl0 = false; 1961dacd88d6SYuval Mintz params.concrete_fid = vf->concrete_fid; 1962dacd88d6SYuval Mintz params.opaque_fid = vf->opaque_fid; 1963dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 1964dacd88d6SYuval Mintz params.max_buffers_per_cqe = start->max_buffers_per_cqe; 1965dacd88d6SYuval Mintz params.mtu = vf->mtu; 196611a85d75SYuval Mintz params.check_mac = true; 1967dacd88d6SYuval Mintz 1968dacd88d6SYuval Mintz rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); 19691a635e48SYuval Mintz if (rc) { 
1970dacd88d6SYuval Mintz DP_ERR(p_hwfn, 1971dacd88d6SYuval Mintz "qed_iov_vf_mbx_start_vport returned error %d\n", rc); 1972dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 1973dacd88d6SYuval Mintz } else { 1974dacd88d6SYuval Mintz vf->vport_instance++; 197508feecd7SYuval Mintz 197608feecd7SYuval Mintz /* Force configuration if needed on the newly opened vport */ 197708feecd7SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); 19786ddc7608SYuval Mintz 19796ddc7608SYuval Mintz __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); 1980dacd88d6SYuval Mintz } 1981dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, 1982dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 1983dacd88d6SYuval Mintz } 1984dacd88d6SYuval Mintz 1985dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, 1986dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 1987dacd88d6SYuval Mintz struct qed_vf_info *vf) 1988dacd88d6SYuval Mintz { 1989dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 1990dacd88d6SYuval Mintz int rc; 1991dacd88d6SYuval Mintz 1992dacd88d6SYuval Mintz vf->vport_instance--; 19936ddc7608SYuval Mintz vf->spoof_chk = false; 1994dacd88d6SYuval Mintz 1995f109c240SMintz, Yuval if ((qed_iov_validate_active_rxq(p_hwfn, vf)) || 1996f109c240SMintz, Yuval (qed_iov_validate_active_txq(p_hwfn, vf))) { 1997f109c240SMintz, Yuval vf->b_malicious = true; 1998f109c240SMintz, Yuval DP_NOTICE(p_hwfn, 1999f109c240SMintz, Yuval "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n", 2000f109c240SMintz, Yuval vf->abs_vf_id); 2001f109c240SMintz, Yuval status = PFVF_STATUS_MALICIOUS; 2002f109c240SMintz, Yuval goto out; 2003f109c240SMintz, Yuval } 2004f109c240SMintz, Yuval 2005dacd88d6SYuval Mintz rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); 20061a635e48SYuval Mintz if (rc) { 2007dacd88d6SYuval Mintz DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", 2008dacd88d6SYuval 
Mintz rc); 2009dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2010dacd88d6SYuval Mintz } 2011dacd88d6SYuval Mintz 201208feecd7SYuval Mintz /* Forget the configuration on the vport */ 201308feecd7SYuval Mintz vf->configured_features = 0; 201408feecd7SYuval Mintz memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); 201508feecd7SYuval Mintz 2016f109c240SMintz, Yuval out: 2017dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, 2018dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 2019dacd88d6SYuval Mintz } 2020dacd88d6SYuval Mintz 2021dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, 2022dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2023a044df83SYuval Mintz struct qed_vf_info *vf, 2024a044df83SYuval Mintz u8 status, bool b_legacy) 2025dacd88d6SYuval Mintz { 2026dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 2027dacd88d6SYuval Mintz struct pfvf_start_queue_resp_tlv *p_tlv; 2028dacd88d6SYuval Mintz struct vfpf_start_rxq_tlv *req; 2029a044df83SYuval Mintz u16 length; 2030dacd88d6SYuval Mintz 2031dacd88d6SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 2032dacd88d6SYuval Mintz 2033a044df83SYuval Mintz /* Taking a bigger struct instead of adding a TLV to list was a 2034a044df83SYuval Mintz * mistake, but one which we're now stuck with, as some older 2035a044df83SYuval Mintz * clients assume the size of the previous response. 
2036a044df83SYuval Mintz */ 2037a044df83SYuval Mintz if (!b_legacy) 2038a044df83SYuval Mintz length = sizeof(*p_tlv); 2039a044df83SYuval Mintz else 2040a044df83SYuval Mintz length = sizeof(struct pfvf_def_resp_tlv); 2041a044df83SYuval Mintz 2042dacd88d6SYuval Mintz p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, 2043a044df83SYuval Mintz length); 2044dacd88d6SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 2045dacd88d6SYuval Mintz sizeof(struct channel_list_end_tlv)); 2046dacd88d6SYuval Mintz 2047dacd88d6SYuval Mintz /* Update the TLV with the response */ 2048a044df83SYuval Mintz if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { 2049dacd88d6SYuval Mintz req = &mbx->req_virt->start_rxq; 2050351a4dedSYuval Mintz p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + 2051351a4dedSYuval Mintz offsetof(struct mstorm_vf_zone, 2052351a4dedSYuval Mintz non_trigger.eth_rx_queue_producers) + 2053351a4dedSYuval Mintz sizeof(struct eth_rx_prod_data) * req->rx_qid; 2054dacd88d6SYuval Mintz } 2055dacd88d6SYuval Mintz 2056a044df83SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); 2057dacd88d6SYuval Mintz } 2058dacd88d6SYuval Mintz 2059bbe3f233SMintz, Yuval static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, 2060bbe3f233SMintz, Yuval struct qed_vf_info *p_vf, bool b_is_tx) 2061bbe3f233SMintz, Yuval { 206208bc8f15SMintz, Yuval struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; 206308bc8f15SMintz, Yuval struct vfpf_qid_tlv *p_qid_tlv; 206408bc8f15SMintz, Yuval 206508bc8f15SMintz, Yuval /* Search for the qid if the VF published its going to provide it */ 206608bc8f15SMintz, Yuval if (!(p_vf->acquire.vfdev_info.capabilities & 206708bc8f15SMintz, Yuval VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { 2068bbe3f233SMintz, Yuval if (b_is_tx) 2069bbe3f233SMintz, Yuval return QED_IOV_LEGACY_QID_TX; 2070bbe3f233SMintz, Yuval else 2071bbe3f233SMintz, Yuval return QED_IOV_LEGACY_QID_RX; 2072bbe3f233SMintz, Yuval } 2073bbe3f233SMintz, Yuval 207408bc8f15SMintz, 
Yuval p_qid_tlv = (struct vfpf_qid_tlv *) 207508bc8f15SMintz, Yuval qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, 207608bc8f15SMintz, Yuval CHANNEL_TLV_QID); 207708bc8f15SMintz, Yuval if (!p_qid_tlv) { 207808bc8f15SMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_IOV, 207908bc8f15SMintz, Yuval "VF[%2x]: Failed to provide qid\n", 208008bc8f15SMintz, Yuval p_vf->relative_vf_id); 208108bc8f15SMintz, Yuval 208208bc8f15SMintz, Yuval return QED_IOV_QID_INVALID; 208308bc8f15SMintz, Yuval } 208408bc8f15SMintz, Yuval 208508bc8f15SMintz, Yuval if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { 208608bc8f15SMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_IOV, 208708bc8f15SMintz, Yuval "VF[%02x]: Provided qid out-of-bounds %02x\n", 208808bc8f15SMintz, Yuval p_vf->relative_vf_id, p_qid_tlv->qid); 208908bc8f15SMintz, Yuval return QED_IOV_QID_INVALID; 209008bc8f15SMintz, Yuval } 209108bc8f15SMintz, Yuval 209208bc8f15SMintz, Yuval return p_qid_tlv->qid; 209308bc8f15SMintz, Yuval } 209408bc8f15SMintz, Yuval 2095dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, 2096dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2097dacd88d6SYuval Mintz struct qed_vf_info *vf) 2098dacd88d6SYuval Mintz { 2099dacd88d6SYuval Mintz struct qed_queue_start_common_params params; 21003946497aSMintz, Yuval struct qed_queue_cid_vf_params vf_params; 2101dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 210241086467SYuval Mintz u8 status = PFVF_STATUS_NO_RESOURCE; 21033b19f478SMintz, Yuval u8 qid_usage_idx, vf_legacy = 0; 2104dacd88d6SYuval Mintz struct vfpf_start_rxq_tlv *req; 2105007bc371SMintz, Yuval struct qed_vf_queue *p_queue; 2106007bc371SMintz, Yuval struct qed_queue_cid *p_cid; 2107f604b17dSMintz, Yuval struct qed_sb_info sb_dummy; 2108dacd88d6SYuval Mintz int rc; 2109dacd88d6SYuval Mintz 2110dacd88d6SYuval Mintz req = &mbx->req_virt->start_rxq; 211141086467SYuval Mintz 2112f109c240SMintz, Yuval if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid, 2113f109c240SMintz, Yuval 
QED_IOV_VALIDATE_Q_DISABLE) || 211441086467SYuval Mintz !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 211541086467SYuval Mintz goto out; 211641086467SYuval Mintz 2117bbe3f233SMintz, Yuval qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); 211808bc8f15SMintz, Yuval if (qid_usage_idx == QED_IOV_QID_INVALID) 211908bc8f15SMintz, Yuval goto out; 212008bc8f15SMintz, Yuval 21213da7a37aSMintz, Yuval p_queue = &vf->vf_queues[req->rx_qid]; 212208bc8f15SMintz, Yuval if (p_queue->cids[qid_usage_idx].p_cid) 212308bc8f15SMintz, Yuval goto out; 21243da7a37aSMintz, Yuval 21253b19f478SMintz, Yuval vf_legacy = qed_vf_calculate_legacy(vf); 21263946497aSMintz, Yuval 2127bbe3f233SMintz, Yuval /* Acquire a new queue-cid */ 21283da7a37aSMintz, Yuval memset(¶ms, 0, sizeof(params)); 21293da7a37aSMintz, Yuval params.queue_id = p_queue->fw_rx_qid; 2130dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 21313da7a37aSMintz, Yuval params.stats_id = vf->abs_vf_id + 0x10; 2132f604b17dSMintz, Yuval /* Since IGU index is passed via sb_info, construct a dummy one */ 2133f604b17dSMintz, Yuval memset(&sb_dummy, 0, sizeof(sb_dummy)); 2134f604b17dSMintz, Yuval sb_dummy.igu_sb_id = req->hw_sb; 2135f604b17dSMintz, Yuval params.p_sb = &sb_dummy; 2136dacd88d6SYuval Mintz params.sb_idx = req->sb_index; 2137dacd88d6SYuval Mintz 21383946497aSMintz, Yuval memset(&vf_params, 0, sizeof(vf_params)); 21393946497aSMintz, Yuval vf_params.vfid = vf->relative_vf_id; 21403946497aSMintz, Yuval vf_params.vf_qid = (u8)req->rx_qid; 21413b19f478SMintz, Yuval vf_params.vf_legacy = vf_legacy; 2142bbe3f233SMintz, Yuval vf_params.qid_usage_idx = qid_usage_idx; 2143007bc371SMintz, Yuval p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, 2144007bc371SMintz, Yuval ¶ms, true, &vf_params); 2145007bc371SMintz, Yuval if (!p_cid) 21463da7a37aSMintz, Yuval goto out; 21473da7a37aSMintz, Yuval 2148a044df83SYuval Mintz /* Legacy VFs have their Producers in a different location, which they 2149a044df83SYuval Mintz * calculate on their 
own and clean the producer prior to this. 2150a044df83SYuval Mintz */ 21513b19f478SMintz, Yuval if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) 2152a044df83SYuval Mintz REG_WR(p_hwfn, 2153a044df83SYuval Mintz GTT_BAR0_MAP_REG_MSDM_RAM + 2154a044df83SYuval Mintz MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 2155a044df83SYuval Mintz 0); 2156a044df83SYuval Mintz 2157007bc371SMintz, Yuval rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, 2158dacd88d6SYuval Mintz req->bd_max_bytes, 2159dacd88d6SYuval Mintz req->rxq_addr, 21603da7a37aSMintz, Yuval req->cqe_pbl_addr, req->cqe_pbl_size); 2161dacd88d6SYuval Mintz if (rc) { 2162dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2163007bc371SMintz, Yuval qed_eth_queue_cid_release(p_hwfn, p_cid); 2164dacd88d6SYuval Mintz } else { 2165007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].p_cid = p_cid; 2166007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].b_is_tx = false; 216741086467SYuval Mintz status = PFVF_STATUS_SUCCESS; 2168dacd88d6SYuval Mintz vf->num_active_rxqs++; 2169dacd88d6SYuval Mintz } 2170dacd88d6SYuval Mintz 217141086467SYuval Mintz out: 21723b19f478SMintz, Yuval qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, 21733b19f478SMintz, Yuval !!(vf_legacy & 21743b19f478SMintz, Yuval QED_QCID_LEGACY_VF_RX_PROD)); 2175dacd88d6SYuval Mintz } 2176dacd88d6SYuval Mintz 2177eaf3c0c6SChopra, Manish static void 2178eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, 2179eaf3c0c6SChopra, Manish struct qed_tunnel_info *p_tun, 2180eaf3c0c6SChopra, Manish u16 tunn_feature_mask) 2181eaf3c0c6SChopra, Manish { 2182eaf3c0c6SChopra, Manish p_resp->tunn_feature_mask = tunn_feature_mask; 2183eaf3c0c6SChopra, Manish p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; 2184eaf3c0c6SChopra, Manish p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; 2185eaf3c0c6SChopra, Manish p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; 2186eaf3c0c6SChopra, Manish p_resp->l2gre_mode = 
p_tun->l2_gre.b_mode_enabled; 2187eaf3c0c6SChopra, Manish p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled; 2188eaf3c0c6SChopra, Manish p_resp->vxlan_clss = p_tun->vxlan.tun_cls; 2189eaf3c0c6SChopra, Manish p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; 2190eaf3c0c6SChopra, Manish p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; 2191eaf3c0c6SChopra, Manish p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; 2192eaf3c0c6SChopra, Manish p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; 2193eaf3c0c6SChopra, Manish p_resp->geneve_udp_port = p_tun->geneve_port.port; 2194eaf3c0c6SChopra, Manish p_resp->vxlan_udp_port = p_tun->vxlan_port.port; 2195eaf3c0c6SChopra, Manish } 2196eaf3c0c6SChopra, Manish 2197eaf3c0c6SChopra, Manish static void 2198eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2199eaf3c0c6SChopra, Manish struct qed_tunn_update_type *p_tun, 2200eaf3c0c6SChopra, Manish enum qed_tunn_mode mask, u8 tun_cls) 2201eaf3c0c6SChopra, Manish { 2202eaf3c0c6SChopra, Manish if (p_req->tun_mode_update_mask & BIT(mask)) { 2203eaf3c0c6SChopra, Manish p_tun->b_update_mode = true; 2204eaf3c0c6SChopra, Manish 2205eaf3c0c6SChopra, Manish if (p_req->tunn_mode & BIT(mask)) 2206eaf3c0c6SChopra, Manish p_tun->b_mode_enabled = true; 2207eaf3c0c6SChopra, Manish } 2208eaf3c0c6SChopra, Manish 2209eaf3c0c6SChopra, Manish p_tun->tun_cls = tun_cls; 2210eaf3c0c6SChopra, Manish } 2211eaf3c0c6SChopra, Manish 2212eaf3c0c6SChopra, Manish static void 2213eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2214eaf3c0c6SChopra, Manish struct qed_tunn_update_type *p_tun, 2215eaf3c0c6SChopra, Manish struct qed_tunn_update_udp_port *p_port, 2216eaf3c0c6SChopra, Manish enum qed_tunn_mode mask, 2217eaf3c0c6SChopra, Manish u8 tun_cls, u8 update_port, u16 port) 2218eaf3c0c6SChopra, Manish { 2219eaf3c0c6SChopra, Manish if (update_port) { 2220eaf3c0c6SChopra, Manish p_port->b_update_port = true; 2221eaf3c0c6SChopra, Manish 
p_port->port = port; 2222eaf3c0c6SChopra, Manish } 2223eaf3c0c6SChopra, Manish 2224eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); 2225eaf3c0c6SChopra, Manish } 2226eaf3c0c6SChopra, Manish 2227eaf3c0c6SChopra, Manish static bool 2228eaf3c0c6SChopra, Manish qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) 2229eaf3c0c6SChopra, Manish { 2230eaf3c0c6SChopra, Manish bool b_update_requested = false; 2231eaf3c0c6SChopra, Manish 2232eaf3c0c6SChopra, Manish if (p_req->tun_mode_update_mask || p_req->update_tun_cls || 2233eaf3c0c6SChopra, Manish p_req->update_geneve_port || p_req->update_vxlan_port) 2234eaf3c0c6SChopra, Manish b_update_requested = true; 2235eaf3c0c6SChopra, Manish 2236eaf3c0c6SChopra, Manish return b_update_requested; 2237eaf3c0c6SChopra, Manish } 2238eaf3c0c6SChopra, Manish 2239eaf3c0c6SChopra, Manish static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) 2240eaf3c0c6SChopra, Manish { 2241eaf3c0c6SChopra, Manish if (tun->b_update_mode && !tun->b_mode_enabled) { 2242eaf3c0c6SChopra, Manish tun->b_update_mode = false; 2243eaf3c0c6SChopra, Manish *rc = -EINVAL; 2244eaf3c0c6SChopra, Manish } 2245eaf3c0c6SChopra, Manish } 2246eaf3c0c6SChopra, Manish 2247eaf3c0c6SChopra, Manish static int 2248eaf3c0c6SChopra, Manish qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, 2249eaf3c0c6SChopra, Manish u16 *tun_features, bool *update, 2250eaf3c0c6SChopra, Manish struct qed_tunnel_info *tun_src) 2251eaf3c0c6SChopra, Manish { 2252eaf3c0c6SChopra, Manish struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; 2253eaf3c0c6SChopra, Manish struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; 2254eaf3c0c6SChopra, Manish u16 bultn_vxlan_port, bultn_geneve_port; 2255eaf3c0c6SChopra, Manish void *cookie = p_hwfn->cdev->ops_cookie; 2256eaf3c0c6SChopra, Manish int i, rc = 0; 2257eaf3c0c6SChopra, Manish 2258eaf3c0c6SChopra, Manish *tun_features = p_hwfn->cdev->tunn_feature_mask; 
2259eaf3c0c6SChopra, Manish bultn_vxlan_port = tun->vxlan_port.port; 2260eaf3c0c6SChopra, Manish bultn_geneve_port = tun->geneve_port.port; 2261eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); 2262eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); 2263eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); 2264eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); 2265eaf3c0c6SChopra, Manish qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); 2266eaf3c0c6SChopra, Manish 2267eaf3c0c6SChopra, Manish if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && 2268eaf3c0c6SChopra, Manish (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2269eaf3c0c6SChopra, Manish tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2270eaf3c0c6SChopra, Manish tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2271eaf3c0c6SChopra, Manish tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2272eaf3c0c6SChopra, Manish tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { 2273eaf3c0c6SChopra, Manish tun_src->b_update_rx_cls = false; 2274eaf3c0c6SChopra, Manish tun_src->b_update_tx_cls = false; 2275eaf3c0c6SChopra, Manish rc = -EINVAL; 2276eaf3c0c6SChopra, Manish } 2277eaf3c0c6SChopra, Manish 2278eaf3c0c6SChopra, Manish if (tun_src->vxlan_port.b_update_port) { 2279eaf3c0c6SChopra, Manish if (tun_src->vxlan_port.port == tun->vxlan_port.port) { 2280eaf3c0c6SChopra, Manish tun_src->vxlan_port.b_update_port = false; 2281eaf3c0c6SChopra, Manish } else { 2282eaf3c0c6SChopra, Manish *update = true; 2283eaf3c0c6SChopra, Manish bultn_vxlan_port = tun_src->vxlan_port.port; 2284eaf3c0c6SChopra, Manish } 2285eaf3c0c6SChopra, Manish } 2286eaf3c0c6SChopra, Manish 2287eaf3c0c6SChopra, Manish if (tun_src->geneve_port.b_update_port) { 2288eaf3c0c6SChopra, Manish if (tun_src->geneve_port.port == tun->geneve_port.port) { 2289eaf3c0c6SChopra, Manish tun_src->geneve_port.b_update_port = false; 
2290eaf3c0c6SChopra, Manish } else { 2291eaf3c0c6SChopra, Manish *update = true; 2292eaf3c0c6SChopra, Manish bultn_geneve_port = tun_src->geneve_port.port; 2293eaf3c0c6SChopra, Manish } 2294eaf3c0c6SChopra, Manish } 2295eaf3c0c6SChopra, Manish 2296eaf3c0c6SChopra, Manish qed_for_each_vf(p_hwfn, i) { 2297eaf3c0c6SChopra, Manish qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, 2298eaf3c0c6SChopra, Manish bultn_geneve_port); 2299eaf3c0c6SChopra, Manish } 2300eaf3c0c6SChopra, Manish 2301eaf3c0c6SChopra, Manish qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 2302eaf3c0c6SChopra, Manish ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); 2303eaf3c0c6SChopra, Manish 2304eaf3c0c6SChopra, Manish return rc; 2305eaf3c0c6SChopra, Manish } 2306eaf3c0c6SChopra, Manish 2307eaf3c0c6SChopra, Manish static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, 2308eaf3c0c6SChopra, Manish struct qed_ptt *p_ptt, 2309eaf3c0c6SChopra, Manish struct qed_vf_info *p_vf) 2310eaf3c0c6SChopra, Manish { 2311eaf3c0c6SChopra, Manish struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; 2312eaf3c0c6SChopra, Manish struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 2313eaf3c0c6SChopra, Manish struct pfvf_update_tunn_param_tlv *p_resp; 2314eaf3c0c6SChopra, Manish struct vfpf_update_tunn_param_tlv *p_req; 2315eaf3c0c6SChopra, Manish u8 status = PFVF_STATUS_SUCCESS; 2316eaf3c0c6SChopra, Manish bool b_update_required = false; 2317eaf3c0c6SChopra, Manish struct qed_tunnel_info tunn; 2318eaf3c0c6SChopra, Manish u16 tunn_feature_mask = 0; 2319eaf3c0c6SChopra, Manish int i, rc = 0; 2320eaf3c0c6SChopra, Manish 2321eaf3c0c6SChopra, Manish mbx->offset = (u8 *)mbx->reply_virt; 2322eaf3c0c6SChopra, Manish 2323eaf3c0c6SChopra, Manish memset(&tunn, 0, sizeof(tunn)); 2324eaf3c0c6SChopra, Manish p_req = &mbx->req_virt->tunn_param_update; 2325eaf3c0c6SChopra, Manish 2326eaf3c0c6SChopra, Manish if (!qed_iov_pf_validate_tunn_param(p_req)) { 2327eaf3c0c6SChopra, Manish 
DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2328eaf3c0c6SChopra, Manish "No tunnel update requested by VF\n"); 2329eaf3c0c6SChopra, Manish status = PFVF_STATUS_FAILURE; 2330eaf3c0c6SChopra, Manish goto send_resp; 2331eaf3c0c6SChopra, Manish } 2332eaf3c0c6SChopra, Manish 2333eaf3c0c6SChopra, Manish tunn.b_update_rx_cls = p_req->update_tun_cls; 2334eaf3c0c6SChopra, Manish tunn.b_update_tx_cls = p_req->update_tun_cls; 2335eaf3c0c6SChopra, Manish 2336eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, 2337eaf3c0c6SChopra, Manish QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, 2338eaf3c0c6SChopra, Manish p_req->update_vxlan_port, 2339eaf3c0c6SChopra, Manish p_req->vxlan_port); 2340eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, 2341eaf3c0c6SChopra, Manish QED_MODE_L2GENEVE_TUNN, 2342eaf3c0c6SChopra, Manish p_req->l2geneve_clss, 2343eaf3c0c6SChopra, Manish p_req->update_geneve_port, 2344eaf3c0c6SChopra, Manish p_req->geneve_port); 2345eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, 2346eaf3c0c6SChopra, Manish QED_MODE_IPGENEVE_TUNN, 2347eaf3c0c6SChopra, Manish p_req->ipgeneve_clss); 2348eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, 2349eaf3c0c6SChopra, Manish QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); 2350eaf3c0c6SChopra, Manish __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, 2351eaf3c0c6SChopra, Manish QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); 2352eaf3c0c6SChopra, Manish 2353eaf3c0c6SChopra, Manish /* If PF modifies VF's req then it should 2354eaf3c0c6SChopra, Manish * still return an error in case of partial configuration 2355eaf3c0c6SChopra, Manish * or modified configuration as opposed to requested one. 
2356eaf3c0c6SChopra, Manish */ 2357eaf3c0c6SChopra, Manish rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask, 2358eaf3c0c6SChopra, Manish &b_update_required, &tunn); 2359eaf3c0c6SChopra, Manish 2360eaf3c0c6SChopra, Manish if (rc) 2361eaf3c0c6SChopra, Manish status = PFVF_STATUS_FAILURE; 2362eaf3c0c6SChopra, Manish 2363eaf3c0c6SChopra, Manish /* If QED client is willing to update anything ? */ 2364eaf3c0c6SChopra, Manish if (b_update_required) { 2365eaf3c0c6SChopra, Manish u16 geneve_port; 2366eaf3c0c6SChopra, Manish 23674f64675fSManish Chopra rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, 2368eaf3c0c6SChopra, Manish QED_SPQ_MODE_EBLOCK, NULL); 2369eaf3c0c6SChopra, Manish if (rc) 2370eaf3c0c6SChopra, Manish status = PFVF_STATUS_FAILURE; 2371eaf3c0c6SChopra, Manish 2372eaf3c0c6SChopra, Manish geneve_port = p_tun->geneve_port.port; 2373eaf3c0c6SChopra, Manish qed_for_each_vf(p_hwfn, i) { 2374eaf3c0c6SChopra, Manish qed_iov_bulletin_set_udp_ports(p_hwfn, i, 2375eaf3c0c6SChopra, Manish p_tun->vxlan_port.port, 2376eaf3c0c6SChopra, Manish geneve_port); 2377eaf3c0c6SChopra, Manish } 2378eaf3c0c6SChopra, Manish } 2379eaf3c0c6SChopra, Manish 2380eaf3c0c6SChopra, Manish send_resp: 2381eaf3c0c6SChopra, Manish p_resp = qed_add_tlv(p_hwfn, &mbx->offset, 2382eaf3c0c6SChopra, Manish CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); 2383eaf3c0c6SChopra, Manish 2384eaf3c0c6SChopra, Manish qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); 2385eaf3c0c6SChopra, Manish qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 2386eaf3c0c6SChopra, Manish sizeof(struct channel_list_end_tlv)); 2387eaf3c0c6SChopra, Manish 2388eaf3c0c6SChopra, Manish qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 2389eaf3c0c6SChopra, Manish } 2390eaf3c0c6SChopra, Manish 23915040acf5SYuval Mintz static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, 23925040acf5SYuval Mintz struct qed_ptt *p_ptt, 2393007bc371SMintz, Yuval struct qed_vf_info 
*p_vf, 2394007bc371SMintz, Yuval u32 cid, u8 status) 23955040acf5SYuval Mintz { 23965040acf5SYuval Mintz struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 23975040acf5SYuval Mintz struct pfvf_start_queue_resp_tlv *p_tlv; 2398a044df83SYuval Mintz bool b_legacy = false; 2399a044df83SYuval Mintz u16 length; 24005040acf5SYuval Mintz 24015040acf5SYuval Mintz mbx->offset = (u8 *)mbx->reply_virt; 24025040acf5SYuval Mintz 2403a044df83SYuval Mintz /* Taking a bigger struct instead of adding a TLV to list was a 2404a044df83SYuval Mintz * mistake, but one which we're now stuck with, as some older 2405a044df83SYuval Mintz * clients assume the size of the previous response. 2406a044df83SYuval Mintz */ 2407a044df83SYuval Mintz if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == 2408a044df83SYuval Mintz ETH_HSI_VER_NO_PKT_LEN_TUNN) 2409a044df83SYuval Mintz b_legacy = true; 2410a044df83SYuval Mintz 2411a044df83SYuval Mintz if (!b_legacy) 2412a044df83SYuval Mintz length = sizeof(*p_tlv); 2413a044df83SYuval Mintz else 2414a044df83SYuval Mintz length = sizeof(struct pfvf_def_resp_tlv); 2415a044df83SYuval Mintz 24165040acf5SYuval Mintz p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, 2417a044df83SYuval Mintz length); 24185040acf5SYuval Mintz qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 24195040acf5SYuval Mintz sizeof(struct channel_list_end_tlv)); 24205040acf5SYuval Mintz 24215040acf5SYuval Mintz /* Update the TLV with the response */ 2422007bc371SMintz, Yuval if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) 2423007bc371SMintz, Yuval p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); 24245040acf5SYuval Mintz 2425a044df83SYuval Mintz qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); 24265040acf5SYuval Mintz } 24275040acf5SYuval Mintz 2428dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, 2429dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 2430dacd88d6SYuval Mintz struct qed_vf_info *vf) 2431dacd88d6SYuval Mintz { 
2432dacd88d6SYuval Mintz struct qed_queue_start_common_params params; 24333946497aSMintz, Yuval struct qed_queue_cid_vf_params vf_params; 2434dacd88d6SYuval Mintz struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; 243541086467SYuval Mintz u8 status = PFVF_STATUS_NO_RESOURCE; 2436dacd88d6SYuval Mintz struct vfpf_start_txq_tlv *req; 2437007bc371SMintz, Yuval struct qed_vf_queue *p_queue; 2438007bc371SMintz, Yuval struct qed_queue_cid *p_cid; 2439f604b17dSMintz, Yuval struct qed_sb_info sb_dummy; 24403b19f478SMintz, Yuval u8 qid_usage_idx, vf_legacy; 2441007bc371SMintz, Yuval u32 cid = 0; 2442dacd88d6SYuval Mintz int rc; 24433da7a37aSMintz, Yuval u16 pq; 2444dacd88d6SYuval Mintz 2445dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(params)); 2446dacd88d6SYuval Mintz req = &mbx->req_virt->start_txq; 244741086467SYuval Mintz 2448f109c240SMintz, Yuval if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, 244908bc8f15SMintz, Yuval QED_IOV_VALIDATE_Q_NA) || 245041086467SYuval Mintz !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) 245141086467SYuval Mintz goto out; 245241086467SYuval Mintz 2453bbe3f233SMintz, Yuval qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); 245408bc8f15SMintz, Yuval if (qid_usage_idx == QED_IOV_QID_INVALID) 245508bc8f15SMintz, Yuval goto out; 245608bc8f15SMintz, Yuval 24573da7a37aSMintz, Yuval p_queue = &vf->vf_queues[req->tx_qid]; 245808bc8f15SMintz, Yuval if (p_queue->cids[qid_usage_idx].p_cid) 245908bc8f15SMintz, Yuval goto out; 24603da7a37aSMintz, Yuval 24613b19f478SMintz, Yuval vf_legacy = qed_vf_calculate_legacy(vf); 24623946497aSMintz, Yuval 2463bbe3f233SMintz, Yuval /* Acquire a new queue-cid */ 24643da7a37aSMintz, Yuval params.queue_id = p_queue->fw_tx_qid; 2465dacd88d6SYuval Mintz params.vport_id = vf->vport_id; 24663da7a37aSMintz, Yuval params.stats_id = vf->abs_vf_id + 0x10; 2467f604b17dSMintz, Yuval 2468f604b17dSMintz, Yuval /* Since IGU index is passed via sb_info, construct a dummy one */ 2469f604b17dSMintz, Yuval memset(&sb_dummy, 0, sizeof(sb_dummy)); 
2470f604b17dSMintz, Yuval sb_dummy.igu_sb_id = req->hw_sb; 2471f604b17dSMintz, Yuval params.p_sb = &sb_dummy; 2472dacd88d6SYuval Mintz params.sb_idx = req->sb_index; 2473dacd88d6SYuval Mintz 24743946497aSMintz, Yuval memset(&vf_params, 0, sizeof(vf_params)); 24753946497aSMintz, Yuval vf_params.vfid = vf->relative_vf_id; 24763946497aSMintz, Yuval vf_params.vf_qid = (u8)req->tx_qid; 24773b19f478SMintz, Yuval vf_params.vf_legacy = vf_legacy; 2478bbe3f233SMintz, Yuval vf_params.qid_usage_idx = qid_usage_idx; 24793946497aSMintz, Yuval 2480007bc371SMintz, Yuval p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, 2481007bc371SMintz, Yuval ¶ms, false, &vf_params); 2482007bc371SMintz, Yuval if (!p_cid) 24833da7a37aSMintz, Yuval goto out; 2484dacd88d6SYuval Mintz 2485b5a9ee7cSAriel Elior pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); 2486007bc371SMintz, Yuval rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, 24873da7a37aSMintz, Yuval req->pbl_addr, req->pbl_size, pq); 248841086467SYuval Mintz if (rc) { 2489dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 2490007bc371SMintz, Yuval qed_eth_queue_cid_release(p_hwfn, p_cid); 249141086467SYuval Mintz } else { 249241086467SYuval Mintz status = PFVF_STATUS_SUCCESS; 2493007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].p_cid = p_cid; 2494007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].b_is_tx = true; 2495007bc371SMintz, Yuval cid = p_cid->cid; 249641086467SYuval Mintz } 2497dacd88d6SYuval Mintz 249841086467SYuval Mintz out: 2499007bc371SMintz, Yuval qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status); 2500dacd88d6SYuval Mintz } 2501dacd88d6SYuval Mintz 2502dacd88d6SYuval Mintz static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, 2503dacd88d6SYuval Mintz struct qed_vf_info *vf, 2504007bc371SMintz, Yuval u16 rxq_id, 2505007bc371SMintz, Yuval u8 qid_usage_idx, bool cqe_completion) 2506dacd88d6SYuval Mintz { 2507007bc371SMintz, Yuval struct qed_vf_queue *p_queue; 2508dacd88d6SYuval Mintz int rc = 0; 
2509dacd88d6SYuval Mintz 251008bc8f15SMintz, Yuval if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) { 25114c4fa793SMintz, Yuval DP_VERBOSE(p_hwfn, 25124c4fa793SMintz, Yuval QED_MSG_IOV, 251308bc8f15SMintz, Yuval "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", 251408bc8f15SMintz, Yuval vf->relative_vf_id, rxq_id, qid_usage_idx); 2515dacd88d6SYuval Mintz return -EINVAL; 25164c4fa793SMintz, Yuval } 2517dacd88d6SYuval Mintz 25184c4fa793SMintz, Yuval p_queue = &vf->vf_queues[rxq_id]; 25193da7a37aSMintz, Yuval 252008bc8f15SMintz, Yuval /* We've validated the index and the existence of the active RXQ - 252108bc8f15SMintz, Yuval * now we need to make sure that it's using the correct qid. 252208bc8f15SMintz, Yuval */ 252308bc8f15SMintz, Yuval if (!p_queue->cids[qid_usage_idx].p_cid || 252408bc8f15SMintz, Yuval p_queue->cids[qid_usage_idx].b_is_tx) { 252508bc8f15SMintz, Yuval struct qed_queue_cid *p_cid; 252608bc8f15SMintz, Yuval 252708bc8f15SMintz, Yuval p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); 252808bc8f15SMintz, Yuval DP_VERBOSE(p_hwfn, 252908bc8f15SMintz, Yuval QED_MSG_IOV, 253008bc8f15SMintz, Yuval "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", 253108bc8f15SMintz, Yuval vf->relative_vf_id, 253208bc8f15SMintz, Yuval rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); 253308bc8f15SMintz, Yuval return -EINVAL; 253408bc8f15SMintz, Yuval } 253508bc8f15SMintz, Yuval 253608bc8f15SMintz, Yuval /* Now that we know we have a valid Rx-queue - close it */ 25373da7a37aSMintz, Yuval rc = qed_eth_rx_queue_stop(p_hwfn, 2538007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].p_cid, 25393da7a37aSMintz, Yuval false, cqe_completion); 2540dacd88d6SYuval Mintz if (rc) 2541dacd88d6SYuval Mintz return rc; 25423da7a37aSMintz, Yuval 2543007bc371SMintz, Yuval p_queue->cids[qid_usage_idx].p_cid = NULL; 2544dacd88d6SYuval Mintz vf->num_active_rxqs--; 2545dacd88d6SYuval Mintz 25464c4fa793SMintz, Yuval return 0; 2547dacd88d6SYuval Mintz } 

/* Close a single VF Tx queue identified by @txq_id / @qid_usage_idx.
 *
 * Mirrors qed_iov_vf_stop_rxqs() for the Tx direction: range-check the
 * index, verify the cid slot holds a Tx queue, stop it, and clear the
 * shadow slot.
 *
 * Return: 0 on success, -EINVAL on bad input, or the ramrod's error code.
 */
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 txq_id, u8 qid_usage_idx)
{
	struct qed_vf_queue *p_queue;
	int rc = 0;

	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
		return -EINVAL;

	p_queue = &vf->vf_queues[txq_id];
	if (!p_queue->cids[qid_usage_idx].p_cid ||
	    !p_queue->cids[qid_usage_idx].b_is_tx)
		return -EINVAL;

	rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
	if (rc)
		return rc;

	p_queue->cids[qid_usage_idx].p_cid = NULL;
	return 0;
}

/* Mailbox handler for a VF's STOP_RXQS request.
 *
 * Rejects the deprecated multi-queue form, resolves the qid-usage index
 * from the (optional) qid TLV, stops the queue, and always sends a
 * response TLV back to the VF with the resulting status.
 */
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_rxqs_tlv *req;
	u8 qid_usage_idx;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_rxqs;
	if (req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  qid_usage_idx, req->cqe_completion);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}

/* Mailbox handler for a VF's STOP_TXQS request.
 *
 * Same flow as qed_iov_vf_mbx_stop_rxqs(), but for the Tx direction.
 */
static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_txqs_tlv *req;
	u8 qid_usage_idx;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_txqs;
	if (req->num_txqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}

/* Mailbox handler for a VF's UPDATE_RXQ request.
 *
 * Validates the requested Rx queue range against the VF's shadow state,
 * collects the matching queue-cids and issues a single rx-queues-update
 * ramrod covering all of them. A response TLV is always sent back.
 */
static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u8 qid_usage_idx;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	/* There shouldn't exist a VF that uses queue-qids yet uses this
	 * API with multiple Rx queues. Validate this.
	 */
	if ((vf->acquire.vfdev_info.capabilities &
	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] supports QIDs but sends multiple queues\n",
			   vf->relative_vf_id);
		goto out;
	}

	/* Validate inputs - for the legacy case this is still true since
	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
	 */
	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
		if (!qed_iov_validate_rxq(p_hwfn, vf, i,
					  QED_IOV_VALIDATE_Q_NA) ||
		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
				   vf->relative_vf_id, req->rx_qid,
				   req->num_rxqs);
			goto out;
		}
	}

	/* Prepare the handlers */
	for (i = 0; i < req->num_rxqs; i++) {
		u16 qid = req->rx_qid + i;

		handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
	}

	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
					 req->num_rxqs,
					 complete_cqe_flg,
					 complete_event_flg,
					 QED_SPQ_MODE_EBLOCK, NULL);
	if (rc)
		goto out;

	status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}

/* Walk a TLV chain and return the first TLV matching @req_type.
 *
 * @p_tlvs_list points at a channel TLV buffer (bounded by
 * TLV_BUFFER_SIZE); returns NULL if the type is absent or the chain is
 * malformed (zero-length TLV or buffer overrun).
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		/* Guard against a malformed chain walking past the buffer */
		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}

/* Parse the ACTIVATE TLV (if present) into vport-update params and mark
 * it in @tlvs_mask.
 */
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}

/* Parse the VLAN_STRIP TLV (if present). The request is always recorded
 * in the VF shadow config, but the actual vport setting is skipped when
 * a vlan is being forced on the VF.
 */
static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}

/* Parse the TX_SWITCH TLV (if present) into vport-update params. */
static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
				     tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}

/* Parse the MCAST approximate-bins TLV (if present) into vport-update
 * params.
 */
static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	/* NOTE(review): copy length assumes both bins[] arrays are
	 * unsigned-long based - confirm against the TLV definition in
	 * qed_vf.h.
	 */
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}

/* Parse the ACCEPT_PARAM TLV (if present) into the accept-flags of the
 * vport-update params.
 */
static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}

/* Parse the ACCEPT_ANY_VLAN TLV (if present) into vport-update params. */
static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
				     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}

/* Parse and validate the RSS TLV (if present).
 *
 * Every indirection-table entry must reference an enabled Rx queue of
 * this VF; otherwise the whole RSS request is rejected - the bit is set
 * in @tlvs_mask (so the VF learns the TLV was seen) but not in
 * @tlvs_accepted.
 */
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx,
			    u16 *tlvs_mask, u16 *tlvs_accepted)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	bool b_reject = false;
	u16 table_size;
	u16 i, q_idx;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	/* Each VF gets its own RSS engine, offset by 1 past the PF's */
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_cid;

		q_idx = p_rss_tlv->rss_ind_table[i];
		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
				   vf->relative_vf_id, q_idx);
			b_reject = true;
			goto out;
		}

		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
		p_rss->rss_ind_table[i] = p_cid;
	}

	p_data->rss_params = p_rss;
out:
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
	if (!b_reject)
		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
}

/* Parse the SGE_TPA (LRO/aggregation) TLV (if present) into
 * @p_sge_tpa and attach it to the vport-update params.
 */
static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}

/* Sanitize a VF's requested accept-mode before applying it.
 *
 * Records the requested Rx/Tx accept filters in the public VF info and,
 * for untrusted VFs, silently strips the promiscuous (unmatched
 * unicast/multicast) bits. Always returns 0 - the VF is not told its
 * request was trimmed.
 */
static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
				    u8 vfid,
				    struct qed_sp_vport_update_params *params,
				    u16 *tlvs)
{
	u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	struct qed_filter_accept_flags *flags = &params->accept_flags;
	struct qed_public_vf_info *vf_info;

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);

	if (flags->update_rx_mode_config) {
		vf_info->rx_accept_mode = flags->rx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->rx_accept_filter &= ~mask;
	}

	if (flags->update_tx_mode_config) {
		vf_info->tx_accept_mode = flags->tx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->tx_accept_filter &= ~mask;
	}

	return 0;
}

/* Mailbox handler for a VF's VPORT_UPDATE request.
 *
 * Gathers all extended TLVs sent by the VF into a single
 * qed_sp_vport_update_params, validates/sanitizes them (RSS validation,
 * trust-based accept-mode trimming) and issues one vport-update ramrod.
 * The response reports both which TLVs were seen and which were
 * accepted, so the VF can tell a rejected request from a missing one.
 */
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_rss_params *p_rss_params = NULL;
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	u16 tlvs_mask = 0, tlvs_accepted = 0;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 length;
	int rc;

	/* Validate PF can send such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}
	/* RSS params are large; allocate rather than keep on stack */
	p_rss_params = vzalloc(sizeof(*p_rss_params));
	if (p_rss_params == NULL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	tlvs_accepted = tlvs_mask;

	/* Some of the extended TLVs need to be validated first; In that case,
	 * they can update the mask without updating the accepted [so that
	 * PF could communicate to VF it has rejected request].
	 */
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
				    mbx, &tlvs_mask, &tlvs_accepted);

	if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted)) {
		tlvs_accepted = 0;
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Upper-layer prevents VF vport configuration\n");
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);

	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	vfree(p_rss_params);
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_accepted);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

/* Track a VF's vlan filter request in its shadow configuration.
 *
 * Removals and flushes are always honored; additions are skipped while
 * a vlan is forced on the VF (the forced vlan overrides VF wishes).
 * The shadow holds QED_ETH_VF_NUM_VLAN_FILTERS + 1 entries - the extra
 * slot is for vlan 0.
 *
 * Return: 0 on success, -EINVAL if the entry doesn't exist (remove) or
 * no free slot remains (add).
 */
static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
					 struct qed_vf_info *p_vf,
					 struct qed_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}

/* Track a VF's MAC filter request in its shadow configuration.
 *
 * A forced MAC makes this a no-op; otherwise removals/flushes clear the
 * matching (or all) shadow slots and add/replace stores the new MAC in
 * the first free slot.
 *
 * Return: 0 on success, -EINVAL if the MAC wasn't found (remove) or no
 * free slot remains (add).
 */
static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					struct qed_filter_ucast *p_params)
{
	int i;

	/* If we're in forced-mode, we don't allow any change */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(p_vf->shadow_config.macs[i],
					     p_params->mac)) {
				eth_zero_addr(p_vf->shadow_config.macs[i]);
				break;
			}
		}

		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "MAC isn't configured\n");
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
			eth_zero_addr(p_vf->shadow_config.macs[i]);
	}

	/* List the new MAC address */
	if (p_params->opcode != QED_FILTER_ADD &&
	    p_params->opcode != QED_FILTER_REPLACE)
		return 0;

	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
			ether_addr_copy(p_vf->shadow_config.macs[i],
					p_params->mac);
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
		return -EINVAL;
	}

	return 0;
}

32178246d0b4SYuval Mintz static int 32188246d0b4SYuval Mintz qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, 32198246d0b4SYuval Mintz struct qed_vf_info *p_vf, 32208246d0b4SYuval Mintz struct qed_filter_ucast *p_params) 32218246d0b4SYuval Mintz { 32228246d0b4SYuval Mintz int rc = 0; 32238246d0b4SYuval Mintz 32248246d0b4SYuval Mintz if (p_params->type == QED_FILTER_MAC) { 32258246d0b4SYuval Mintz rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); 32268246d0b4SYuval Mintz if (rc) 32278246d0b4SYuval Mintz return rc; 32288246d0b4SYuval Mintz } 32298246d0b4SYuval Mintz 32308246d0b4SYuval Mintz if (p_params->type == QED_FILTER_VLAN) 32318246d0b4SYuval Mintz rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); 32328246d0b4SYuval Mintz 32338246d0b4SYuval Mintz return rc; 32348246d0b4SYuval Mintz } 32358246d0b4SYuval Mintz 3236ba56947aSBaoyou Xie static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, 3237dacd88d6SYuval Mintz int vfid, struct qed_filter_ucast *params) 3238dacd88d6SYuval Mintz { 3239dacd88d6SYuval Mintz struct qed_public_vf_info *vf; 3240dacd88d6SYuval Mintz 3241dacd88d6SYuval Mintz vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 3242dacd88d6SYuval Mintz if (!vf) 3243dacd88d6SYuval Mintz return -EINVAL; 3244dacd88d6SYuval Mintz 3245dacd88d6SYuval Mintz /* No real decision to make; Store the configured MAC */ 3246dacd88d6SYuval Mintz if (params->type == QED_FILTER_MAC || 3247dacd88d6SYuval Mintz params->type == QED_FILTER_MAC_VLAN) 3248dacd88d6SYuval Mintz ether_addr_copy(vf->mac, params->mac); 3249dacd88d6SYuval Mintz 3250dacd88d6SYuval Mintz return 0; 3251dacd88d6SYuval Mintz } 3252dacd88d6SYuval Mintz 3253dacd88d6SYuval Mintz static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, 3254dacd88d6SYuval Mintz struct qed_ptt *p_ptt, 3255dacd88d6SYuval Mintz struct qed_vf_info *vf) 3256dacd88d6SYuval Mintz { 325708feecd7SYuval Mintz struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; 3258dacd88d6SYuval Mintz struct 
qed_iov_vf_mbx *mbx = &vf->vf_mbx; 3259dacd88d6SYuval Mintz struct vfpf_ucast_filter_tlv *req; 3260dacd88d6SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 3261dacd88d6SYuval Mintz struct qed_filter_ucast params; 3262dacd88d6SYuval Mintz int rc; 3263dacd88d6SYuval Mintz 3264dacd88d6SYuval Mintz /* Prepare the unicast filter params */ 3265dacd88d6SYuval Mintz memset(¶ms, 0, sizeof(struct qed_filter_ucast)); 3266dacd88d6SYuval Mintz req = &mbx->req_virt->ucast_filter; 3267dacd88d6SYuval Mintz params.opcode = (enum qed_filter_opcode)req->opcode; 3268dacd88d6SYuval Mintz params.type = (enum qed_filter_ucast_type)req->type; 3269dacd88d6SYuval Mintz 3270dacd88d6SYuval Mintz params.is_rx_filter = 1; 3271dacd88d6SYuval Mintz params.is_tx_filter = 1; 3272dacd88d6SYuval Mintz params.vport_to_remove_from = vf->vport_id; 3273dacd88d6SYuval Mintz params.vport_to_add_to = vf->vport_id; 3274dacd88d6SYuval Mintz memcpy(params.mac, req->mac, ETH_ALEN); 3275dacd88d6SYuval Mintz params.vlan = req->vlan; 3276dacd88d6SYuval Mintz 3277dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 3278dacd88d6SYuval Mintz QED_MSG_IOV, 3279dacd88d6SYuval Mintz "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", 3280dacd88d6SYuval Mintz vf->abs_vf_id, params.opcode, params.type, 3281dacd88d6SYuval Mintz params.is_rx_filter ? "RX" : "", 3282dacd88d6SYuval Mintz params.is_tx_filter ? 
"TX" : "", 3283dacd88d6SYuval Mintz params.vport_to_add_to, 3284dacd88d6SYuval Mintz params.mac[0], params.mac[1], 3285dacd88d6SYuval Mintz params.mac[2], params.mac[3], 3286dacd88d6SYuval Mintz params.mac[4], params.mac[5], params.vlan); 3287dacd88d6SYuval Mintz 3288dacd88d6SYuval Mintz if (!vf->vport_instance) { 3289dacd88d6SYuval Mintz DP_VERBOSE(p_hwfn, 3290dacd88d6SYuval Mintz QED_MSG_IOV, 3291dacd88d6SYuval Mintz "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", 3292dacd88d6SYuval Mintz vf->abs_vf_id); 3293dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 3294dacd88d6SYuval Mintz goto out; 3295dacd88d6SYuval Mintz } 3296dacd88d6SYuval Mintz 329708feecd7SYuval Mintz /* Update shadow copy of the VF configuration */ 329808feecd7SYuval Mintz if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms)) { 329908feecd7SYuval Mintz status = PFVF_STATUS_FAILURE; 330008feecd7SYuval Mintz goto out; 330108feecd7SYuval Mintz } 330208feecd7SYuval Mintz 330308feecd7SYuval Mintz /* Determine if the unicast filtering is acceptible by PF */ 33041a635e48SYuval Mintz if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && 330508feecd7SYuval Mintz (params.type == QED_FILTER_VLAN || 330608feecd7SYuval Mintz params.type == QED_FILTER_MAC_VLAN)) { 330708feecd7SYuval Mintz /* Once VLAN is forced or PVID is set, do not allow 330808feecd7SYuval Mintz * to add/replace any further VLANs. 
330908feecd7SYuval Mintz */ 331008feecd7SYuval Mintz if (params.opcode == QED_FILTER_ADD || 331108feecd7SYuval Mintz params.opcode == QED_FILTER_REPLACE) 331208feecd7SYuval Mintz status = PFVF_STATUS_FORCED; 331308feecd7SYuval Mintz goto out; 331408feecd7SYuval Mintz } 331508feecd7SYuval Mintz 33161a635e48SYuval Mintz if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && 3317eff16960SYuval Mintz (params.type == QED_FILTER_MAC || 3318eff16960SYuval Mintz params.type == QED_FILTER_MAC_VLAN)) { 3319eff16960SYuval Mintz if (!ether_addr_equal(p_bulletin->mac, params.mac) || 3320eff16960SYuval Mintz (params.opcode != QED_FILTER_ADD && 3321eff16960SYuval Mintz params.opcode != QED_FILTER_REPLACE)) 3322eff16960SYuval Mintz status = PFVF_STATUS_FORCED; 3323eff16960SYuval Mintz goto out; 3324eff16960SYuval Mintz } 3325eff16960SYuval Mintz 3326dacd88d6SYuval Mintz rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); 3327dacd88d6SYuval Mintz if (rc) { 3328dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 3329dacd88d6SYuval Mintz goto out; 3330dacd88d6SYuval Mintz } 3331dacd88d6SYuval Mintz 3332dacd88d6SYuval Mintz rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, 3333dacd88d6SYuval Mintz QED_SPQ_MODE_CB, NULL); 3334dacd88d6SYuval Mintz if (rc) 3335dacd88d6SYuval Mintz status = PFVF_STATUS_FAILURE; 3336dacd88d6SYuval Mintz 3337dacd88d6SYuval Mintz out: 3338dacd88d6SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, 3339dacd88d6SYuval Mintz sizeof(struct pfvf_def_resp_tlv), status); 3340dacd88d6SYuval Mintz } 3341dacd88d6SYuval Mintz 33420b55e27dSYuval Mintz static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, 33430b55e27dSYuval Mintz struct qed_ptt *p_ptt, 33440b55e27dSYuval Mintz struct qed_vf_info *vf) 33450b55e27dSYuval Mintz { 33460b55e27dSYuval Mintz int i; 33470b55e27dSYuval Mintz 33480b55e27dSYuval Mintz /* Reset the SBs */ 33490b55e27dSYuval Mintz for (i = 0; i < vf->num_sbs; i++) 33500b55e27dSYuval Mintz 
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 33510b55e27dSYuval Mintz vf->igu_sbs[i], 33520b55e27dSYuval Mintz vf->opaque_fid, false); 33530b55e27dSYuval Mintz 33540b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, 33550b55e27dSYuval Mintz sizeof(struct pfvf_def_resp_tlv), 33560b55e27dSYuval Mintz PFVF_STATUS_SUCCESS); 33570b55e27dSYuval Mintz } 33580b55e27dSYuval Mintz 33590b55e27dSYuval Mintz static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, 33600b55e27dSYuval Mintz struct qed_ptt *p_ptt, struct qed_vf_info *vf) 33610b55e27dSYuval Mintz { 33620b55e27dSYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 33630b55e27dSYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 33640b55e27dSYuval Mintz 33650b55e27dSYuval Mintz /* Disable Interrupts for VF */ 33660b55e27dSYuval Mintz qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); 33670b55e27dSYuval Mintz 33680b55e27dSYuval Mintz /* Reset Permission table */ 33690b55e27dSYuval Mintz qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); 33700b55e27dSYuval Mintz 33710b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, 33720b55e27dSYuval Mintz length, status); 33730b55e27dSYuval Mintz } 33740b55e27dSYuval Mintz 33750b55e27dSYuval Mintz static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, 33760b55e27dSYuval Mintz struct qed_ptt *p_ptt, 33770b55e27dSYuval Mintz struct qed_vf_info *p_vf) 33780b55e27dSYuval Mintz { 33790b55e27dSYuval Mintz u16 length = sizeof(struct pfvf_def_resp_tlv); 33801fe614d1SYuval Mintz u8 status = PFVF_STATUS_SUCCESS; 33811fe614d1SYuval Mintz int rc = 0; 33820b55e27dSYuval Mintz 33830b55e27dSYuval Mintz qed_iov_vf_cleanup(p_hwfn, p_vf); 33840b55e27dSYuval Mintz 33851fe614d1SYuval Mintz if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { 33861fe614d1SYuval Mintz /* Stopping the VF */ 33871fe614d1SYuval Mintz rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, 33881fe614d1SYuval Mintz p_vf->opaque_fid); 33891fe614d1SYuval Mintz 
33901fe614d1SYuval Mintz if (rc) { 33911fe614d1SYuval Mintz DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", 33921fe614d1SYuval Mintz rc); 33931fe614d1SYuval Mintz status = PFVF_STATUS_FAILURE; 33941fe614d1SYuval Mintz } 33951fe614d1SYuval Mintz 33961fe614d1SYuval Mintz p_vf->state = VF_STOPPED; 33971fe614d1SYuval Mintz } 33981fe614d1SYuval Mintz 33990b55e27dSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, 34001fe614d1SYuval Mintz length, status); 34010b55e27dSYuval Mintz } 34020b55e27dSYuval Mintz 34030b55e27dSYuval Mintz static int 34040b55e27dSYuval Mintz qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, 34050b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 34060b55e27dSYuval Mintz { 34070b55e27dSYuval Mintz int cnt; 34080b55e27dSYuval Mintz u32 val; 34090b55e27dSYuval Mintz 34100b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); 34110b55e27dSYuval Mintz 34120b55e27dSYuval Mintz for (cnt = 0; cnt < 50; cnt++) { 34130b55e27dSYuval Mintz val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); 34140b55e27dSYuval Mintz if (!val) 34150b55e27dSYuval Mintz break; 34160b55e27dSYuval Mintz msleep(20); 34170b55e27dSYuval Mintz } 34180b55e27dSYuval Mintz qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 34190b55e27dSYuval Mintz 34200b55e27dSYuval Mintz if (cnt == 50) { 34210b55e27dSYuval Mintz DP_ERR(p_hwfn, 34220b55e27dSYuval Mintz "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", 34230b55e27dSYuval Mintz p_vf->abs_vf_id, val); 34240b55e27dSYuval Mintz return -EBUSY; 34250b55e27dSYuval Mintz } 34260b55e27dSYuval Mintz 34270b55e27dSYuval Mintz return 0; 34280b55e27dSYuval Mintz } 34290b55e27dSYuval Mintz 34300b55e27dSYuval Mintz static int 34310b55e27dSYuval Mintz qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, 34320b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 34330b55e27dSYuval Mintz { 34340b55e27dSYuval Mintz u32 cons[MAX_NUM_VOQS], 
distance[MAX_NUM_VOQS]; 34350b55e27dSYuval Mintz int i, cnt; 34360b55e27dSYuval Mintz 34370b55e27dSYuval Mintz /* Read initial consumers & producers */ 34380b55e27dSYuval Mintz for (i = 0; i < MAX_NUM_VOQS; i++) { 34390b55e27dSYuval Mintz u32 prod; 34400b55e27dSYuval Mintz 34410b55e27dSYuval Mintz cons[i] = qed_rd(p_hwfn, p_ptt, 34420b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 34430b55e27dSYuval Mintz i * 0x40); 34440b55e27dSYuval Mintz prod = qed_rd(p_hwfn, p_ptt, 34450b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + 34460b55e27dSYuval Mintz i * 0x40); 34470b55e27dSYuval Mintz distance[i] = prod - cons[i]; 34480b55e27dSYuval Mintz } 34490b55e27dSYuval Mintz 34500b55e27dSYuval Mintz /* Wait for consumers to pass the producers */ 34510b55e27dSYuval Mintz i = 0; 34520b55e27dSYuval Mintz for (cnt = 0; cnt < 50; cnt++) { 34530b55e27dSYuval Mintz for (; i < MAX_NUM_VOQS; i++) { 34540b55e27dSYuval Mintz u32 tmp; 34550b55e27dSYuval Mintz 34560b55e27dSYuval Mintz tmp = qed_rd(p_hwfn, p_ptt, 34570b55e27dSYuval Mintz PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + 34580b55e27dSYuval Mintz i * 0x40); 34590b55e27dSYuval Mintz if (distance[i] > tmp - cons[i]) 34600b55e27dSYuval Mintz break; 34610b55e27dSYuval Mintz } 34620b55e27dSYuval Mintz 34630b55e27dSYuval Mintz if (i == MAX_NUM_VOQS) 34640b55e27dSYuval Mintz break; 34650b55e27dSYuval Mintz 34660b55e27dSYuval Mintz msleep(20); 34670b55e27dSYuval Mintz } 34680b55e27dSYuval Mintz 34690b55e27dSYuval Mintz if (cnt == 50) { 34700b55e27dSYuval Mintz DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", 34710b55e27dSYuval Mintz p_vf->abs_vf_id, i); 34720b55e27dSYuval Mintz return -EBUSY; 34730b55e27dSYuval Mintz } 34740b55e27dSYuval Mintz 34750b55e27dSYuval Mintz return 0; 34760b55e27dSYuval Mintz } 34770b55e27dSYuval Mintz 34780b55e27dSYuval Mintz static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, 34790b55e27dSYuval Mintz struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) 34800b55e27dSYuval Mintz { 
34810b55e27dSYuval Mintz int rc; 34820b55e27dSYuval Mintz 34830b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); 34840b55e27dSYuval Mintz if (rc) 34850b55e27dSYuval Mintz return rc; 34860b55e27dSYuval Mintz 34870b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); 34880b55e27dSYuval Mintz if (rc) 34890b55e27dSYuval Mintz return rc; 34900b55e27dSYuval Mintz 34910b55e27dSYuval Mintz return 0; 34920b55e27dSYuval Mintz } 34930b55e27dSYuval Mintz 34940b55e27dSYuval Mintz static int 34950b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, 34960b55e27dSYuval Mintz struct qed_ptt *p_ptt, 34970b55e27dSYuval Mintz u16 rel_vf_id, u32 *ack_vfs) 34980b55e27dSYuval Mintz { 34990b55e27dSYuval Mintz struct qed_vf_info *p_vf; 35000b55e27dSYuval Mintz int rc = 0; 35010b55e27dSYuval Mintz 35020b55e27dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); 35030b55e27dSYuval Mintz if (!p_vf) 35040b55e27dSYuval Mintz return 0; 35050b55e27dSYuval Mintz 35060b55e27dSYuval Mintz if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & 35070b55e27dSYuval Mintz (1ULL << (rel_vf_id % 64))) { 35080b55e27dSYuval Mintz u16 vfid = p_vf->abs_vf_id; 35090b55e27dSYuval Mintz 35100b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 35110b55e27dSYuval Mintz "VF[%d] - Handling FLR\n", vfid); 35120b55e27dSYuval Mintz 35130b55e27dSYuval Mintz qed_iov_vf_cleanup(p_hwfn, p_vf); 35140b55e27dSYuval Mintz 35150b55e27dSYuval Mintz /* If VF isn't active, no need for anything but SW */ 35160b55e27dSYuval Mintz if (!p_vf->b_init) 35170b55e27dSYuval Mintz goto cleanup; 35180b55e27dSYuval Mintz 35190b55e27dSYuval Mintz rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); 35200b55e27dSYuval Mintz if (rc) 35210b55e27dSYuval Mintz goto cleanup; 35220b55e27dSYuval Mintz 35230b55e27dSYuval Mintz rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); 35240b55e27dSYuval Mintz if (rc) { 35250b55e27dSYuval Mintz DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", 
vfid); 35260b55e27dSYuval Mintz return rc; 35270b55e27dSYuval Mintz } 35280b55e27dSYuval Mintz 35297eff82b0SYuval Mintz /* Workaround to make VF-PF channel ready, as FW 35307eff82b0SYuval Mintz * doesn't do that as a part of FLR. 35317eff82b0SYuval Mintz */ 35327eff82b0SYuval Mintz REG_WR(p_hwfn, 35337eff82b0SYuval Mintz GTT_BAR0_MAP_REG_USDM_RAM + 35347eff82b0SYuval Mintz USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); 35357eff82b0SYuval Mintz 35360b55e27dSYuval Mintz /* VF_STOPPED has to be set only after final cleanup 35370b55e27dSYuval Mintz * but prior to re-enabling the VF. 35380b55e27dSYuval Mintz */ 35390b55e27dSYuval Mintz p_vf->state = VF_STOPPED; 35400b55e27dSYuval Mintz 35410b55e27dSYuval Mintz rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); 35420b55e27dSYuval Mintz if (rc) { 35430b55e27dSYuval Mintz DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", 35440b55e27dSYuval Mintz vfid); 35450b55e27dSYuval Mintz return rc; 35460b55e27dSYuval Mintz } 35470b55e27dSYuval Mintz cleanup: 35480b55e27dSYuval Mintz /* Mark VF for ack and clean pending state */ 35490b55e27dSYuval Mintz if (p_vf->state == VF_RESET) 35500b55e27dSYuval Mintz p_vf->state = VF_STOPPED; 35511a635e48SYuval Mintz ack_vfs[vfid / 32] |= BIT((vfid % 32)); 35520b55e27dSYuval Mintz p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= 35530b55e27dSYuval Mintz ~(1ULL << (rel_vf_id % 64)); 3554fd3c615aSMintz, Yuval p_vf->vf_mbx.b_pending_msg = false; 35550b55e27dSYuval Mintz } 35560b55e27dSYuval Mintz 35570b55e27dSYuval Mintz return rc; 35580b55e27dSYuval Mintz } 35590b55e27dSYuval Mintz 3560ba56947aSBaoyou Xie static int 3561ba56947aSBaoyou Xie qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 35620b55e27dSYuval Mintz { 35630b55e27dSYuval Mintz u32 ack_vfs[VF_MAX_STATIC / 32]; 35640b55e27dSYuval Mintz int rc = 0; 35650b55e27dSYuval Mintz u16 i; 35660b55e27dSYuval Mintz 35670b55e27dSYuval Mintz memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); 35680b55e27dSYuval Mintz 
35690b55e27dSYuval Mintz /* Since BRB <-> PRS interface can't be tested as part of the flr 35700b55e27dSYuval Mintz * polling due to HW limitations, simply sleep a bit. And since 35710b55e27dSYuval Mintz * there's no need to wait per-vf, do it before looping. 35720b55e27dSYuval Mintz */ 35730b55e27dSYuval Mintz msleep(100); 35740b55e27dSYuval Mintz 35750b55e27dSYuval Mintz for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) 35760b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); 35770b55e27dSYuval Mintz 35780b55e27dSYuval Mintz rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); 35790b55e27dSYuval Mintz return rc; 35800b55e27dSYuval Mintz } 35810b55e27dSYuval Mintz 3582cccf6f5cSMintz, Yuval bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) 35830b55e27dSYuval Mintz { 3584cccf6f5cSMintz, Yuval bool found = false; 3585cccf6f5cSMintz, Yuval u16 i; 35860b55e27dSYuval Mintz 35870b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); 35880b55e27dSYuval Mintz for (i = 0; i < (VF_MAX_STATIC / 32); i++) 35890b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 35900b55e27dSYuval Mintz "[%08x,...,%08x]: %08x\n", 35910b55e27dSYuval Mintz i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); 35920b55e27dSYuval Mintz 35930b55e27dSYuval Mintz if (!p_hwfn->cdev->p_iov_info) { 35940b55e27dSYuval Mintz DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); 3595cccf6f5cSMintz, Yuval return false; 35960b55e27dSYuval Mintz } 35970b55e27dSYuval Mintz 35980b55e27dSYuval Mintz /* Mark VFs */ 35990b55e27dSYuval Mintz for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { 36000b55e27dSYuval Mintz struct qed_vf_info *p_vf; 36010b55e27dSYuval Mintz u8 vfid; 36020b55e27dSYuval Mintz 36030b55e27dSYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, i, false); 36040b55e27dSYuval Mintz if (!p_vf) 36050b55e27dSYuval Mintz continue; 36060b55e27dSYuval Mintz 36070b55e27dSYuval Mintz vfid = p_vf->abs_vf_id; 36081a635e48SYuval Mintz if (BIT((vfid % 
32)) & p_disabled_vfs[vfid / 32]) { 36090b55e27dSYuval Mintz u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; 36100b55e27dSYuval Mintz u16 rel_vf_id = p_vf->relative_vf_id; 36110b55e27dSYuval Mintz 36120b55e27dSYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 36130b55e27dSYuval Mintz "VF[%d] [rel %d] got FLR-ed\n", 36140b55e27dSYuval Mintz vfid, rel_vf_id); 36150b55e27dSYuval Mintz 36160b55e27dSYuval Mintz p_vf->state = VF_RESET; 36170b55e27dSYuval Mintz 36180b55e27dSYuval Mintz /* No need to lock here, since pending_flr should 36190b55e27dSYuval Mintz * only change here and before ACKing MFw. Since 36200b55e27dSYuval Mintz * MFW will not trigger an additional attention for 36210b55e27dSYuval Mintz * VF flr until ACKs, we're safe. 36220b55e27dSYuval Mintz */ 36230b55e27dSYuval Mintz p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); 3624cccf6f5cSMintz, Yuval found = true; 36250b55e27dSYuval Mintz } 36260b55e27dSYuval Mintz } 36270b55e27dSYuval Mintz 36280b55e27dSYuval Mintz return found; 36290b55e27dSYuval Mintz } 36300b55e27dSYuval Mintz 363173390ac9SYuval Mintz static void qed_iov_get_link(struct qed_hwfn *p_hwfn, 363273390ac9SYuval Mintz u16 vfid, 363373390ac9SYuval Mintz struct qed_mcp_link_params *p_params, 363473390ac9SYuval Mintz struct qed_mcp_link_state *p_link, 363573390ac9SYuval Mintz struct qed_mcp_link_capabilities *p_caps) 363673390ac9SYuval Mintz { 363773390ac9SYuval Mintz struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 363873390ac9SYuval Mintz vfid, 363973390ac9SYuval Mintz false); 364073390ac9SYuval Mintz struct qed_bulletin_content *p_bulletin; 364173390ac9SYuval Mintz 364273390ac9SYuval Mintz if (!p_vf) 364373390ac9SYuval Mintz return; 364473390ac9SYuval Mintz 364573390ac9SYuval Mintz p_bulletin = p_vf->bulletin.p_virt; 364673390ac9SYuval Mintz 364773390ac9SYuval Mintz if (p_params) 364873390ac9SYuval Mintz __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); 364973390ac9SYuval Mintz if (p_link) 365073390ac9SYuval Mintz 
__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); 365173390ac9SYuval Mintz if (p_caps) 365273390ac9SYuval Mintz __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); 365373390ac9SYuval Mintz } 365473390ac9SYuval Mintz 365537bff2b9SYuval Mintz static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, 365637bff2b9SYuval Mintz struct qed_ptt *p_ptt, int vfid) 365737bff2b9SYuval Mintz { 365837bff2b9SYuval Mintz struct qed_iov_vf_mbx *mbx; 365937bff2b9SYuval Mintz struct qed_vf_info *p_vf; 366037bff2b9SYuval Mintz 366137bff2b9SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 366237bff2b9SYuval Mintz if (!p_vf) 366337bff2b9SYuval Mintz return; 366437bff2b9SYuval Mintz 366537bff2b9SYuval Mintz mbx = &p_vf->vf_mbx; 366637bff2b9SYuval Mintz 366737bff2b9SYuval Mintz /* qed_iov_process_mbx_request */ 3668fd3c615aSMintz, Yuval if (!mbx->b_pending_msg) { 3669fd3c615aSMintz, Yuval DP_NOTICE(p_hwfn, 3670fd3c615aSMintz, Yuval "VF[%02x]: Trying to process mailbox message when none is pending\n", 3671fd3c615aSMintz, Yuval p_vf->abs_vf_id); 3672fd3c615aSMintz, Yuval return; 3673fd3c615aSMintz, Yuval } 3674fd3c615aSMintz, Yuval mbx->b_pending_msg = false; 367537bff2b9SYuval Mintz 367637bff2b9SYuval Mintz mbx->first_tlv = mbx->req_virt->first_tlv; 367737bff2b9SYuval Mintz 3678fd3c615aSMintz, Yuval DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3679fd3c615aSMintz, Yuval "VF[%02x]: Processing mailbox message [type %04x]\n", 3680fd3c615aSMintz, Yuval p_vf->abs_vf_id, mbx->first_tlv.tl.type); 3681fd3c615aSMintz, Yuval 368237bff2b9SYuval Mintz /* check if tlv type is known */ 36837eff82b0SYuval Mintz if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && 36847eff82b0SYuval Mintz !p_vf->b_malicious) { 36851408cc1fSYuval Mintz switch (mbx->first_tlv.tl.type) { 36861408cc1fSYuval Mintz case CHANNEL_TLV_ACQUIRE: 36871408cc1fSYuval Mintz qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); 36881408cc1fSYuval Mintz break; 3689dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_START: 3690dacd88d6SYuval Mintz 
qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); 3691dacd88d6SYuval Mintz break; 3692dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_TEARDOWN: 3693dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); 3694dacd88d6SYuval Mintz break; 3695dacd88d6SYuval Mintz case CHANNEL_TLV_START_RXQ: 3696dacd88d6SYuval Mintz qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); 3697dacd88d6SYuval Mintz break; 3698dacd88d6SYuval Mintz case CHANNEL_TLV_START_TXQ: 3699dacd88d6SYuval Mintz qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); 3700dacd88d6SYuval Mintz break; 3701dacd88d6SYuval Mintz case CHANNEL_TLV_STOP_RXQS: 3702dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); 3703dacd88d6SYuval Mintz break; 3704dacd88d6SYuval Mintz case CHANNEL_TLV_STOP_TXQS: 3705dacd88d6SYuval Mintz qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); 3706dacd88d6SYuval Mintz break; 370717b235c1SYuval Mintz case CHANNEL_TLV_UPDATE_RXQ: 370817b235c1SYuval Mintz qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); 370917b235c1SYuval Mintz break; 3710dacd88d6SYuval Mintz case CHANNEL_TLV_VPORT_UPDATE: 3711dacd88d6SYuval Mintz qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); 3712dacd88d6SYuval Mintz break; 3713dacd88d6SYuval Mintz case CHANNEL_TLV_UCAST_FILTER: 3714dacd88d6SYuval Mintz qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); 3715dacd88d6SYuval Mintz break; 37160b55e27dSYuval Mintz case CHANNEL_TLV_CLOSE: 37170b55e27dSYuval Mintz qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); 37180b55e27dSYuval Mintz break; 37190b55e27dSYuval Mintz case CHANNEL_TLV_INT_CLEANUP: 37200b55e27dSYuval Mintz qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); 37210b55e27dSYuval Mintz break; 37220b55e27dSYuval Mintz case CHANNEL_TLV_RELEASE: 37230b55e27dSYuval Mintz qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); 37240b55e27dSYuval Mintz break; 3725eaf3c0c6SChopra, Manish case CHANNEL_TLV_UPDATE_TUNN_PARAM: 3726eaf3c0c6SChopra, Manish qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); 3727eaf3c0c6SChopra, Manish 
break; 37281408cc1fSYuval Mintz } 37297eff82b0SYuval Mintz } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { 37307eff82b0SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 37317eff82b0SYuval Mintz "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", 37327eff82b0SYuval Mintz p_vf->abs_vf_id, mbx->first_tlv.tl.type); 37337eff82b0SYuval Mintz 37347eff82b0SYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 37357eff82b0SYuval Mintz mbx->first_tlv.tl.type, 37367eff82b0SYuval Mintz sizeof(struct pfvf_def_resp_tlv), 37377eff82b0SYuval Mintz PFVF_STATUS_MALICIOUS); 373837bff2b9SYuval Mintz } else { 373937bff2b9SYuval Mintz /* unknown TLV - this may belong to a VF driver from the future 374037bff2b9SYuval Mintz * - a version written after this PF driver was written, which 374137bff2b9SYuval Mintz * supports features unknown as of yet. Too bad since we don't 374237bff2b9SYuval Mintz * support them. Or this may be because someone wrote a crappy 374337bff2b9SYuval Mintz * VF driver and is sending garbage over the channel. 374437bff2b9SYuval Mintz */ 374554fdd80fSYuval Mintz DP_NOTICE(p_hwfn, 374654fdd80fSYuval Mintz "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", 374754fdd80fSYuval Mintz p_vf->abs_vf_id, 374854fdd80fSYuval Mintz mbx->first_tlv.tl.type, 374954fdd80fSYuval Mintz mbx->first_tlv.tl.length, 375054fdd80fSYuval Mintz mbx->first_tlv.padding, mbx->first_tlv.reply_address); 375137bff2b9SYuval Mintz 375254fdd80fSYuval Mintz /* Try replying in case reply address matches the acquisition's 375354fdd80fSYuval Mintz * posted address. 
375454fdd80fSYuval Mintz */ 375554fdd80fSYuval Mintz if (p_vf->acquire.first_tlv.reply_address && 375654fdd80fSYuval Mintz (mbx->first_tlv.reply_address == 375754fdd80fSYuval Mintz p_vf->acquire.first_tlv.reply_address)) { 375854fdd80fSYuval Mintz qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, 375954fdd80fSYuval Mintz mbx->first_tlv.tl.type, 376054fdd80fSYuval Mintz sizeof(struct pfvf_def_resp_tlv), 376154fdd80fSYuval Mintz PFVF_STATUS_NOT_SUPPORTED); 376254fdd80fSYuval Mintz } else { 376337bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, 376437bff2b9SYuval Mintz QED_MSG_IOV, 376554fdd80fSYuval Mintz "VF[%02x]: Can't respond to TLV - no valid reply address\n", 376654fdd80fSYuval Mintz p_vf->abs_vf_id); 376737bff2b9SYuval Mintz } 376837bff2b9SYuval Mintz } 376937bff2b9SYuval Mintz } 377037bff2b9SYuval Mintz 3771fd3c615aSMintz, Yuval void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) 377237bff2b9SYuval Mintz { 3773fd3c615aSMintz, Yuval int i; 377437bff2b9SYuval Mintz 3775fd3c615aSMintz, Yuval memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); 3776fd3c615aSMintz, Yuval 3777fd3c615aSMintz, Yuval qed_for_each_vf(p_hwfn, i) { 3778fd3c615aSMintz, Yuval struct qed_vf_info *p_vf; 3779fd3c615aSMintz, Yuval 3780fd3c615aSMintz, Yuval p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; 3781fd3c615aSMintz, Yuval if (p_vf->vf_mbx.b_pending_msg) 3782fd3c615aSMintz, Yuval events[i / 64] |= 1ULL << (i % 64); 378337bff2b9SYuval Mintz } 378437bff2b9SYuval Mintz } 378537bff2b9SYuval Mintz 37867eff82b0SYuval Mintz static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, 37877eff82b0SYuval Mintz u16 abs_vfid) 37887eff82b0SYuval Mintz { 37897eff82b0SYuval Mintz u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; 37907eff82b0SYuval Mintz 37917eff82b0SYuval Mintz if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { 37927eff82b0SYuval Mintz DP_VERBOSE(p_hwfn, 37937eff82b0SYuval Mintz QED_MSG_IOV, 37947eff82b0SYuval Mintz "Got indication for VF [abs 
0x%08x] that cannot be handled by PF\n", 37957eff82b0SYuval Mintz abs_vfid); 37967eff82b0SYuval Mintz return NULL; 37977eff82b0SYuval Mintz } 37987eff82b0SYuval Mintz 37997eff82b0SYuval Mintz return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; 38007eff82b0SYuval Mintz } 38017eff82b0SYuval Mintz 380237bff2b9SYuval Mintz static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, 380337bff2b9SYuval Mintz u16 abs_vfid, struct regpair *vf_msg) 380437bff2b9SYuval Mintz { 38057eff82b0SYuval Mintz struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, 380637bff2b9SYuval Mintz abs_vfid); 38077eff82b0SYuval Mintz 38087eff82b0SYuval Mintz if (!p_vf) 380937bff2b9SYuval Mintz return 0; 381037bff2b9SYuval Mintz 381137bff2b9SYuval Mintz /* List the physical address of the request so that handler 381237bff2b9SYuval Mintz * could later on copy the message from it. 381337bff2b9SYuval Mintz */ 381437bff2b9SYuval Mintz p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; 381537bff2b9SYuval Mintz 381637bff2b9SYuval Mintz /* Mark the event and schedule the workqueue */ 3817fd3c615aSMintz, Yuval p_vf->vf_mbx.b_pending_msg = true; 381837bff2b9SYuval Mintz qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); 381937bff2b9SYuval Mintz 382037bff2b9SYuval Mintz return 0; 382137bff2b9SYuval Mintz } 382237bff2b9SYuval Mintz 38237eff82b0SYuval Mintz static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, 38247eff82b0SYuval Mintz struct malicious_vf_eqe_data *p_data) 38257eff82b0SYuval Mintz { 38267eff82b0SYuval Mintz struct qed_vf_info *p_vf; 38277eff82b0SYuval Mintz 38287eff82b0SYuval Mintz p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); 38297eff82b0SYuval Mintz 38307eff82b0SYuval Mintz if (!p_vf) 38317eff82b0SYuval Mintz return; 38327eff82b0SYuval Mintz 3833e99a21cbSMintz, Yuval if (!p_vf->b_malicious) { 3834e99a21cbSMintz, Yuval DP_NOTICE(p_hwfn, 38357eff82b0SYuval Mintz "VF [%d] - Malicious behavior [%02x]\n", 38367eff82b0SYuval Mintz p_vf->abs_vf_id, 
p_data->err_id); 38377eff82b0SYuval Mintz 38387eff82b0SYuval Mintz p_vf->b_malicious = true; 3839e99a21cbSMintz, Yuval } else { 3840e99a21cbSMintz, Yuval DP_INFO(p_hwfn, 3841e99a21cbSMintz, Yuval "VF [%d] - Malicious behavior [%02x]\n", 3842e99a21cbSMintz, Yuval p_vf->abs_vf_id, p_data->err_id); 3843e99a21cbSMintz, Yuval } 38447eff82b0SYuval Mintz } 38457eff82b0SYuval Mintz 38466c9e80eaSMichal Kalderon static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, 38476c9e80eaSMichal Kalderon u8 opcode, 38486c9e80eaSMichal Kalderon __le16 echo, 38496c9e80eaSMichal Kalderon union event_ring_data *data, u8 fw_return_code) 385037bff2b9SYuval Mintz { 385137bff2b9SYuval Mintz switch (opcode) { 385237bff2b9SYuval Mintz case COMMON_EVENT_VF_PF_CHANNEL: 385337bff2b9SYuval Mintz return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), 385437bff2b9SYuval Mintz &data->vf_pf_channel.msg_addr); 38557eff82b0SYuval Mintz case COMMON_EVENT_MALICIOUS_VF: 38567eff82b0SYuval Mintz qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); 38577eff82b0SYuval Mintz return 0; 385837bff2b9SYuval Mintz default: 385937bff2b9SYuval Mintz DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", 386037bff2b9SYuval Mintz opcode); 386137bff2b9SYuval Mintz return -EINVAL; 386237bff2b9SYuval Mintz } 386337bff2b9SYuval Mintz } 386437bff2b9SYuval Mintz 386532a47e72SYuval Mintz u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 386632a47e72SYuval Mintz { 386732a47e72SYuval Mintz struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 386832a47e72SYuval Mintz u16 i; 386932a47e72SYuval Mintz 387032a47e72SYuval Mintz if (!p_iov) 387132a47e72SYuval Mintz goto out; 387232a47e72SYuval Mintz 387332a47e72SYuval Mintz for (i = rel_vf_id; i < p_iov->total_vfs; i++) 38747eff82b0SYuval Mintz if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false)) 387532a47e72SYuval Mintz return i; 387632a47e72SYuval Mintz 387732a47e72SYuval Mintz out: 387832a47e72SYuval Mintz return MAX_NUM_VFS; 387932a47e72SYuval 
Mintz } 388037bff2b9SYuval Mintz 388137bff2b9SYuval Mintz static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, 388237bff2b9SYuval Mintz int vfid) 388337bff2b9SYuval Mintz { 388437bff2b9SYuval Mintz struct qed_dmae_params params; 388537bff2b9SYuval Mintz struct qed_vf_info *vf_info; 388637bff2b9SYuval Mintz 388737bff2b9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 388837bff2b9SYuval Mintz if (!vf_info) 388937bff2b9SYuval Mintz return -EINVAL; 389037bff2b9SYuval Mintz 389137bff2b9SYuval Mintz memset(¶ms, 0, sizeof(struct qed_dmae_params)); 389237bff2b9SYuval Mintz params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST; 389337bff2b9SYuval Mintz params.src_vfid = vf_info->abs_vf_id; 389437bff2b9SYuval Mintz 389537bff2b9SYuval Mintz if (qed_dmae_host2host(p_hwfn, ptt, 389637bff2b9SYuval Mintz vf_info->vf_mbx.pending_req, 389737bff2b9SYuval Mintz vf_info->vf_mbx.req_phys, 389837bff2b9SYuval Mintz sizeof(union vfpf_tlvs) / 4, ¶ms)) { 389937bff2b9SYuval Mintz DP_VERBOSE(p_hwfn, QED_MSG_IOV, 390037bff2b9SYuval Mintz "Failed to copy message from VF 0x%02x\n", vfid); 390137bff2b9SYuval Mintz 390237bff2b9SYuval Mintz return -EIO; 390337bff2b9SYuval Mintz } 390437bff2b9SYuval Mintz 390537bff2b9SYuval Mintz return 0; 390637bff2b9SYuval Mintz } 390737bff2b9SYuval Mintz 3908eff16960SYuval Mintz static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, 3909eff16960SYuval Mintz u8 *mac, int vfid) 3910eff16960SYuval Mintz { 3911eff16960SYuval Mintz struct qed_vf_info *vf_info; 3912eff16960SYuval Mintz u64 feature; 3913eff16960SYuval Mintz 3914eff16960SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 3915eff16960SYuval Mintz if (!vf_info) { 3916eff16960SYuval Mintz DP_NOTICE(p_hwfn->cdev, 3917eff16960SYuval Mintz "Can not set forced MAC, invalid vfid [%d]\n", vfid); 3918eff16960SYuval Mintz return; 3919eff16960SYuval Mintz } 3920eff16960SYuval Mintz 39217eff82b0SYuval Mintz if 
(vf_info->b_malicious) { 39227eff82b0SYuval Mintz DP_NOTICE(p_hwfn->cdev, 39237eff82b0SYuval Mintz "Can't set forced MAC to malicious VF [%d]\n", vfid); 39247eff82b0SYuval Mintz return; 39257eff82b0SYuval Mintz } 39267eff82b0SYuval Mintz 3927eff16960SYuval Mintz feature = 1 << MAC_ADDR_FORCED; 3928eff16960SYuval Mintz memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); 3929eff16960SYuval Mintz 3930eff16960SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap |= feature; 3931eff16960SYuval Mintz /* Forced MAC will disable MAC_ADDR */ 39321a635e48SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); 3933eff16960SYuval Mintz 3934eff16960SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 3935eff16960SYuval Mintz } 3936eff16960SYuval Mintz 3937ba56947aSBaoyou Xie static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, 393808feecd7SYuval Mintz u16 pvid, int vfid) 393908feecd7SYuval Mintz { 394008feecd7SYuval Mintz struct qed_vf_info *vf_info; 394108feecd7SYuval Mintz u64 feature; 394208feecd7SYuval Mintz 394308feecd7SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 394408feecd7SYuval Mintz if (!vf_info) { 394508feecd7SYuval Mintz DP_NOTICE(p_hwfn->cdev, 394608feecd7SYuval Mintz "Can not set forced MAC, invalid vfid [%d]\n", vfid); 394708feecd7SYuval Mintz return; 394808feecd7SYuval Mintz } 394908feecd7SYuval Mintz 39507eff82b0SYuval Mintz if (vf_info->b_malicious) { 39517eff82b0SYuval Mintz DP_NOTICE(p_hwfn->cdev, 39527eff82b0SYuval Mintz "Can't set forced vlan to malicious VF [%d]\n", vfid); 39537eff82b0SYuval Mintz return; 39547eff82b0SYuval Mintz } 39557eff82b0SYuval Mintz 395608feecd7SYuval Mintz feature = 1 << VLAN_ADDR_FORCED; 395708feecd7SYuval Mintz vf_info->bulletin.p_virt->pvid = pvid; 395808feecd7SYuval Mintz if (pvid) 395908feecd7SYuval Mintz vf_info->bulletin.p_virt->valid_bitmap |= feature; 396008feecd7SYuval Mintz else 396108feecd7SYuval Mintz 
vf_info->bulletin.p_virt->valid_bitmap &= ~feature; 396208feecd7SYuval Mintz 396308feecd7SYuval Mintz qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 396408feecd7SYuval Mintz } 396508feecd7SYuval Mintz 396697379f15SChopra, Manish void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, 396797379f15SChopra, Manish int vfid, u16 vxlan_port, u16 geneve_port) 396897379f15SChopra, Manish { 396997379f15SChopra, Manish struct qed_vf_info *vf_info; 397097379f15SChopra, Manish 397197379f15SChopra, Manish vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 397297379f15SChopra, Manish if (!vf_info) { 397397379f15SChopra, Manish DP_NOTICE(p_hwfn->cdev, 397497379f15SChopra, Manish "Can not set udp ports, invalid vfid [%d]\n", vfid); 397597379f15SChopra, Manish return; 397697379f15SChopra, Manish } 397797379f15SChopra, Manish 397897379f15SChopra, Manish if (vf_info->b_malicious) { 397997379f15SChopra, Manish DP_VERBOSE(p_hwfn, QED_MSG_IOV, 398097379f15SChopra, Manish "Can not set udp ports to malicious VF [%d]\n", 398197379f15SChopra, Manish vfid); 398297379f15SChopra, Manish return; 398397379f15SChopra, Manish } 398497379f15SChopra, Manish 398597379f15SChopra, Manish vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; 398697379f15SChopra, Manish vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; 398797379f15SChopra, Manish } 398897379f15SChopra, Manish 39896ddc7608SYuval Mintz static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) 39906ddc7608SYuval Mintz { 39916ddc7608SYuval Mintz struct qed_vf_info *p_vf_info; 39926ddc7608SYuval Mintz 39936ddc7608SYuval Mintz p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 39946ddc7608SYuval Mintz if (!p_vf_info) 39956ddc7608SYuval Mintz return false; 39966ddc7608SYuval Mintz 39976ddc7608SYuval Mintz return !!p_vf_info->vport_instance; 39986ddc7608SYuval Mintz } 39996ddc7608SYuval Mintz 4000ba56947aSBaoyou Xie static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) 
40010b55e27dSYuval Mintz { 40020b55e27dSYuval Mintz struct qed_vf_info *p_vf_info; 40030b55e27dSYuval Mintz 40040b55e27dSYuval Mintz p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 40050b55e27dSYuval Mintz if (!p_vf_info) 40060b55e27dSYuval Mintz return true; 40070b55e27dSYuval Mintz 40080b55e27dSYuval Mintz return p_vf_info->state == VF_STOPPED; 40090b55e27dSYuval Mintz } 40100b55e27dSYuval Mintz 401173390ac9SYuval Mintz static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) 401273390ac9SYuval Mintz { 401373390ac9SYuval Mintz struct qed_vf_info *vf_info; 401473390ac9SYuval Mintz 401573390ac9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 401673390ac9SYuval Mintz if (!vf_info) 401773390ac9SYuval Mintz return false; 401873390ac9SYuval Mintz 401973390ac9SYuval Mintz return vf_info->spoof_chk; 402073390ac9SYuval Mintz } 402173390ac9SYuval Mintz 4022ba56947aSBaoyou Xie static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) 40236ddc7608SYuval Mintz { 40246ddc7608SYuval Mintz struct qed_vf_info *vf; 40256ddc7608SYuval Mintz int rc = -EINVAL; 40266ddc7608SYuval Mintz 40276ddc7608SYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 40286ddc7608SYuval Mintz DP_NOTICE(p_hwfn, 40296ddc7608SYuval Mintz "SR-IOV sanity check failed, can't set spoofchk\n"); 40306ddc7608SYuval Mintz goto out; 40316ddc7608SYuval Mintz } 40326ddc7608SYuval Mintz 40336ddc7608SYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 40346ddc7608SYuval Mintz if (!vf) 40356ddc7608SYuval Mintz goto out; 40366ddc7608SYuval Mintz 40376ddc7608SYuval Mintz if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { 40386ddc7608SYuval Mintz /* After VF VPORT start PF will configure spoof check */ 40396ddc7608SYuval Mintz vf->req_spoofchk_val = val; 40406ddc7608SYuval Mintz rc = 0; 40416ddc7608SYuval Mintz goto out; 40426ddc7608SYuval Mintz } 40436ddc7608SYuval Mintz 40446ddc7608SYuval Mintz rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); 
40456ddc7608SYuval Mintz 40466ddc7608SYuval Mintz out: 40476ddc7608SYuval Mintz return rc; 40486ddc7608SYuval Mintz } 40496ddc7608SYuval Mintz 4050eff16960SYuval Mintz static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, 4051eff16960SYuval Mintz u16 rel_vf_id) 4052eff16960SYuval Mintz { 4053eff16960SYuval Mintz struct qed_vf_info *p_vf; 4054eff16960SYuval Mintz 4055eff16960SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 4056eff16960SYuval Mintz if (!p_vf || !p_vf->bulletin.p_virt) 4057eff16960SYuval Mintz return NULL; 4058eff16960SYuval Mintz 40591a635e48SYuval Mintz if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) 4060eff16960SYuval Mintz return NULL; 4061eff16960SYuval Mintz 4062eff16960SYuval Mintz return p_vf->bulletin.p_virt->mac; 4063eff16960SYuval Mintz } 4064eff16960SYuval Mintz 4065ba56947aSBaoyou Xie static u16 4066ba56947aSBaoyou Xie qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) 406708feecd7SYuval Mintz { 406808feecd7SYuval Mintz struct qed_vf_info *p_vf; 406908feecd7SYuval Mintz 407008feecd7SYuval Mintz p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); 407108feecd7SYuval Mintz if (!p_vf || !p_vf->bulletin.p_virt) 407208feecd7SYuval Mintz return 0; 407308feecd7SYuval Mintz 40741a635e48SYuval Mintz if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) 407508feecd7SYuval Mintz return 0; 407608feecd7SYuval Mintz 407708feecd7SYuval Mintz return p_vf->bulletin.p_virt->pvid; 407808feecd7SYuval Mintz } 407908feecd7SYuval Mintz 4080733def6aSYuval Mintz static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, 4081733def6aSYuval Mintz struct qed_ptt *p_ptt, int vfid, int val) 4082733def6aSYuval Mintz { 4083733def6aSYuval Mintz struct qed_vf_info *vf; 4084733def6aSYuval Mintz u8 abs_vp_id = 0; 4085733def6aSYuval Mintz int rc; 4086733def6aSYuval Mintz 4087733def6aSYuval Mintz vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 4088733def6aSYuval Mintz if (!vf) 
4089733def6aSYuval Mintz return -EINVAL; 4090733def6aSYuval Mintz 4091733def6aSYuval Mintz rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); 4092733def6aSYuval Mintz if (rc) 4093733def6aSYuval Mintz return rc; 4094733def6aSYuval Mintz 4095733def6aSYuval Mintz return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); 4096733def6aSYuval Mintz } 4097733def6aSYuval Mintz 4098ba56947aSBaoyou Xie static int 4099ba56947aSBaoyou Xie qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) 4100733def6aSYuval Mintz { 4101733def6aSYuval Mintz struct qed_vf_info *vf; 4102733def6aSYuval Mintz u8 vport_id; 4103733def6aSYuval Mintz int i; 4104733def6aSYuval Mintz 4105733def6aSYuval Mintz for_each_hwfn(cdev, i) { 4106733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4107733def6aSYuval Mintz 4108733def6aSYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4109733def6aSYuval Mintz DP_NOTICE(p_hwfn, 4110733def6aSYuval Mintz "SR-IOV sanity check failed, can't set min rate\n"); 4111733def6aSYuval Mintz return -EINVAL; 4112733def6aSYuval Mintz } 4113733def6aSYuval Mintz } 4114733def6aSYuval Mintz 4115733def6aSYuval Mintz vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); 4116733def6aSYuval Mintz vport_id = vf->vport_id; 4117733def6aSYuval Mintz 4118733def6aSYuval Mintz return qed_configure_vport_wfq(cdev, vport_id, rate); 4119733def6aSYuval Mintz } 4120733def6aSYuval Mintz 412173390ac9SYuval Mintz static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) 412273390ac9SYuval Mintz { 412373390ac9SYuval Mintz struct qed_wfq_data *vf_vp_wfq; 412473390ac9SYuval Mintz struct qed_vf_info *vf_info; 412573390ac9SYuval Mintz 412673390ac9SYuval Mintz vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); 412773390ac9SYuval Mintz if (!vf_info) 412873390ac9SYuval Mintz return 0; 412973390ac9SYuval Mintz 413073390ac9SYuval Mintz vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; 413173390ac9SYuval Mintz 413273390ac9SYuval 
Mintz if (vf_vp_wfq->configured) 413373390ac9SYuval Mintz return vf_vp_wfq->min_speed; 413473390ac9SYuval Mintz else 413573390ac9SYuval Mintz return 0; 413673390ac9SYuval Mintz } 413773390ac9SYuval Mintz 413837bff2b9SYuval Mintz /** 413937bff2b9SYuval Mintz * qed_schedule_iov - schedules IOV task for VF and PF 414037bff2b9SYuval Mintz * @hwfn: hardware function pointer 414137bff2b9SYuval Mintz * @flag: IOV flag for VF/PF 414237bff2b9SYuval Mintz */ 414337bff2b9SYuval Mintz void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) 414437bff2b9SYuval Mintz { 414537bff2b9SYuval Mintz smp_mb__before_atomic(); 414637bff2b9SYuval Mintz set_bit(flag, &hwfn->iov_task_flags); 414737bff2b9SYuval Mintz smp_mb__after_atomic(); 414837bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 414937bff2b9SYuval Mintz queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); 415037bff2b9SYuval Mintz } 415137bff2b9SYuval Mintz 41521408cc1fSYuval Mintz void qed_vf_start_iov_wq(struct qed_dev *cdev) 41531408cc1fSYuval Mintz { 41541408cc1fSYuval Mintz int i; 41551408cc1fSYuval Mintz 41561408cc1fSYuval Mintz for_each_hwfn(cdev, i) 41571408cc1fSYuval Mintz queue_delayed_work(cdev->hwfns[i].iov_wq, 41581408cc1fSYuval Mintz &cdev->hwfns[i].iov_task, 0); 41591408cc1fSYuval Mintz } 41601408cc1fSYuval Mintz 41610b55e27dSYuval Mintz int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) 41620b55e27dSYuval Mintz { 41630b55e27dSYuval Mintz int i, j; 41640b55e27dSYuval Mintz 41650b55e27dSYuval Mintz for_each_hwfn(cdev, i) 41660b55e27dSYuval Mintz if (cdev->hwfns[i].iov_wq) 41670b55e27dSYuval Mintz flush_workqueue(cdev->hwfns[i].iov_wq); 41680b55e27dSYuval Mintz 41690b55e27dSYuval Mintz /* Mark VFs for disablement */ 41700b55e27dSYuval Mintz qed_iov_set_vfs_to_disable(cdev, true); 41710b55e27dSYuval Mintz 41720b55e27dSYuval Mintz if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) 41730b55e27dSYuval Mintz 
pci_disable_sriov(cdev->pdev); 41740b55e27dSYuval Mintz 41750b55e27dSYuval Mintz for_each_hwfn(cdev, i) { 41760b55e27dSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 41770b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 41780b55e27dSYuval Mintz 41790b55e27dSYuval Mintz /* Failure to acquire the ptt in 100g creates an odd error 41800b55e27dSYuval Mintz * where the first engine has already relased IOV. 41810b55e27dSYuval Mintz */ 41820b55e27dSYuval Mintz if (!ptt) { 41830b55e27dSYuval Mintz DP_ERR(hwfn, "Failed to acquire ptt\n"); 41840b55e27dSYuval Mintz return -EBUSY; 41850b55e27dSYuval Mintz } 41860b55e27dSYuval Mintz 4187733def6aSYuval Mintz /* Clean WFQ db and configure equal weight for all vports */ 4188733def6aSYuval Mintz qed_clean_wfq_db(hwfn, ptt); 4189733def6aSYuval Mintz 41900b55e27dSYuval Mintz qed_for_each_vf(hwfn, j) { 41910b55e27dSYuval Mintz int k; 41920b55e27dSYuval Mintz 41937eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(hwfn, j, true, false)) 41940b55e27dSYuval Mintz continue; 41950b55e27dSYuval Mintz 41960b55e27dSYuval Mintz /* Wait until VF is disabled before releasing */ 41970b55e27dSYuval Mintz for (k = 0; k < 100; k++) { 41980b55e27dSYuval Mintz if (!qed_iov_is_vf_stopped(hwfn, j)) 41990b55e27dSYuval Mintz msleep(20); 42000b55e27dSYuval Mintz else 42010b55e27dSYuval Mintz break; 42020b55e27dSYuval Mintz } 42030b55e27dSYuval Mintz 42040b55e27dSYuval Mintz if (k < 100) 42050b55e27dSYuval Mintz qed_iov_release_hw_for_vf(&cdev->hwfns[i], 42060b55e27dSYuval Mintz ptt, j); 42070b55e27dSYuval Mintz else 42080b55e27dSYuval Mintz DP_ERR(hwfn, 42090b55e27dSYuval Mintz "Timeout waiting for VF's FLR to end\n"); 42100b55e27dSYuval Mintz } 42110b55e27dSYuval Mintz 42120b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 42130b55e27dSYuval Mintz } 42140b55e27dSYuval Mintz 42150b55e27dSYuval Mintz qed_iov_set_vfs_to_disable(cdev, false); 42160b55e27dSYuval Mintz 42170b55e27dSYuval Mintz return 0; 42180b55e27dSYuval Mintz } 
42190b55e27dSYuval Mintz 42203da7a37aSMintz, Yuval static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, 42213da7a37aSMintz, Yuval u16 vfid, 42223da7a37aSMintz, Yuval struct qed_iov_vf_init_params *params) 42233da7a37aSMintz, Yuval { 42243da7a37aSMintz, Yuval u16 base, i; 42253da7a37aSMintz, Yuval 42263da7a37aSMintz, Yuval /* Since we have an equal resource distribution per-VF, and we assume 42273da7a37aSMintz, Yuval * PF has acquired the QED_PF_L2_QUE first queues, we start setting 42283da7a37aSMintz, Yuval * sequentially from there. 42293da7a37aSMintz, Yuval */ 42303da7a37aSMintz, Yuval base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; 42313da7a37aSMintz, Yuval 42323da7a37aSMintz, Yuval params->rel_vf_id = vfid; 42333da7a37aSMintz, Yuval for (i = 0; i < params->num_queues; i++) { 42343da7a37aSMintz, Yuval params->req_rx_queue[i] = base + i; 42353da7a37aSMintz, Yuval params->req_tx_queue[i] = base + i; 42363da7a37aSMintz, Yuval } 42373da7a37aSMintz, Yuval } 42383da7a37aSMintz, Yuval 42390b55e27dSYuval Mintz static int qed_sriov_enable(struct qed_dev *cdev, int num) 42400b55e27dSYuval Mintz { 42413da7a37aSMintz, Yuval struct qed_iov_vf_init_params params; 42420b55e27dSYuval Mintz int i, j, rc; 42430b55e27dSYuval Mintz 42440b55e27dSYuval Mintz if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { 42450b55e27dSYuval Mintz DP_NOTICE(cdev, "Can start at most %d VFs\n", 42460b55e27dSYuval Mintz RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); 42470b55e27dSYuval Mintz return -EINVAL; 42480b55e27dSYuval Mintz } 42490b55e27dSYuval Mintz 42503da7a37aSMintz, Yuval memset(¶ms, 0, sizeof(params)); 42513da7a37aSMintz, Yuval 42520b55e27dSYuval Mintz /* Initialize HW for VF access */ 42530b55e27dSYuval Mintz for_each_hwfn(cdev, j) { 42540b55e27dSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[j]; 42550b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 42565a1f965aSMintz, Yuval 42575a1f965aSMintz, Yuval /* Make sure not to use more than 16 queues per 
VF */ 42583da7a37aSMintz, Yuval params.num_queues = min_t(int, 42593da7a37aSMintz, Yuval FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, 42603da7a37aSMintz, Yuval 16); 42610b55e27dSYuval Mintz 42620b55e27dSYuval Mintz if (!ptt) { 42630b55e27dSYuval Mintz DP_ERR(hwfn, "Failed to acquire ptt\n"); 42640b55e27dSYuval Mintz rc = -EBUSY; 42650b55e27dSYuval Mintz goto err; 42660b55e27dSYuval Mintz } 42670b55e27dSYuval Mintz 42680b55e27dSYuval Mintz for (i = 0; i < num; i++) { 42697eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(hwfn, i, false, true)) 42700b55e27dSYuval Mintz continue; 42710b55e27dSYuval Mintz 42723da7a37aSMintz, Yuval qed_sriov_enable_qid_config(hwfn, i, ¶ms); 42733da7a37aSMintz, Yuval rc = qed_iov_init_hw_for_vf(hwfn, ptt, ¶ms); 42740b55e27dSYuval Mintz if (rc) { 42750b55e27dSYuval Mintz DP_ERR(cdev, "Failed to enable VF[%d]\n", i); 42760b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 42770b55e27dSYuval Mintz goto err; 42780b55e27dSYuval Mintz } 42790b55e27dSYuval Mintz } 42800b55e27dSYuval Mintz 42810b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 42820b55e27dSYuval Mintz } 42830b55e27dSYuval Mintz 42840b55e27dSYuval Mintz /* Enable SRIOV PCIe functions */ 42850b55e27dSYuval Mintz rc = pci_enable_sriov(cdev->pdev, num); 42860b55e27dSYuval Mintz if (rc) { 42870b55e27dSYuval Mintz DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); 42880b55e27dSYuval Mintz goto err; 42890b55e27dSYuval Mintz } 42900b55e27dSYuval Mintz 42910b55e27dSYuval Mintz return num; 42920b55e27dSYuval Mintz 42930b55e27dSYuval Mintz err: 42940b55e27dSYuval Mintz qed_sriov_disable(cdev, false); 42950b55e27dSYuval Mintz return rc; 42960b55e27dSYuval Mintz } 42970b55e27dSYuval Mintz 42980b55e27dSYuval Mintz static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) 42990b55e27dSYuval Mintz { 43000b55e27dSYuval Mintz if (!IS_QED_SRIOV(cdev)) { 43010b55e27dSYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); 43020b55e27dSYuval Mintz return -EOPNOTSUPP; 43030b55e27dSYuval 
Mintz } 43040b55e27dSYuval Mintz 43050b55e27dSYuval Mintz if (num_vfs_param) 43060b55e27dSYuval Mintz return qed_sriov_enable(cdev, num_vfs_param); 43070b55e27dSYuval Mintz else 43080b55e27dSYuval Mintz return qed_sriov_disable(cdev, true); 43090b55e27dSYuval Mintz } 43100b55e27dSYuval Mintz 4311eff16960SYuval Mintz static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) 4312eff16960SYuval Mintz { 4313eff16960SYuval Mintz int i; 4314eff16960SYuval Mintz 4315eff16960SYuval Mintz if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 4316eff16960SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 4317eff16960SYuval Mintz "Cannot set a VF MAC; Sriov is not enabled\n"); 4318eff16960SYuval Mintz return -EINVAL; 4319eff16960SYuval Mintz } 4320eff16960SYuval Mintz 43217eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { 4322eff16960SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 4323eff16960SYuval Mintz "Cannot set VF[%d] MAC (VF is not active)\n", vfid); 4324eff16960SYuval Mintz return -EINVAL; 4325eff16960SYuval Mintz } 4326eff16960SYuval Mintz 4327eff16960SYuval Mintz for_each_hwfn(cdev, i) { 4328eff16960SYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4329eff16960SYuval Mintz struct qed_public_vf_info *vf_info; 4330eff16960SYuval Mintz 4331eff16960SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 4332eff16960SYuval Mintz if (!vf_info) 4333eff16960SYuval Mintz continue; 4334eff16960SYuval Mintz 4335eff16960SYuval Mintz /* Set the forced MAC, and schedule the IOV task */ 4336eff16960SYuval Mintz ether_addr_copy(vf_info->forced_mac, mac); 4337eff16960SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 4338eff16960SYuval Mintz } 4339eff16960SYuval Mintz 4340eff16960SYuval Mintz return 0; 4341eff16960SYuval Mintz } 4342eff16960SYuval Mintz 434308feecd7SYuval Mintz static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) 434408feecd7SYuval Mintz { 434508feecd7SYuval 
Mintz int i; 434608feecd7SYuval Mintz 434708feecd7SYuval Mintz if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { 434808feecd7SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 434908feecd7SYuval Mintz "Cannot set a VF MAC; Sriov is not enabled\n"); 435008feecd7SYuval Mintz return -EINVAL; 435108feecd7SYuval Mintz } 435208feecd7SYuval Mintz 43537eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { 435408feecd7SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 435508feecd7SYuval Mintz "Cannot set VF[%d] MAC (VF is not active)\n", vfid); 435608feecd7SYuval Mintz return -EINVAL; 435708feecd7SYuval Mintz } 435808feecd7SYuval Mintz 435908feecd7SYuval Mintz for_each_hwfn(cdev, i) { 436008feecd7SYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 436108feecd7SYuval Mintz struct qed_public_vf_info *vf_info; 436208feecd7SYuval Mintz 436308feecd7SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); 436408feecd7SYuval Mintz if (!vf_info) 436508feecd7SYuval Mintz continue; 436608feecd7SYuval Mintz 436708feecd7SYuval Mintz /* Set the forced vlan, and schedule the IOV task */ 436808feecd7SYuval Mintz vf_info->forced_vlan = vid; 436908feecd7SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); 437008feecd7SYuval Mintz } 437108feecd7SYuval Mintz 437208feecd7SYuval Mintz return 0; 437308feecd7SYuval Mintz } 437408feecd7SYuval Mintz 437573390ac9SYuval Mintz static int qed_get_vf_config(struct qed_dev *cdev, 437673390ac9SYuval Mintz int vf_id, struct ifla_vf_info *ivi) 437773390ac9SYuval Mintz { 437873390ac9SYuval Mintz struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 437973390ac9SYuval Mintz struct qed_public_vf_info *vf_info; 438073390ac9SYuval Mintz struct qed_mcp_link_state link; 438173390ac9SYuval Mintz u32 tx_rate; 438273390ac9SYuval Mintz 438373390ac9SYuval Mintz /* Sanitize request */ 438473390ac9SYuval Mintz if (IS_VF(cdev)) 438573390ac9SYuval Mintz return -EINVAL; 438673390ac9SYuval Mintz 43877eff82b0SYuval 
Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) { 438873390ac9SYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 438973390ac9SYuval Mintz "VF index [%d] isn't active\n", vf_id); 439073390ac9SYuval Mintz return -EINVAL; 439173390ac9SYuval Mintz } 439273390ac9SYuval Mintz 439373390ac9SYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); 439473390ac9SYuval Mintz 439573390ac9SYuval Mintz qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); 439673390ac9SYuval Mintz 439773390ac9SYuval Mintz /* Fill information about VF */ 439873390ac9SYuval Mintz ivi->vf = vf_id; 439973390ac9SYuval Mintz 440073390ac9SYuval Mintz if (is_valid_ether_addr(vf_info->forced_mac)) 440173390ac9SYuval Mintz ether_addr_copy(ivi->mac, vf_info->forced_mac); 440273390ac9SYuval Mintz else 440373390ac9SYuval Mintz ether_addr_copy(ivi->mac, vf_info->mac); 440473390ac9SYuval Mintz 440573390ac9SYuval Mintz ivi->vlan = vf_info->forced_vlan; 440673390ac9SYuval Mintz ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id); 440773390ac9SYuval Mintz ivi->linkstate = vf_info->link_state; 440873390ac9SYuval Mintz tx_rate = vf_info->tx_rate; 440973390ac9SYuval Mintz ivi->max_tx_rate = tx_rate ? 
tx_rate : link.speed; 441073390ac9SYuval Mintz ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); 441173390ac9SYuval Mintz 441273390ac9SYuval Mintz return 0; 441373390ac9SYuval Mintz } 441473390ac9SYuval Mintz 441536558c3dSYuval Mintz void qed_inform_vf_link_state(struct qed_hwfn *hwfn) 441636558c3dSYuval Mintz { 4417e50728efSMintz, Yuval struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev); 441836558c3dSYuval Mintz struct qed_mcp_link_capabilities caps; 441936558c3dSYuval Mintz struct qed_mcp_link_params params; 442036558c3dSYuval Mintz struct qed_mcp_link_state link; 442136558c3dSYuval Mintz int i; 442236558c3dSYuval Mintz 442336558c3dSYuval Mintz if (!hwfn->pf_iov_info) 442436558c3dSYuval Mintz return; 442536558c3dSYuval Mintz 442636558c3dSYuval Mintz /* Update bulletin of all future possible VFs with link configuration */ 442736558c3dSYuval Mintz for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { 4428733def6aSYuval Mintz struct qed_public_vf_info *vf_info; 4429733def6aSYuval Mintz 4430733def6aSYuval Mintz vf_info = qed_iov_get_public_vf_info(hwfn, i, false); 4431733def6aSYuval Mintz if (!vf_info) 4432733def6aSYuval Mintz continue; 4433733def6aSYuval Mintz 4434e50728efSMintz, Yuval /* Only hwfn0 is actually interested in the link speed. 4435e50728efSMintz, Yuval * But since only it would receive an MFW indication of link, 4436e50728efSMintz, Yuval * need to take configuration from it - otherwise things like 4437e50728efSMintz, Yuval * rate limiting for hwfn1 VF would not work. 
4438e50728efSMintz, Yuval */ 4439e50728efSMintz, Yuval memcpy(¶ms, qed_mcp_get_link_params(lead_hwfn), 4440e50728efSMintz, Yuval sizeof(params)); 4441e50728efSMintz, Yuval memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link)); 4442e50728efSMintz, Yuval memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn), 444336558c3dSYuval Mintz sizeof(caps)); 444436558c3dSYuval Mintz 4445733def6aSYuval Mintz /* Modify link according to the VF's configured link state */ 4446733def6aSYuval Mintz switch (vf_info->link_state) { 4447733def6aSYuval Mintz case IFLA_VF_LINK_STATE_DISABLE: 4448733def6aSYuval Mintz link.link_up = false; 4449733def6aSYuval Mintz break; 4450733def6aSYuval Mintz case IFLA_VF_LINK_STATE_ENABLE: 4451733def6aSYuval Mintz link.link_up = true; 4452733def6aSYuval Mintz /* Set speed according to maximum supported by HW. 4453733def6aSYuval Mintz * that is 40G for regular devices and 100G for CMT 4454733def6aSYuval Mintz * mode devices. 4455733def6aSYuval Mintz */ 4456733def6aSYuval Mintz link.speed = (hwfn->cdev->num_hwfns > 1) ? 
4457733def6aSYuval Mintz 100000 : 40000; 4458733def6aSYuval Mintz default: 4459733def6aSYuval Mintz /* In auto mode pass PF link image to VF */ 4460733def6aSYuval Mintz break; 4461733def6aSYuval Mintz } 4462733def6aSYuval Mintz 4463733def6aSYuval Mintz if (link.link_up && vf_info->tx_rate) { 4464733def6aSYuval Mintz struct qed_ptt *ptt; 4465733def6aSYuval Mintz int rate; 4466733def6aSYuval Mintz 4467733def6aSYuval Mintz rate = min_t(int, vf_info->tx_rate, link.speed); 4468733def6aSYuval Mintz 4469733def6aSYuval Mintz ptt = qed_ptt_acquire(hwfn); 4470733def6aSYuval Mintz if (!ptt) { 4471733def6aSYuval Mintz DP_NOTICE(hwfn, "Failed to acquire PTT\n"); 4472733def6aSYuval Mintz return; 4473733def6aSYuval Mintz } 4474733def6aSYuval Mintz 4475733def6aSYuval Mintz if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { 4476733def6aSYuval Mintz vf_info->tx_rate = rate; 4477733def6aSYuval Mintz link.speed = rate; 4478733def6aSYuval Mintz } 4479733def6aSYuval Mintz 4480733def6aSYuval Mintz qed_ptt_release(hwfn, ptt); 4481733def6aSYuval Mintz } 4482733def6aSYuval Mintz 448336558c3dSYuval Mintz qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); 448436558c3dSYuval Mintz } 448536558c3dSYuval Mintz 448636558c3dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 448736558c3dSYuval Mintz } 448836558c3dSYuval Mintz 4489733def6aSYuval Mintz static int qed_set_vf_link_state(struct qed_dev *cdev, 4490733def6aSYuval Mintz int vf_id, int link_state) 4491733def6aSYuval Mintz { 4492733def6aSYuval Mintz int i; 4493733def6aSYuval Mintz 4494733def6aSYuval Mintz /* Sanitize request */ 4495733def6aSYuval Mintz if (IS_VF(cdev)) 4496733def6aSYuval Mintz return -EINVAL; 4497733def6aSYuval Mintz 44987eff82b0SYuval Mintz if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) { 4499733def6aSYuval Mintz DP_VERBOSE(cdev, QED_MSG_IOV, 4500733def6aSYuval Mintz "VF index [%d] isn't active\n", vf_id); 4501733def6aSYuval Mintz return -EINVAL; 4502733def6aSYuval Mintz } 4503733def6aSYuval 
Mintz 4504733def6aSYuval Mintz /* Handle configuration of link state */ 4505733def6aSYuval Mintz for_each_hwfn(cdev, i) { 4506733def6aSYuval Mintz struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4507733def6aSYuval Mintz struct qed_public_vf_info *vf; 4508733def6aSYuval Mintz 4509733def6aSYuval Mintz vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); 4510733def6aSYuval Mintz if (!vf) 4511733def6aSYuval Mintz continue; 4512733def6aSYuval Mintz 4513733def6aSYuval Mintz if (vf->link_state == link_state) 4514733def6aSYuval Mintz continue; 4515733def6aSYuval Mintz 4516733def6aSYuval Mintz vf->link_state = link_state; 4517733def6aSYuval Mintz qed_inform_vf_link_state(&cdev->hwfns[i]); 4518733def6aSYuval Mintz } 4519733def6aSYuval Mintz 4520733def6aSYuval Mintz return 0; 4521733def6aSYuval Mintz } 4522733def6aSYuval Mintz 45236ddc7608SYuval Mintz static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) 45246ddc7608SYuval Mintz { 45256ddc7608SYuval Mintz int i, rc = -EINVAL; 45266ddc7608SYuval Mintz 45276ddc7608SYuval Mintz for_each_hwfn(cdev, i) { 45286ddc7608SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 45296ddc7608SYuval Mintz 45306ddc7608SYuval Mintz rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); 45316ddc7608SYuval Mintz if (rc) 45326ddc7608SYuval Mintz break; 45336ddc7608SYuval Mintz } 45346ddc7608SYuval Mintz 45356ddc7608SYuval Mintz return rc; 45366ddc7608SYuval Mintz } 45376ddc7608SYuval Mintz 4538733def6aSYuval Mintz static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) 4539733def6aSYuval Mintz { 4540733def6aSYuval Mintz int i; 4541733def6aSYuval Mintz 4542733def6aSYuval Mintz for_each_hwfn(cdev, i) { 4543733def6aSYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 4544733def6aSYuval Mintz struct qed_public_vf_info *vf; 4545733def6aSYuval Mintz 4546733def6aSYuval Mintz if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { 4547733def6aSYuval Mintz DP_NOTICE(p_hwfn, 4548733def6aSYuval Mintz "SR-IOV sanity check failed, can't set 
tx rate\n"); 4549733def6aSYuval Mintz return -EINVAL; 4550733def6aSYuval Mintz } 4551733def6aSYuval Mintz 4552733def6aSYuval Mintz vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); 4553733def6aSYuval Mintz 4554733def6aSYuval Mintz vf->tx_rate = rate; 4555733def6aSYuval Mintz 4556733def6aSYuval Mintz qed_inform_vf_link_state(p_hwfn); 4557733def6aSYuval Mintz } 4558733def6aSYuval Mintz 4559733def6aSYuval Mintz return 0; 4560733def6aSYuval Mintz } 4561733def6aSYuval Mintz 4562733def6aSYuval Mintz static int qed_set_vf_rate(struct qed_dev *cdev, 4563733def6aSYuval Mintz int vfid, u32 min_rate, u32 max_rate) 4564733def6aSYuval Mintz { 4565733def6aSYuval Mintz int rc_min = 0, rc_max = 0; 4566733def6aSYuval Mintz 4567733def6aSYuval Mintz if (max_rate) 4568733def6aSYuval Mintz rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); 4569733def6aSYuval Mintz 4570733def6aSYuval Mintz if (min_rate) 4571733def6aSYuval Mintz rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); 4572733def6aSYuval Mintz 4573733def6aSYuval Mintz if (rc_max | rc_min) 4574733def6aSYuval Mintz return -EINVAL; 4575733def6aSYuval Mintz 4576733def6aSYuval Mintz return 0; 4577733def6aSYuval Mintz } 4578733def6aSYuval Mintz 4579f990c82cSMintz, Yuval static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) 4580f990c82cSMintz, Yuval { 4581f990c82cSMintz, Yuval int i; 4582f990c82cSMintz, Yuval 4583f990c82cSMintz, Yuval for_each_hwfn(cdev, i) { 4584f990c82cSMintz, Yuval struct qed_hwfn *hwfn = &cdev->hwfns[i]; 4585f990c82cSMintz, Yuval struct qed_public_vf_info *vf; 4586f990c82cSMintz, Yuval 4587f990c82cSMintz, Yuval if (!qed_iov_pf_sanity_check(hwfn, vfid)) { 4588f990c82cSMintz, Yuval DP_NOTICE(hwfn, 4589f990c82cSMintz, Yuval "SR-IOV sanity check failed, can't set trust\n"); 4590f990c82cSMintz, Yuval return -EINVAL; 4591f990c82cSMintz, Yuval } 4592f990c82cSMintz, Yuval 4593f990c82cSMintz, Yuval vf = qed_iov_get_public_vf_info(hwfn, vfid, true); 4594f990c82cSMintz, Yuval 
4595f990c82cSMintz, Yuval if (vf->is_trusted_request == trust) 4596f990c82cSMintz, Yuval return 0; 4597f990c82cSMintz, Yuval vf->is_trusted_request = trust; 4598f990c82cSMintz, Yuval 4599f990c82cSMintz, Yuval qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); 4600f990c82cSMintz, Yuval } 4601f990c82cSMintz, Yuval 4602f990c82cSMintz, Yuval return 0; 4603f990c82cSMintz, Yuval } 4604f990c82cSMintz, Yuval 460537bff2b9SYuval Mintz static void qed_handle_vf_msg(struct qed_hwfn *hwfn) 460637bff2b9SYuval Mintz { 460737bff2b9SYuval Mintz u64 events[QED_VF_ARRAY_LENGTH]; 460837bff2b9SYuval Mintz struct qed_ptt *ptt; 460937bff2b9SYuval Mintz int i; 461037bff2b9SYuval Mintz 461137bff2b9SYuval Mintz ptt = qed_ptt_acquire(hwfn); 461237bff2b9SYuval Mintz if (!ptt) { 461337bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 461437bff2b9SYuval Mintz "Can't acquire PTT; re-scheduling\n"); 461537bff2b9SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); 461637bff2b9SYuval Mintz return; 461737bff2b9SYuval Mintz } 461837bff2b9SYuval Mintz 4619fd3c615aSMintz, Yuval qed_iov_pf_get_pending_events(hwfn, events); 462037bff2b9SYuval Mintz 462137bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 462237bff2b9SYuval Mintz "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", 462337bff2b9SYuval Mintz events[0], events[1], events[2]); 462437bff2b9SYuval Mintz 462537bff2b9SYuval Mintz qed_for_each_vf(hwfn, i) { 462637bff2b9SYuval Mintz /* Skip VFs with no pending messages */ 462737bff2b9SYuval Mintz if (!(events[i / 64] & (1ULL << (i % 64)))) 462837bff2b9SYuval Mintz continue; 462937bff2b9SYuval Mintz 463037bff2b9SYuval Mintz DP_VERBOSE(hwfn, QED_MSG_IOV, 463137bff2b9SYuval Mintz "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 463237bff2b9SYuval Mintz i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); 463337bff2b9SYuval Mintz 463437bff2b9SYuval Mintz /* Copy VF's message to PF's request buffer for that VF */ 463537bff2b9SYuval Mintz if (qed_iov_copy_vf_msg(hwfn, ptt, i)) 463637bff2b9SYuval Mintz 
continue; 463737bff2b9SYuval Mintz 463837bff2b9SYuval Mintz qed_iov_process_mbx_req(hwfn, ptt, i); 463937bff2b9SYuval Mintz } 464037bff2b9SYuval Mintz 464137bff2b9SYuval Mintz qed_ptt_release(hwfn, ptt); 464237bff2b9SYuval Mintz } 464337bff2b9SYuval Mintz 464408feecd7SYuval Mintz static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) 464508feecd7SYuval Mintz { 464608feecd7SYuval Mintz int i; 464708feecd7SYuval Mintz 464808feecd7SYuval Mintz qed_for_each_vf(hwfn, i) { 464908feecd7SYuval Mintz struct qed_public_vf_info *info; 465008feecd7SYuval Mintz bool update = false; 4651eff16960SYuval Mintz u8 *mac; 465208feecd7SYuval Mintz 465308feecd7SYuval Mintz info = qed_iov_get_public_vf_info(hwfn, i, true); 465408feecd7SYuval Mintz if (!info) 465508feecd7SYuval Mintz continue; 465608feecd7SYuval Mintz 465708feecd7SYuval Mintz /* Update data on bulletin board */ 4658eff16960SYuval Mintz mac = qed_iov_bulletin_get_forced_mac(hwfn, i); 4659eff16960SYuval Mintz if (is_valid_ether_addr(info->forced_mac) && 4660eff16960SYuval Mintz (!mac || !ether_addr_equal(mac, info->forced_mac))) { 4661eff16960SYuval Mintz DP_VERBOSE(hwfn, 4662eff16960SYuval Mintz QED_MSG_IOV, 4663eff16960SYuval Mintz "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", 4664eff16960SYuval Mintz i, 4665eff16960SYuval Mintz hwfn->cdev->p_iov_info->first_vf_in_pf + i); 4666eff16960SYuval Mintz 4667eff16960SYuval Mintz /* Update bulletin board with forced MAC */ 4668eff16960SYuval Mintz qed_iov_bulletin_set_forced_mac(hwfn, 4669eff16960SYuval Mintz info->forced_mac, i); 4670eff16960SYuval Mintz update = true; 4671eff16960SYuval Mintz } 467208feecd7SYuval Mintz 467308feecd7SYuval Mintz if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ 467408feecd7SYuval Mintz info->forced_vlan) { 467508feecd7SYuval Mintz DP_VERBOSE(hwfn, 467608feecd7SYuval Mintz QED_MSG_IOV, 467708feecd7SYuval Mintz "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", 467808feecd7SYuval Mintz info->forced_vlan, 
467908feecd7SYuval Mintz i, 468008feecd7SYuval Mintz hwfn->cdev->p_iov_info->first_vf_in_pf + i); 468108feecd7SYuval Mintz qed_iov_bulletin_set_forced_vlan(hwfn, 468208feecd7SYuval Mintz info->forced_vlan, i); 468308feecd7SYuval Mintz update = true; 468408feecd7SYuval Mintz } 468508feecd7SYuval Mintz 468608feecd7SYuval Mintz if (update) 468708feecd7SYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 468808feecd7SYuval Mintz } 468908feecd7SYuval Mintz } 469008feecd7SYuval Mintz 469136558c3dSYuval Mintz static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) 469236558c3dSYuval Mintz { 469336558c3dSYuval Mintz struct qed_ptt *ptt; 469436558c3dSYuval Mintz int i; 469536558c3dSYuval Mintz 469636558c3dSYuval Mintz ptt = qed_ptt_acquire(hwfn); 469736558c3dSYuval Mintz if (!ptt) { 469836558c3dSYuval Mintz DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); 469936558c3dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 470036558c3dSYuval Mintz return; 470136558c3dSYuval Mintz } 470236558c3dSYuval Mintz 470336558c3dSYuval Mintz qed_for_each_vf(hwfn, i) 470436558c3dSYuval Mintz qed_iov_post_vf_bulletin(hwfn, i, ptt); 470536558c3dSYuval Mintz 470636558c3dSYuval Mintz qed_ptt_release(hwfn, ptt); 470736558c3dSYuval Mintz } 470836558c3dSYuval Mintz 4709f990c82cSMintz, Yuval static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) 4710f990c82cSMintz, Yuval { 4711f990c82cSMintz, Yuval struct qed_sp_vport_update_params params; 4712f990c82cSMintz, Yuval struct qed_filter_accept_flags *flags; 4713f990c82cSMintz, Yuval struct qed_public_vf_info *vf_info; 4714f990c82cSMintz, Yuval struct qed_vf_info *vf; 4715f990c82cSMintz, Yuval u8 mask; 4716f990c82cSMintz, Yuval int i; 4717f990c82cSMintz, Yuval 4718f990c82cSMintz, Yuval mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; 4719f990c82cSMintz, Yuval flags = ¶ms.accept_flags; 4720f990c82cSMintz, Yuval 4721f990c82cSMintz, Yuval qed_for_each_vf(hwfn, i) { 4722f990c82cSMintz, Yuval 
/* Need to make sure current requested configuration didn't 4723f990c82cSMintz, Yuval * flip so that we'll end up configuring something that's not 4724f990c82cSMintz, Yuval * needed. 4725f990c82cSMintz, Yuval */ 4726f990c82cSMintz, Yuval vf_info = qed_iov_get_public_vf_info(hwfn, i, true); 4727f990c82cSMintz, Yuval if (vf_info->is_trusted_configured == 4728f990c82cSMintz, Yuval vf_info->is_trusted_request) 4729f990c82cSMintz, Yuval continue; 4730f990c82cSMintz, Yuval vf_info->is_trusted_configured = vf_info->is_trusted_request; 4731f990c82cSMintz, Yuval 4732f990c82cSMintz, Yuval /* Validate that the VF has a configured vport */ 4733f990c82cSMintz, Yuval vf = qed_iov_get_vf_info(hwfn, i, true); 4734f990c82cSMintz, Yuval if (!vf->vport_instance) 4735f990c82cSMintz, Yuval continue; 4736f990c82cSMintz, Yuval 4737f990c82cSMintz, Yuval memset(¶ms, 0, sizeof(params)); 4738f990c82cSMintz, Yuval params.opaque_fid = vf->opaque_fid; 4739f990c82cSMintz, Yuval params.vport_id = vf->vport_id; 4740f990c82cSMintz, Yuval 4741f990c82cSMintz, Yuval if (vf_info->rx_accept_mode & mask) { 4742f990c82cSMintz, Yuval flags->update_rx_mode_config = 1; 4743f990c82cSMintz, Yuval flags->rx_accept_filter = vf_info->rx_accept_mode; 4744f990c82cSMintz, Yuval } 4745f990c82cSMintz, Yuval 4746f990c82cSMintz, Yuval if (vf_info->tx_accept_mode & mask) { 4747f990c82cSMintz, Yuval flags->update_tx_mode_config = 1; 4748f990c82cSMintz, Yuval flags->tx_accept_filter = vf_info->tx_accept_mode; 4749f990c82cSMintz, Yuval } 4750f990c82cSMintz, Yuval 4751f990c82cSMintz, Yuval /* Remove if needed; Otherwise this would set the mask */ 4752f990c82cSMintz, Yuval if (!vf_info->is_trusted_configured) { 4753f990c82cSMintz, Yuval flags->rx_accept_filter &= ~mask; 4754f990c82cSMintz, Yuval flags->tx_accept_filter &= ~mask; 4755f990c82cSMintz, Yuval } 4756f990c82cSMintz, Yuval 4757f990c82cSMintz, Yuval if (flags->update_rx_mode_config || 4758f990c82cSMintz, Yuval flags->update_tx_mode_config) 4759f990c82cSMintz, Yuval 
qed_sp_vport_update(hwfn, ¶ms, 4760f990c82cSMintz, Yuval QED_SPQ_MODE_EBLOCK, NULL); 4761f990c82cSMintz, Yuval } 4762f990c82cSMintz, Yuval } 4763f990c82cSMintz, Yuval 4764ba56947aSBaoyou Xie static void qed_iov_pf_task(struct work_struct *work) 4765ba56947aSBaoyou Xie 476637bff2b9SYuval Mintz { 476737bff2b9SYuval Mintz struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 476837bff2b9SYuval Mintz iov_task.work); 47690b55e27dSYuval Mintz int rc; 477037bff2b9SYuval Mintz 477137bff2b9SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) 477237bff2b9SYuval Mintz return; 477337bff2b9SYuval Mintz 47740b55e27dSYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { 47750b55e27dSYuval Mintz struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 47760b55e27dSYuval Mintz 47770b55e27dSYuval Mintz if (!ptt) { 47780b55e27dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 47790b55e27dSYuval Mintz return; 47800b55e27dSYuval Mintz } 47810b55e27dSYuval Mintz 47820b55e27dSYuval Mintz rc = qed_iov_vf_flr_cleanup(hwfn, ptt); 47830b55e27dSYuval Mintz if (rc) 47840b55e27dSYuval Mintz qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); 47850b55e27dSYuval Mintz 47860b55e27dSYuval Mintz qed_ptt_release(hwfn, ptt); 47870b55e27dSYuval Mintz } 47880b55e27dSYuval Mintz 478937bff2b9SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) 479037bff2b9SYuval Mintz qed_handle_vf_msg(hwfn); 479108feecd7SYuval Mintz 479208feecd7SYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, 479308feecd7SYuval Mintz &hwfn->iov_task_flags)) 479408feecd7SYuval Mintz qed_handle_pf_set_vf_unicast(hwfn); 479508feecd7SYuval Mintz 479636558c3dSYuval Mintz if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, 479736558c3dSYuval Mintz &hwfn->iov_task_flags)) 479836558c3dSYuval Mintz qed_handle_bulletin_post(hwfn); 4799f990c82cSMintz, Yuval 4800f990c82cSMintz, Yuval if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, 
&hwfn->iov_task_flags)) 4801f990c82cSMintz, Yuval qed_iov_handle_trust_change(hwfn); 480237bff2b9SYuval Mintz } 480337bff2b9SYuval Mintz 480437bff2b9SYuval Mintz void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) 480537bff2b9SYuval Mintz { 480637bff2b9SYuval Mintz int i; 480737bff2b9SYuval Mintz 480837bff2b9SYuval Mintz for_each_hwfn(cdev, i) { 480937bff2b9SYuval Mintz if (!cdev->hwfns[i].iov_wq) 481037bff2b9SYuval Mintz continue; 481137bff2b9SYuval Mintz 481237bff2b9SYuval Mintz if (schedule_first) { 481337bff2b9SYuval Mintz qed_schedule_iov(&cdev->hwfns[i], 481437bff2b9SYuval Mintz QED_IOV_WQ_STOP_WQ_FLAG); 481537bff2b9SYuval Mintz cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); 481637bff2b9SYuval Mintz } 481737bff2b9SYuval Mintz 481837bff2b9SYuval Mintz flush_workqueue(cdev->hwfns[i].iov_wq); 481937bff2b9SYuval Mintz destroy_workqueue(cdev->hwfns[i].iov_wq); 482037bff2b9SYuval Mintz } 482137bff2b9SYuval Mintz } 482237bff2b9SYuval Mintz 482337bff2b9SYuval Mintz int qed_iov_wq_start(struct qed_dev *cdev) 482437bff2b9SYuval Mintz { 482537bff2b9SYuval Mintz char name[NAME_SIZE]; 482637bff2b9SYuval Mintz int i; 482737bff2b9SYuval Mintz 482837bff2b9SYuval Mintz for_each_hwfn(cdev, i) { 482937bff2b9SYuval Mintz struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 483037bff2b9SYuval Mintz 483136558c3dSYuval Mintz /* PFs needs a dedicated workqueue only if they support IOV. 483236558c3dSYuval Mintz * VFs always require one. 
483336558c3dSYuval Mintz */ 483436558c3dSYuval Mintz if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) 483537bff2b9SYuval Mintz continue; 483637bff2b9SYuval Mintz 483737bff2b9SYuval Mintz snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", 483837bff2b9SYuval Mintz cdev->pdev->bus->number, 483937bff2b9SYuval Mintz PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); 484037bff2b9SYuval Mintz 484137bff2b9SYuval Mintz p_hwfn->iov_wq = create_singlethread_workqueue(name); 484237bff2b9SYuval Mintz if (!p_hwfn->iov_wq) { 484337bff2b9SYuval Mintz DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); 484437bff2b9SYuval Mintz return -ENOMEM; 484537bff2b9SYuval Mintz } 484637bff2b9SYuval Mintz 484736558c3dSYuval Mintz if (IS_PF(cdev)) 484837bff2b9SYuval Mintz INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); 484936558c3dSYuval Mintz else 485036558c3dSYuval Mintz INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); 485137bff2b9SYuval Mintz } 485237bff2b9SYuval Mintz 485337bff2b9SYuval Mintz return 0; 485437bff2b9SYuval Mintz } 48550b55e27dSYuval Mintz 48560b55e27dSYuval Mintz const struct qed_iov_hv_ops qed_iov_ops_pass = { 48570b55e27dSYuval Mintz .configure = &qed_sriov_configure, 4858eff16960SYuval Mintz .set_mac = &qed_sriov_pf_set_mac, 485908feecd7SYuval Mintz .set_vlan = &qed_sriov_pf_set_vlan, 486073390ac9SYuval Mintz .get_config = &qed_get_vf_config, 4861733def6aSYuval Mintz .set_link_state = &qed_set_vf_link_state, 48626ddc7608SYuval Mintz .set_spoof = &qed_spoof_configure, 4863733def6aSYuval Mintz .set_rate = &qed_set_vf_rate, 4864f990c82cSMintz, Yuval .set_trust = &qed_set_vf_trust, 48650b55e27dSYuval Mintz }; 4866