132a47e72SYuval Mintz /* QLogic qed NIC Driver
232a47e72SYuval Mintz  * Copyright (c) 2015 QLogic Corporation
332a47e72SYuval Mintz  *
432a47e72SYuval Mintz  * This software is available under the terms of the GNU General Public License
532a47e72SYuval Mintz  * (GPL) Version 2, available from the file COPYING in the main directory of
632a47e72SYuval Mintz  * this source tree.
732a47e72SYuval Mintz  */
832a47e72SYuval Mintz 
9dacd88d6SYuval Mintz #include <linux/etherdevice.h>
1036558c3dSYuval Mintz #include <linux/crc32.h>
110b55e27dSYuval Mintz #include <linux/qed/qed_iov_if.h>
121408cc1fSYuval Mintz #include "qed_cxt.h"
131408cc1fSYuval Mintz #include "qed_hsi.h"
1432a47e72SYuval Mintz #include "qed_hw.h"
151408cc1fSYuval Mintz #include "qed_init_ops.h"
1632a47e72SYuval Mintz #include "qed_int.h"
171408cc1fSYuval Mintz #include "qed_mcp.h"
1832a47e72SYuval Mintz #include "qed_reg_addr.h"
191408cc1fSYuval Mintz #include "qed_sp.h"
2032a47e72SYuval Mintz #include "qed_sriov.h"
2132a47e72SYuval Mintz #include "qed_vf.h"
2232a47e72SYuval Mintz 
231408cc1fSYuval Mintz /* IOV ramrods */
241fe614d1SYuval Mintz static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
251408cc1fSYuval Mintz {
261408cc1fSYuval Mintz 	struct vf_start_ramrod_data *p_ramrod = NULL;
271408cc1fSYuval Mintz 	struct qed_spq_entry *p_ent = NULL;
281408cc1fSYuval Mintz 	struct qed_sp_init_data init_data;
291408cc1fSYuval Mintz 	int rc = -EINVAL;
301fe614d1SYuval Mintz 	u8 fp_minor;
311408cc1fSYuval Mintz 
321408cc1fSYuval Mintz 	/* Get SPQ entry */
331408cc1fSYuval Mintz 	memset(&init_data, 0, sizeof(init_data));
341408cc1fSYuval Mintz 	init_data.cid = qed_spq_get_cid(p_hwfn);
351fe614d1SYuval Mintz 	init_data.opaque_fid = p_vf->opaque_fid;
361408cc1fSYuval Mintz 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
371408cc1fSYuval Mintz 
381408cc1fSYuval Mintz 	rc = qed_sp_init_request(p_hwfn, &p_ent,
391408cc1fSYuval Mintz 				 COMMON_RAMROD_VF_START,
401408cc1fSYuval Mintz 				 PROTOCOLID_COMMON, &init_data);
411408cc1fSYuval Mintz 	if (rc)
421408cc1fSYuval Mintz 		return rc;
431408cc1fSYuval Mintz 
441408cc1fSYuval Mintz 	p_ramrod = &p_ent->ramrod.vf_start;
451408cc1fSYuval Mintz 
461fe614d1SYuval Mintz 	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
471fe614d1SYuval Mintz 	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
481408cc1fSYuval Mintz 
491fe614d1SYuval Mintz 	switch (p_hwfn->hw_info.personality) {
501fe614d1SYuval Mintz 	case QED_PCI_ETH:
511408cc1fSYuval Mintz 		p_ramrod->personality = PERSONALITY_ETH;
521fe614d1SYuval Mintz 		break;
531fe614d1SYuval Mintz 	case QED_PCI_ETH_ROCE:
541fe614d1SYuval Mintz 		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
551fe614d1SYuval Mintz 		break;
561fe614d1SYuval Mintz 	default:
571fe614d1SYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
581fe614d1SYuval Mintz 			  p_hwfn->hw_info.personality);
591fe614d1SYuval Mintz 		return -EINVAL;
601fe614d1SYuval Mintz 	}
611fe614d1SYuval Mintz 
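	/* Use the fastpath HSI minor version the VF requested at ACQUIRE;
	 * if it is newer than the PF's (and not the special no-pkt-len/tunn
	 * value), fall back to the PF's minor version.
	 */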
621fe614d1SYuval Mintz 	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
63a044df83SYuval Mintz 	if (fp_minor > ETH_HSI_VER_MINOR &&
64a044df83SYuval Mintz 	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
651fe614d1SYuval Mintz 		DP_VERBOSE(p_hwfn,
661fe614d1SYuval Mintz 			   QED_MSG_IOV,
671fe614d1SYuval Mintz 			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
681fe614d1SYuval Mintz 			   p_vf->abs_vf_id,
691fe614d1SYuval Mintz 			   ETH_HSI_VER_MAJOR,
701fe614d1SYuval Mintz 			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
711fe614d1SYuval Mintz 		fp_minor = ETH_HSI_VER_MINOR;
721fe614d1SYuval Mintz 	}
731fe614d1SYuval Mintz 
74351a4dedSYuval Mintz 	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
751fe614d1SYuval Mintz 	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
761fe614d1SYuval Mintz 
771fe614d1SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
781fe614d1SYuval Mintz 		   "VF[%d] - Starting using HSI %02x.%02x\n",
791fe614d1SYuval Mintz 		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
801408cc1fSYuval Mintz 
811408cc1fSYuval Mintz 	return qed_spq_post(p_hwfn, p_ent, NULL);
821408cc1fSYuval Mintz }
831408cc1fSYuval Mintz 
840b55e27dSYuval Mintz static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
850b55e27dSYuval Mintz 			  u32 concrete_vfid, u16 opaque_vfid)
860b55e27dSYuval Mintz {
870b55e27dSYuval Mintz 	struct vf_stop_ramrod_data *p_ramrod = NULL;
880b55e27dSYuval Mintz 	struct qed_spq_entry *p_ent = NULL;
890b55e27dSYuval Mintz 	struct qed_sp_init_data init_data;
900b55e27dSYuval Mintz 	int rc = -EINVAL;
910b55e27dSYuval Mintz 
920b55e27dSYuval Mintz 	/* Get SPQ entry */
930b55e27dSYuval Mintz 	memset(&init_data, 0, sizeof(init_data));
940b55e27dSYuval Mintz 	init_data.cid = qed_spq_get_cid(p_hwfn);
950b55e27dSYuval Mintz 	init_data.opaque_fid = opaque_vfid;
960b55e27dSYuval Mintz 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
970b55e27dSYuval Mintz 
980b55e27dSYuval Mintz 	rc = qed_sp_init_request(p_hwfn, &p_ent,
990b55e27dSYuval Mintz 				 COMMON_RAMROD_VF_STOP,
1000b55e27dSYuval Mintz 				 PROTOCOLID_COMMON, &init_data);
1010b55e27dSYuval Mintz 	if (rc)
1020b55e27dSYuval Mintz 		return rc;
1030b55e27dSYuval Mintz 
1040b55e27dSYuval Mintz 	p_ramrod = &p_ent->ramrod.vf_stop;
1050b55e27dSYuval Mintz 
1060b55e27dSYuval Mintz 	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
1070b55e27dSYuval Mintz 
1080b55e27dSYuval Mintz 	return qed_spq_post(p_hwfn, p_ent, NULL);
1090b55e27dSYuval Mintz }
1100b55e27dSYuval Mintz 
111ba56947aSBaoyou Xie static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
1127eff82b0SYuval Mintz 				  int rel_vf_id,
1137eff82b0SYuval Mintz 				  bool b_enabled_only, bool b_non_malicious)
11432a47e72SYuval Mintz {
11532a47e72SYuval Mintz 	if (!p_hwfn->pf_iov_info) {
11632a47e72SYuval Mintz 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
11732a47e72SYuval Mintz 		return false;
11832a47e72SYuval Mintz 	}
11932a47e72SYuval Mintz 
12032a47e72SYuval Mintz 	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
12132a47e72SYuval Mintz 	    (rel_vf_id < 0))
12232a47e72SYuval Mintz 		return false;
12332a47e72SYuval Mintz 
12432a47e72SYuval Mintz 	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
12532a47e72SYuval Mintz 	    b_enabled_only)
12632a47e72SYuval Mintz 		return false;
12732a47e72SYuval Mintz 
1287eff82b0SYuval Mintz 	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
1297eff82b0SYuval Mintz 	    b_non_malicious)
1307eff82b0SYuval Mintz 		return false;
1317eff82b0SYuval Mintz 
13232a47e72SYuval Mintz 	return true;
13332a47e72SYuval Mintz }
13432a47e72SYuval Mintz 
13537bff2b9SYuval Mintz static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
13637bff2b9SYuval Mintz 					       u16 relative_vf_id,
13737bff2b9SYuval Mintz 					       bool b_enabled_only)
13837bff2b9SYuval Mintz {
13937bff2b9SYuval Mintz 	struct qed_vf_info *vf = NULL;
14037bff2b9SYuval Mintz 
14137bff2b9SYuval Mintz 	if (!p_hwfn->pf_iov_info) {
14237bff2b9SYuval Mintz 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
14337bff2b9SYuval Mintz 		return NULL;
14437bff2b9SYuval Mintz 	}
14537bff2b9SYuval Mintz 
1467eff82b0SYuval Mintz 	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
1477eff82b0SYuval Mintz 				  b_enabled_only, false))
14837bff2b9SYuval Mintz 		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
14937bff2b9SYuval Mintz 	else
15037bff2b9SYuval Mintz 		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
15137bff2b9SYuval Mintz 		       relative_vf_id);
15237bff2b9SYuval Mintz 
15337bff2b9SYuval Mintz 	return vf;
15437bff2b9SYuval Mintz }
15537bff2b9SYuval Mintz 
15641086467SYuval Mintz static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
15741086467SYuval Mintz 				 struct qed_vf_info *p_vf, u16 rx_qid)
15841086467SYuval Mintz {
15941086467SYuval Mintz 	if (rx_qid >= p_vf->num_rxqs)
16041086467SYuval Mintz 		DP_VERBOSE(p_hwfn,
16141086467SYuval Mintz 			   QED_MSG_IOV,
16241086467SYuval Mintz 			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
16341086467SYuval Mintz 			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
16441086467SYuval Mintz 	return rx_qid < p_vf->num_rxqs;
16541086467SYuval Mintz }
16641086467SYuval Mintz 
16741086467SYuval Mintz static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
16841086467SYuval Mintz 				 struct qed_vf_info *p_vf, u16 tx_qid)
16941086467SYuval Mintz {
17041086467SYuval Mintz 	if (tx_qid >= p_vf->num_txqs)
17141086467SYuval Mintz 		DP_VERBOSE(p_hwfn,
17241086467SYuval Mintz 			   QED_MSG_IOV,
17341086467SYuval Mintz 			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
17441086467SYuval Mintz 			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
17541086467SYuval Mintz 	return tx_qid < p_vf->num_txqs;
17641086467SYuval Mintz }
17741086467SYuval Mintz 
17841086467SYuval Mintz static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
17941086467SYuval Mintz 				struct qed_vf_info *p_vf, u16 sb_idx)
18041086467SYuval Mintz {
18141086467SYuval Mintz 	int i;
18241086467SYuval Mintz 
18341086467SYuval Mintz 	for (i = 0; i < p_vf->num_sbs; i++)
18441086467SYuval Mintz 		if (p_vf->igu_sbs[i] == sb_idx)
18541086467SYuval Mintz 			return true;
18641086467SYuval Mintz 
18741086467SYuval Mintz 	DP_VERBOSE(p_hwfn,
18841086467SYuval Mintz 		   QED_MSG_IOV,
18941086467SYuval Mintz 		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
19041086467SYuval Mintz 		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
19141086467SYuval Mintz 
19241086467SYuval Mintz 	return false;
19341086467SYuval Mintz }
19441086467SYuval Mintz 
195ba56947aSBaoyou Xie static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
19636558c3dSYuval Mintz 				    int vfid, struct qed_ptt *p_ptt)
19736558c3dSYuval Mintz {
19836558c3dSYuval Mintz 	struct qed_bulletin_content *p_bulletin;
19936558c3dSYuval Mintz 	int crc_size = sizeof(p_bulletin->crc);
20036558c3dSYuval Mintz 	struct qed_dmae_params params;
20136558c3dSYuval Mintz 	struct qed_vf_info *p_vf;
20236558c3dSYuval Mintz 
20336558c3dSYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
20436558c3dSYuval Mintz 	if (!p_vf)
20536558c3dSYuval Mintz 		return -EINVAL;
20636558c3dSYuval Mintz 
20736558c3dSYuval Mintz 	if (!p_vf->vf_bulletin)
20836558c3dSYuval Mintz 		return -EINVAL;
20936558c3dSYuval Mintz 
21036558c3dSYuval Mintz 	p_bulletin = p_vf->bulletin.p_virt;
21136558c3dSYuval Mintz 
21236558c3dSYuval Mintz 	/* Increment bulletin board version and compute crc */
21336558c3dSYuval Mintz 	p_bulletin->version++;
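	/* The CRC covers the bulletin contents excluding the leading crc field */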
21436558c3dSYuval Mintz 	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
21536558c3dSYuval Mintz 				p_vf->bulletin.size - crc_size);
21636558c3dSYuval Mintz 
21736558c3dSYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
21836558c3dSYuval Mintz 		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
21936558c3dSYuval Mintz 		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
22036558c3dSYuval Mintz 
22136558c3dSYuval Mintz 	/* Propagate the bulletin board to the VF's memory via DMAE */
22236558c3dSYuval Mintz 	memset(&params, 0, sizeof(params));
22336558c3dSYuval Mintz 	params.flags = QED_DMAE_FLAG_VF_DST;
22436558c3dSYuval Mintz 	params.dst_vfid = p_vf->abs_vf_id;
22536558c3dSYuval Mintz 	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
22636558c3dSYuval Mintz 				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
22736558c3dSYuval Mintz 				  &params);
22836558c3dSYuval Mintz }
22936558c3dSYuval Mintz 
23032a47e72SYuval Mintz static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
23132a47e72SYuval Mintz {
23232a47e72SYuval Mintz 	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
23332a47e72SYuval Mintz 	int pos = iov->pos;
23432a47e72SYuval Mintz 
23532a47e72SYuval Mintz 	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
23632a47e72SYuval Mintz 	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
23732a47e72SYuval Mintz 
23832a47e72SYuval Mintz 	pci_read_config_word(cdev->pdev,
23932a47e72SYuval Mintz 			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
24032a47e72SYuval Mintz 	pci_read_config_word(cdev->pdev,
24132a47e72SYuval Mintz 			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
24232a47e72SYuval Mintz 
24332a47e72SYuval Mintz 	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
24432a47e72SYuval Mintz 	if (iov->num_vfs) {
24532a47e72SYuval Mintz 		DP_VERBOSE(cdev,
24632a47e72SYuval Mintz 			   QED_MSG_IOV,
24732a47e72SYuval Mintz 			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
24832a47e72SYuval Mintz 		iov->num_vfs = 0;
24932a47e72SYuval Mintz 	}
25032a47e72SYuval Mintz 
25132a47e72SYuval Mintz 	pci_read_config_word(cdev->pdev,
25232a47e72SYuval Mintz 			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
25332a47e72SYuval Mintz 
25432a47e72SYuval Mintz 	pci_read_config_word(cdev->pdev,
25532a47e72SYuval Mintz 			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
25632a47e72SYuval Mintz 
25732a47e72SYuval Mintz 	pci_read_config_word(cdev->pdev,
25832a47e72SYuval Mintz 			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
25932a47e72SYuval Mintz 
26032a47e72SYuval Mintz 	pci_read_config_dword(cdev->pdev,
26132a47e72SYuval Mintz 			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
26232a47e72SYuval Mintz 
26332a47e72SYuval Mintz 	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
26432a47e72SYuval Mintz 
26532a47e72SYuval Mintz 	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
26632a47e72SYuval Mintz 
26732a47e72SYuval Mintz 	DP_VERBOSE(cdev,
26832a47e72SYuval Mintz 		   QED_MSG_IOV,
26932a47e72SYuval Mintz 		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
27032a47e72SYuval Mintz 		   iov->nres,
27132a47e72SYuval Mintz 		   iov->cap,
27232a47e72SYuval Mintz 		   iov->ctrl,
27332a47e72SYuval Mintz 		   iov->total_vfs,
27432a47e72SYuval Mintz 		   iov->initial_vfs,
27532a47e72SYuval Mintz 		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
27632a47e72SYuval Mintz 
27732a47e72SYuval Mintz 	/* Some sanity checks */
27832a47e72SYuval Mintz 	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
27932a47e72SYuval Mintz 	    iov->total_vfs > NUM_OF_VFS(cdev)) {
28032a47e72SYuval Mintz 		/* This can happen only due to a bug. In this case we set
28132a47e72SYuval Mintz 		 * num_vfs to zero to avoid memory corruption in the code that
28232a47e72SYuval Mintz 		 * assumes the maximum number of VFs.
28332a47e72SYuval Mintz 		 */
28432a47e72SYuval Mintz 		DP_NOTICE(cdev,
28532a47e72SYuval Mintz 			  "IOV: Unexpected number of VFs set: %d; setting num_vfs to zero\n",
28632a47e72SYuval Mintz 			  iov->num_vfs);
28732a47e72SYuval Mintz 
28832a47e72SYuval Mintz 		iov->num_vfs = 0;
28932a47e72SYuval Mintz 		iov->total_vfs = 0;
29032a47e72SYuval Mintz 	}
29132a47e72SYuval Mintz 
29232a47e72SYuval Mintz 	return 0;
29332a47e72SYuval Mintz }
29432a47e72SYuval Mintz 
29532a47e72SYuval Mintz static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
29632a47e72SYuval Mintz 					struct qed_ptt *p_ptt)
29732a47e72SYuval Mintz {
29832a47e72SYuval Mintz 	struct qed_igu_block *p_sb;
29932a47e72SYuval Mintz 	u16 sb_id;
30032a47e72SYuval Mintz 	u32 val;
30132a47e72SYuval Mintz 
30232a47e72SYuval Mintz 	if (!p_hwfn->hw_info.p_igu_info) {
30332a47e72SYuval Mintz 		DP_ERR(p_hwfn,
30432a47e72SYuval Mintz 		       "qed_iov_clear_vf_igu_blocks: IGU Info not initialized\n");
30532a47e72SYuval Mintz 		return;
30632a47e72SYuval Mintz 	}
30732a47e72SYuval Mintz 
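	/* Invalidate every IGU mapping line that is free and not owned by the PF */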
30832a47e72SYuval Mintz 	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
30932a47e72SYuval Mintz 	     sb_id++) {
31032a47e72SYuval Mintz 		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
31132a47e72SYuval Mintz 		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
31232a47e72SYuval Mintz 		    !(p_sb->status & QED_IGU_STATUS_PF)) {
31332a47e72SYuval Mintz 			val = qed_rd(p_hwfn, p_ptt,
31432a47e72SYuval Mintz 				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
31532a47e72SYuval Mintz 			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
31632a47e72SYuval Mintz 			qed_wr(p_hwfn, p_ptt,
31732a47e72SYuval Mintz 			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
31832a47e72SYuval Mintz 		}
31932a47e72SYuval Mintz 	}
32032a47e72SYuval Mintz }
32132a47e72SYuval Mintz 
32232a47e72SYuval Mintz static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
32332a47e72SYuval Mintz {
32432a47e72SYuval Mintz 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
32532a47e72SYuval Mintz 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
32632a47e72SYuval Mintz 	struct qed_bulletin_content *p_bulletin_virt;
32732a47e72SYuval Mintz 	dma_addr_t req_p, rply_p, bulletin_p;
32832a47e72SYuval Mintz 	union pfvf_tlvs *p_reply_virt_addr;
32932a47e72SYuval Mintz 	union vfpf_tlvs *p_req_virt_addr;
33032a47e72SYuval Mintz 	u8 idx = 0;
33132a47e72SYuval Mintz 
33232a47e72SYuval Mintz 	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
33332a47e72SYuval Mintz 
33432a47e72SYuval Mintz 	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
33532a47e72SYuval Mintz 	req_p = p_iov_info->mbx_msg_phys_addr;
33632a47e72SYuval Mintz 	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
33732a47e72SYuval Mintz 	rply_p = p_iov_info->mbx_reply_phys_addr;
33832a47e72SYuval Mintz 	p_bulletin_virt = p_iov_info->p_bulletins;
33932a47e72SYuval Mintz 	bulletin_p = p_iov_info->bulletins_phys;
34032a47e72SYuval Mintz 	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
34132a47e72SYuval Mintz 		DP_ERR(p_hwfn,
34232a47e72SYuval Mintz 		       "qed_iov_setup_vfdb called without allocating mem first\n");
34332a47e72SYuval Mintz 		return;
34432a47e72SYuval Mintz 	}
34532a47e72SYuval Mintz 
34632a47e72SYuval Mintz 	for (idx = 0; idx < p_iov->total_vfs; idx++) {
34732a47e72SYuval Mintz 		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
34832a47e72SYuval Mintz 		u32 concrete;
34932a47e72SYuval Mintz 
35032a47e72SYuval Mintz 		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
35132a47e72SYuval Mintz 		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
35232a47e72SYuval Mintz 		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
35332a47e72SYuval Mintz 		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
35432a47e72SYuval Mintz 
35532a47e72SYuval Mintz 		vf->state = VF_STOPPED;
35632a47e72SYuval Mintz 		vf->b_init = false;
35732a47e72SYuval Mintz 
35832a47e72SYuval Mintz 		vf->bulletin.phys = idx *
35932a47e72SYuval Mintz 				    sizeof(struct qed_bulletin_content) +
36032a47e72SYuval Mintz 				    bulletin_p;
36132a47e72SYuval Mintz 		vf->bulletin.p_virt = p_bulletin_virt + idx;
36232a47e72SYuval Mintz 		vf->bulletin.size = sizeof(struct qed_bulletin_content);
36332a47e72SYuval Mintz 
36432a47e72SYuval Mintz 		vf->relative_vf_id = idx;
36532a47e72SYuval Mintz 		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
36632a47e72SYuval Mintz 		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
36732a47e72SYuval Mintz 		vf->concrete_fid = concrete;
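		/* Opaque fid: parent PF's fid in the low byte, absolute VF id
		 * in the next byte (e.g. abs VF 5 on PF fid 0x02 -> 0x0502).
		 */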
36832a47e72SYuval Mintz 		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
36932a47e72SYuval Mintz 				 (vf->abs_vf_id << 8);
37032a47e72SYuval Mintz 		vf->vport_id = idx + 1;
3711cf2b1a9SYuval Mintz 
3721cf2b1a9SYuval Mintz 		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
3731cf2b1a9SYuval Mintz 		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
37432a47e72SYuval Mintz 	}
37532a47e72SYuval Mintz }
37632a47e72SYuval Mintz 
37732a47e72SYuval Mintz static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
37832a47e72SYuval Mintz {
37932a47e72SYuval Mintz 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
38032a47e72SYuval Mintz 	void **p_v_addr;
38132a47e72SYuval Mintz 	u16 num_vfs = 0;
38232a47e72SYuval Mintz 
38332a47e72SYuval Mintz 	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
38432a47e72SYuval Mintz 
38532a47e72SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
38632a47e72SYuval Mintz 		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
38732a47e72SYuval Mintz 
38832a47e72SYuval Mintz 	/* Allocate PF Mailbox buffer (per-VF) */
38932a47e72SYuval Mintz 	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
39032a47e72SYuval Mintz 	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
39132a47e72SYuval Mintz 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
39232a47e72SYuval Mintz 				       p_iov_info->mbx_msg_size,
39332a47e72SYuval Mintz 				       &p_iov_info->mbx_msg_phys_addr,
39432a47e72SYuval Mintz 				       GFP_KERNEL);
39532a47e72SYuval Mintz 	if (!*p_v_addr)
39632a47e72SYuval Mintz 		return -ENOMEM;
39732a47e72SYuval Mintz 
39832a47e72SYuval Mintz 	/* Allocate PF Mailbox Reply buffer (per-VF) */
39932a47e72SYuval Mintz 	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
40032a47e72SYuval Mintz 	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
40132a47e72SYuval Mintz 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
40232a47e72SYuval Mintz 				       p_iov_info->mbx_reply_size,
40332a47e72SYuval Mintz 				       &p_iov_info->mbx_reply_phys_addr,
40432a47e72SYuval Mintz 				       GFP_KERNEL);
40532a47e72SYuval Mintz 	if (!*p_v_addr)
40632a47e72SYuval Mintz 		return -ENOMEM;
40732a47e72SYuval Mintz 
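	/* Allocate PF bulletin boards buffer (per-VF) */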
40832a47e72SYuval Mintz 	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
40932a47e72SYuval Mintz 				     num_vfs;
41032a47e72SYuval Mintz 	p_v_addr = &p_iov_info->p_bulletins;
41132a47e72SYuval Mintz 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
41232a47e72SYuval Mintz 				       p_iov_info->bulletins_size,
41332a47e72SYuval Mintz 				       &p_iov_info->bulletins_phys,
41432a47e72SYuval Mintz 				       GFP_KERNEL);
41532a47e72SYuval Mintz 	if (!*p_v_addr)
41632a47e72SYuval Mintz 		return -ENOMEM;
41732a47e72SYuval Mintz 
41832a47e72SYuval Mintz 	DP_VERBOSE(p_hwfn,
41932a47e72SYuval Mintz 		   QED_MSG_IOV,
42032a47e72SYuval Mintz 		   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
42132a47e72SYuval Mintz 		   p_iov_info->mbx_msg_virt_addr,
42232a47e72SYuval Mintz 		   (u64) p_iov_info->mbx_msg_phys_addr,
42332a47e72SYuval Mintz 		   p_iov_info->mbx_reply_virt_addr,
42432a47e72SYuval Mintz 		   (u64) p_iov_info->mbx_reply_phys_addr,
42532a47e72SYuval Mintz 		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
42632a47e72SYuval Mintz 
42732a47e72SYuval Mintz 	return 0;
42832a47e72SYuval Mintz }
42932a47e72SYuval Mintz 
43032a47e72SYuval Mintz static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
43132a47e72SYuval Mintz {
43232a47e72SYuval Mintz 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
43332a47e72SYuval Mintz 
43432a47e72SYuval Mintz 	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
43532a47e72SYuval Mintz 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
43632a47e72SYuval Mintz 				  p_iov_info->mbx_msg_size,
43732a47e72SYuval Mintz 				  p_iov_info->mbx_msg_virt_addr,
43832a47e72SYuval Mintz 				  p_iov_info->mbx_msg_phys_addr);
43932a47e72SYuval Mintz 
44032a47e72SYuval Mintz 	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
44132a47e72SYuval Mintz 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
44232a47e72SYuval Mintz 				  p_iov_info->mbx_reply_size,
44332a47e72SYuval Mintz 				  p_iov_info->mbx_reply_virt_addr,
44432a47e72SYuval Mintz 				  p_iov_info->mbx_reply_phys_addr);
44532a47e72SYuval Mintz 
44632a47e72SYuval Mintz 	if (p_iov_info->p_bulletins)
44732a47e72SYuval Mintz 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
44832a47e72SYuval Mintz 				  p_iov_info->bulletins_size,
44932a47e72SYuval Mintz 				  p_iov_info->p_bulletins,
45032a47e72SYuval Mintz 				  p_iov_info->bulletins_phys);
45132a47e72SYuval Mintz }
45232a47e72SYuval Mintz 
45332a47e72SYuval Mintz int qed_iov_alloc(struct qed_hwfn *p_hwfn)
45432a47e72SYuval Mintz {
45532a47e72SYuval Mintz 	struct qed_pf_iov *p_sriov;
45632a47e72SYuval Mintz 
45732a47e72SYuval Mintz 	if (!IS_PF_SRIOV(p_hwfn)) {
45832a47e72SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
45932a47e72SYuval Mintz 			   "No SR-IOV - no need for IOV db\n");
46032a47e72SYuval Mintz 		return 0;
46132a47e72SYuval Mintz 	}
46232a47e72SYuval Mintz 
46332a47e72SYuval Mintz 	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
4642591c280SJoe Perches 	if (!p_sriov)
46532a47e72SYuval Mintz 		return -ENOMEM;
46632a47e72SYuval Mintz 
46732a47e72SYuval Mintz 	p_hwfn->pf_iov_info = p_sriov;
46832a47e72SYuval Mintz 
46932a47e72SYuval Mintz 	return qed_iov_allocate_vfdb(p_hwfn);
47032a47e72SYuval Mintz }
47132a47e72SYuval Mintz 
47232a47e72SYuval Mintz void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
47332a47e72SYuval Mintz {
47432a47e72SYuval Mintz 	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
47532a47e72SYuval Mintz 		return;
47632a47e72SYuval Mintz 
47732a47e72SYuval Mintz 	qed_iov_setup_vfdb(p_hwfn);
47832a47e72SYuval Mintz 	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
47932a47e72SYuval Mintz }
48032a47e72SYuval Mintz 
48132a47e72SYuval Mintz void qed_iov_free(struct qed_hwfn *p_hwfn)
48232a47e72SYuval Mintz {
48332a47e72SYuval Mintz 	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
48432a47e72SYuval Mintz 		qed_iov_free_vfdb(p_hwfn);
48532a47e72SYuval Mintz 		kfree(p_hwfn->pf_iov_info);
48632a47e72SYuval Mintz 	}
48732a47e72SYuval Mintz }
48832a47e72SYuval Mintz 
48932a47e72SYuval Mintz void qed_iov_free_hw_info(struct qed_dev *cdev)
49032a47e72SYuval Mintz {
49132a47e72SYuval Mintz 	kfree(cdev->p_iov_info);
49232a47e72SYuval Mintz 	cdev->p_iov_info = NULL;
49332a47e72SYuval Mintz }
49432a47e72SYuval Mintz 
49532a47e72SYuval Mintz int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
49632a47e72SYuval Mintz {
49732a47e72SYuval Mintz 	struct qed_dev *cdev = p_hwfn->cdev;
49832a47e72SYuval Mintz 	int pos;
49932a47e72SYuval Mintz 	int rc;
50032a47e72SYuval Mintz 
5011408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
5021408cc1fSYuval Mintz 		return 0;
5031408cc1fSYuval Mintz 
50432a47e72SYuval Mintz 	/* Learn the PCI configuration */
50532a47e72SYuval Mintz 	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
50632a47e72SYuval Mintz 				      PCI_EXT_CAP_ID_SRIOV);
50732a47e72SYuval Mintz 	if (!pos) {
50832a47e72SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
50932a47e72SYuval Mintz 		return 0;
51032a47e72SYuval Mintz 	}
51132a47e72SYuval Mintz 
51232a47e72SYuval Mintz 	/* Allocate a new struct for IOV information */
51332a47e72SYuval Mintz 	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
5142591c280SJoe Perches 	if (!cdev->p_iov_info)
51532a47e72SYuval Mintz 		return -ENOMEM;
5162591c280SJoe Perches 
51732a47e72SYuval Mintz 	cdev->p_iov_info->pos = pos;
51832a47e72SYuval Mintz 
51932a47e72SYuval Mintz 	rc = qed_iov_pci_cfg_info(cdev);
52032a47e72SYuval Mintz 	if (rc)
52132a47e72SYuval Mintz 		return rc;
52232a47e72SYuval Mintz 
52332a47e72SYuval Mintz 	/* We want PF IOV to be synonymous with the existence of p_iov_info;
52432a47e72SYuval Mintz 	 * In case the capability is published but there are no VFs, simply
52532a47e72SYuval Mintz 	 * de-allocate the struct.
52632a47e72SYuval Mintz 	 */
52732a47e72SYuval Mintz 	if (!cdev->p_iov_info->total_vfs) {
52832a47e72SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
52932a47e72SYuval Mintz 			   "IOV capabilities, but no VFs are published\n");
53032a47e72SYuval Mintz 		kfree(cdev->p_iov_info);
53132a47e72SYuval Mintz 		cdev->p_iov_info = NULL;
53232a47e72SYuval Mintz 		return 0;
53332a47e72SYuval Mintz 	}
53432a47e72SYuval Mintz 
53532a47e72SYuval Mintz 	/* Calculate the first VF index - this is a bit tricky; Basically,
53632a47e72SYuval Mintz 	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
53732a47e72SYuval Mintz 	 * after the first engine's VFs.
53832a47e72SYuval Mintz 	 */
53932a47e72SYuval Mintz 	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
54032a47e72SYuval Mintz 					   p_hwfn->abs_pf_id - 16;
54132a47e72SYuval Mintz 	if (QED_PATH_ID(p_hwfn))
54232a47e72SYuval Mintz 		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
54332a47e72SYuval Mintz 
54432a47e72SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
54532a47e72SYuval Mintz 		   "First VF in hwfn 0x%08x\n",
54632a47e72SYuval Mintz 		   cdev->p_iov_info->first_vf_in_pf);
54732a47e72SYuval Mintz 
54832a47e72SYuval Mintz 	return 0;
54932a47e72SYuval Mintz }
55032a47e72SYuval Mintz 
5517eff82b0SYuval Mintz bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
5527eff82b0SYuval Mintz 			      int vfid, bool b_fail_malicious)
55337bff2b9SYuval Mintz {
55437bff2b9SYuval Mintz 	/* Check PF supports sriov */
555b0409fa0SYuval Mintz 	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
556b0409fa0SYuval Mintz 	    !IS_PF_SRIOV_ALLOC(p_hwfn))
55737bff2b9SYuval Mintz 		return false;
55837bff2b9SYuval Mintz 
55937bff2b9SYuval Mintz 	/* Check VF validity */
5607eff82b0SYuval Mintz 	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
56137bff2b9SYuval Mintz 		return false;
56237bff2b9SYuval Mintz 
56337bff2b9SYuval Mintz 	return true;
56437bff2b9SYuval Mintz }
56537bff2b9SYuval Mintz 
5667eff82b0SYuval Mintz bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
5677eff82b0SYuval Mintz {
5687eff82b0SYuval Mintz 	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
5697eff82b0SYuval Mintz }
5707eff82b0SYuval Mintz 
5710b55e27dSYuval Mintz static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
5720b55e27dSYuval Mintz 				      u16 rel_vf_id, u8 to_disable)
5730b55e27dSYuval Mintz {
5740b55e27dSYuval Mintz 	struct qed_vf_info *vf;
5750b55e27dSYuval Mintz 	int i;
5760b55e27dSYuval Mintz 
5770b55e27dSYuval Mintz 	for_each_hwfn(cdev, i) {
5780b55e27dSYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5790b55e27dSYuval Mintz 
5800b55e27dSYuval Mintz 		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
5810b55e27dSYuval Mintz 		if (!vf)
5820b55e27dSYuval Mintz 			continue;
5830b55e27dSYuval Mintz 
5840b55e27dSYuval Mintz 		vf->to_disable = to_disable;
5850b55e27dSYuval Mintz 	}
5860b55e27dSYuval Mintz }
5870b55e27dSYuval Mintz 
588ba56947aSBaoyou Xie static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
5890b55e27dSYuval Mintz {
5900b55e27dSYuval Mintz 	u16 i;
5910b55e27dSYuval Mintz 
5920b55e27dSYuval Mintz 	if (!IS_QED_SRIOV(cdev))
5930b55e27dSYuval Mintz 		return;
5940b55e27dSYuval Mintz 
5950b55e27dSYuval Mintz 	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
5960b55e27dSYuval Mintz 		qed_iov_set_vf_to_disable(cdev, i, to_disable);
5970b55e27dSYuval Mintz }
5980b55e27dSYuval Mintz 
5991408cc1fSYuval Mintz static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
6001408cc1fSYuval Mintz 				       struct qed_ptt *p_ptt, u8 abs_vfid)
6011408cc1fSYuval Mintz {
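	/* Each WAS_ERROR clear register covers 32 VFs; write this VF's bit to
	 * clear its error indication (e.g. abs VF 40 -> bit 8 of the second
	 * register).
	 */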
6021408cc1fSYuval Mintz 	qed_wr(p_hwfn, p_ptt,
6031408cc1fSYuval Mintz 	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
6041408cc1fSYuval Mintz 	       1 << (abs_vfid & 0x1f));
6051408cc1fSYuval Mintz }
6061408cc1fSYuval Mintz 
607dacd88d6SYuval Mintz static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
608dacd88d6SYuval Mintz 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
609dacd88d6SYuval Mintz {
610dacd88d6SYuval Mintz 	int i;
611dacd88d6SYuval Mintz 
612dacd88d6SYuval Mintz 	/* Set VF masks and configuration - pretend */
613dacd88d6SYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
614dacd88d6SYuval Mintz 
615dacd88d6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
616dacd88d6SYuval Mintz 
617dacd88d6SYuval Mintz 	/* unpretend */
618dacd88d6SYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
619dacd88d6SYuval Mintz 
620dacd88d6SYuval Mintz 	/* Iterate over all of the VF's SBs and clear their consumers */
621b2b897ebSYuval Mintz 	for (i = 0; i < vf->num_sbs; i++)
622b2b897ebSYuval Mintz 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
623b2b897ebSYuval Mintz 						vf->igu_sbs[i],
624b2b897ebSYuval Mintz 						vf->opaque_fid, true);
625dacd88d6SYuval Mintz }
626dacd88d6SYuval Mintz 
6270b55e27dSYuval Mintz static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
6280b55e27dSYuval Mintz 				   struct qed_ptt *p_ptt,
6290b55e27dSYuval Mintz 				   struct qed_vf_info *vf, bool enable)
6300b55e27dSYuval Mintz {
6310b55e27dSYuval Mintz 	u32 igu_vf_conf;
6320b55e27dSYuval Mintz 
6330b55e27dSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
6340b55e27dSYuval Mintz 
6350b55e27dSYuval Mintz 	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
6360b55e27dSYuval Mintz 
6370b55e27dSYuval Mintz 	if (enable)
6380b55e27dSYuval Mintz 		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
6390b55e27dSYuval Mintz 	else
6400b55e27dSYuval Mintz 		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
6410b55e27dSYuval Mintz 
6420b55e27dSYuval Mintz 	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
6430b55e27dSYuval Mintz 
6440b55e27dSYuval Mintz 	/* unpretend */
6450b55e27dSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
6460b55e27dSYuval Mintz }
6470b55e27dSYuval Mintz 
6481408cc1fSYuval Mintz static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
6491408cc1fSYuval Mintz 				    struct qed_ptt *p_ptt,
6501408cc1fSYuval Mintz 				    struct qed_vf_info *vf)
6511408cc1fSYuval Mintz {
6521408cc1fSYuval Mintz 	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
6531408cc1fSYuval Mintz 	int rc;
6541408cc1fSYuval Mintz 
6550b55e27dSYuval Mintz 	if (vf->to_disable)
6560b55e27dSYuval Mintz 		return 0;
6570b55e27dSYuval Mintz 
6581408cc1fSYuval Mintz 	DP_VERBOSE(p_hwfn,
6591408cc1fSYuval Mintz 		   QED_MSG_IOV,
6601408cc1fSYuval Mintz 		   "Enable internal access for vf %x [abs %x]\n",
6611408cc1fSYuval Mintz 		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
6621408cc1fSYuval Mintz 
6631408cc1fSYuval Mintz 	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
6641408cc1fSYuval Mintz 
665b2b897ebSYuval Mintz 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
666b2b897ebSYuval Mintz 
6677eff82b0SYuval Mintz 	/* It's possible VF was previously considered malicious */
6687eff82b0SYuval Mintz 	vf->b_malicious = false;
6697eff82b0SYuval Mintz 
6701408cc1fSYuval Mintz 	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
6711408cc1fSYuval Mintz 	if (rc)
6721408cc1fSYuval Mintz 		return rc;
6731408cc1fSYuval Mintz 
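	/* Pretend to the VF's concrete fid so the IGU/RT configuration below
	 * is applied on behalf of the VF.
	 */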
6741408cc1fSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
6751408cc1fSYuval Mintz 
6761408cc1fSYuval Mintz 	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
6771408cc1fSYuval Mintz 	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
6781408cc1fSYuval Mintz 
6791408cc1fSYuval Mintz 	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
6801408cc1fSYuval Mintz 		     p_hwfn->hw_info.hw_mode);
6811408cc1fSYuval Mintz 
6821408cc1fSYuval Mintz 	/* unpretend */
6831408cc1fSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
6841408cc1fSYuval Mintz 
6851408cc1fSYuval Mintz 	vf->state = VF_FREE;
6861408cc1fSYuval Mintz 
6871408cc1fSYuval Mintz 	return rc;
6881408cc1fSYuval Mintz }
6891408cc1fSYuval Mintz 
6900b55e27dSYuval Mintz /**
6910b55e27dSYuval Mintz  * @brief qed_iov_config_perm_table - configure the permission
6920b55e27dSYuval Mintz  *      zone table.
6930b55e27dSYuval Mintz  *      In E4, queue zone permission table size is 320x9. There
6940b55e27dSYuval Mintz  *      are 320 VF queues for a single-engine device (256 for a
6950b55e27dSYuval Mintz  *      dual-engine device), and each entry has the following format:
6960b55e27dSYuval Mintz  *      {Valid, VF[7:0]}
6970b55e27dSYuval Mintz  * @param p_hwfn
6980b55e27dSYuval Mintz  * @param p_ptt
6990b55e27dSYuval Mintz  * @param vf
7000b55e27dSYuval Mintz  * @param enable
7010b55e27dSYuval Mintz  */
7020b55e27dSYuval Mintz static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
7030b55e27dSYuval Mintz 				      struct qed_ptt *p_ptt,
7040b55e27dSYuval Mintz 				      struct qed_vf_info *vf, u8 enable)
7050b55e27dSYuval Mintz {
7060b55e27dSYuval Mintz 	u32 reg_addr, val;
7070b55e27dSYuval Mintz 	u16 qzone_id = 0;
7080b55e27dSYuval Mintz 	int qid;
7090b55e27dSYuval Mintz 
7100b55e27dSYuval Mintz 	for (qid = 0; qid < vf->num_rxqs; qid++) {
7110b55e27dSYuval Mintz 		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
7120b55e27dSYuval Mintz 				&qzone_id);
7130b55e27dSYuval Mintz 
7140b55e27dSYuval Mintz 		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
7151a635e48SYuval Mintz 		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
7160b55e27dSYuval Mintz 		qed_wr(p_hwfn, p_ptt, reg_addr, val);
7170b55e27dSYuval Mintz 	}
7180b55e27dSYuval Mintz }
7190b55e27dSYuval Mintz 
720dacd88d6SYuval Mintz static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
721dacd88d6SYuval Mintz 				      struct qed_ptt *p_ptt,
722dacd88d6SYuval Mintz 				      struct qed_vf_info *vf)
723dacd88d6SYuval Mintz {
724dacd88d6SYuval Mintz 	/* Reset vf in IGU - interrupts are still disabled */
725dacd88d6SYuval Mintz 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
726dacd88d6SYuval Mintz 
727dacd88d6SYuval Mintz 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
728dacd88d6SYuval Mintz 
729dacd88d6SYuval Mintz 	/* Permission Table */
730dacd88d6SYuval Mintz 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
731dacd88d6SYuval Mintz }
732dacd88d6SYuval Mintz 
7331408cc1fSYuval Mintz static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
7341408cc1fSYuval Mintz 				   struct qed_ptt *p_ptt,
7351408cc1fSYuval Mintz 				   struct qed_vf_info *vf, u16 num_rx_queues)
7361408cc1fSYuval Mintz {
7371408cc1fSYuval Mintz 	struct qed_igu_block *igu_blocks;
7381408cc1fSYuval Mintz 	int qid = 0, igu_id = 0;
7391408cc1fSYuval Mintz 	u32 val = 0;
7401408cc1fSYuval Mintz 
7411408cc1fSYuval Mintz 	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
7421408cc1fSYuval Mintz 
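	/* A VF cannot get more SBs than there are free IGU blocks; clamp the
	 * request and reserve the blocks.
	 */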
7431408cc1fSYuval Mintz 	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
7441408cc1fSYuval Mintz 		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
7451408cc1fSYuval Mintz 	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
7461408cc1fSYuval Mintz 
7471408cc1fSYuval Mintz 	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
7481408cc1fSYuval Mintz 	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
7491408cc1fSYuval Mintz 	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
7501408cc1fSYuval Mintz 
7511408cc1fSYuval Mintz 	while ((qid < num_rx_queues) &&
7521408cc1fSYuval Mintz 	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
7531408cc1fSYuval Mintz 		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
7541408cc1fSYuval Mintz 			struct cau_sb_entry sb_entry;
7551408cc1fSYuval Mintz 
7561408cc1fSYuval Mintz 			vf->igu_sbs[qid] = (u16)igu_id;
7571408cc1fSYuval Mintz 			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
7581408cc1fSYuval Mintz 
7591408cc1fSYuval Mintz 			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
7601408cc1fSYuval Mintz 
7611408cc1fSYuval Mintz 			qed_wr(p_hwfn, p_ptt,
7621408cc1fSYuval Mintz 			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
7631408cc1fSYuval Mintz 			       val);
7641408cc1fSYuval Mintz 
7651408cc1fSYuval Mintz 			/* Configure in CAU the IGU SBs that were marked valid */
7661408cc1fSYuval Mintz 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
7671408cc1fSYuval Mintz 					      p_hwfn->rel_pf_id,
7681408cc1fSYuval Mintz 					      vf->abs_vf_id, 1);
7691408cc1fSYuval Mintz 			qed_dmae_host2grc(p_hwfn, p_ptt,
7701408cc1fSYuval Mintz 					  (u64)(uintptr_t)&sb_entry,
7711408cc1fSYuval Mintz 					  CAU_REG_SB_VAR_MEMORY +
7721408cc1fSYuval Mintz 					  igu_id * sizeof(u64), 2, 0);
7731408cc1fSYuval Mintz 			qid++;
7741408cc1fSYuval Mintz 		}
7751408cc1fSYuval Mintz 		igu_id++;
7761408cc1fSYuval Mintz 	}
7771408cc1fSYuval Mintz 
7781408cc1fSYuval Mintz 	vf->num_sbs = (u8) num_rx_queues;
7791408cc1fSYuval Mintz 
7801408cc1fSYuval Mintz 	return vf->num_sbs;
7811408cc1fSYuval Mintz }
7821408cc1fSYuval Mintz 
7830b55e27dSYuval Mintz static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
7840b55e27dSYuval Mintz 				    struct qed_ptt *p_ptt,
7850b55e27dSYuval Mintz 				    struct qed_vf_info *vf)
7860b55e27dSYuval Mintz {
7870b55e27dSYuval Mintz 	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
7880b55e27dSYuval Mintz 	int idx, igu_id;
7890b55e27dSYuval Mintz 	u32 addr, val;
7900b55e27dSYuval Mintz 
7910b55e27dSYuval Mintz 	/* Invalidate igu CAM lines and mark them as free */
7920b55e27dSYuval Mintz 	for (idx = 0; idx < vf->num_sbs; idx++) {
7930b55e27dSYuval Mintz 		igu_id = vf->igu_sbs[idx];
7940b55e27dSYuval Mintz 		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
7950b55e27dSYuval Mintz 
7960b55e27dSYuval Mintz 		val = qed_rd(p_hwfn, p_ptt, addr);
7970b55e27dSYuval Mintz 		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
7980b55e27dSYuval Mintz 		qed_wr(p_hwfn, p_ptt, addr, val);
7990b55e27dSYuval Mintz 
8000b55e27dSYuval Mintz 		p_info->igu_map.igu_blocks[igu_id].status |=
8010b55e27dSYuval Mintz 		    QED_IGU_STATUS_FREE;
8020b55e27dSYuval Mintz 
8030b55e27dSYuval Mintz 		p_hwfn->hw_info.p_igu_info->free_blks++;
8040b55e27dSYuval Mintz 	}
8050b55e27dSYuval Mintz 
8060b55e27dSYuval Mintz 	vf->num_sbs = 0;
8070b55e27dSYuval Mintz }
8080b55e27dSYuval Mintz 
8091408cc1fSYuval Mintz static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
8101408cc1fSYuval Mintz 				  struct qed_ptt *p_ptt,
8113da7a37aSMintz, Yuval 				  struct qed_iov_vf_init_params *p_params)
8121408cc1fSYuval Mintz {
8131408cc1fSYuval Mintz 	u8 num_of_vf_avaiable_chains = 0;
8141408cc1fSYuval Mintz 	struct qed_vf_info *vf = NULL;
8153da7a37aSMintz, Yuval 	u16 qid, num_irqs;
8161408cc1fSYuval Mintz 	int rc = 0;
8171408cc1fSYuval Mintz 	u32 cids;
8181408cc1fSYuval Mintz 	u8 i;
8191408cc1fSYuval Mintz 
8203da7a37aSMintz, Yuval 	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
8211408cc1fSYuval Mintz 	if (!vf) {
8221408cc1fSYuval Mintz 		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf: vf is NULL\n");
8231408cc1fSYuval Mintz 		return -EINVAL;
8241408cc1fSYuval Mintz 	}
8251408cc1fSYuval Mintz 
8261408cc1fSYuval Mintz 	if (vf->b_init) {
8273da7a37aSMintz, Yuval 		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
8283da7a37aSMintz, Yuval 			  p_params->rel_vf_id);
8291408cc1fSYuval Mintz 		return -EINVAL;
8301408cc1fSYuval Mintz 	}
8311408cc1fSYuval Mintz 
8323da7a37aSMintz, Yuval 	/* Perform sanity checking on the requested queue_id */
8333da7a37aSMintz, Yuval 	for (i = 0; i < p_params->num_queues; i++) {
8343da7a37aSMintz, Yuval 		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
8353da7a37aSMintz, Yuval 		u16 max_vf_qzone = min_vf_qzone +
8363da7a37aSMintz, Yuval 		    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;
8373da7a37aSMintz, Yuval 
8383da7a37aSMintz, Yuval 		qid = p_params->req_rx_queue[i];
8393da7a37aSMintz, Yuval 		if (qid < min_vf_qzone || qid > max_vf_qzone) {
8403da7a37aSMintz, Yuval 			DP_NOTICE(p_hwfn,
8413da7a37aSMintz, Yuval 				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
8423da7a37aSMintz, Yuval 				  qid,
8433da7a37aSMintz, Yuval 				  p_params->rel_vf_id,
8443da7a37aSMintz, Yuval 				  min_vf_qzone, max_vf_qzone);
8453da7a37aSMintz, Yuval 			return -EINVAL;
8463da7a37aSMintz, Yuval 		}
8473da7a37aSMintz, Yuval 
8483da7a37aSMintz, Yuval 		qid = p_params->req_tx_queue[i];
8493da7a37aSMintz, Yuval 		if (qid > max_vf_qzone) {
8503da7a37aSMintz, Yuval 			DP_NOTICE(p_hwfn,
8513da7a37aSMintz, Yuval 				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
8523da7a37aSMintz, Yuval 				  qid, p_params->rel_vf_id, max_vf_qzone);
8533da7a37aSMintz, Yuval 			return -EINVAL;
8543da7a37aSMintz, Yuval 		}
8553da7a37aSMintz, Yuval 
8563da7a37aSMintz, Yuval 		/* If client *really* wants, Tx qid can be shared with PF */
8573da7a37aSMintz, Yuval 		if (qid < min_vf_qzone)
8583da7a37aSMintz, Yuval 			DP_VERBOSE(p_hwfn,
8593da7a37aSMintz, Yuval 				   QED_MSG_IOV,
8603da7a37aSMintz, Yuval 				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
8613da7a37aSMintz, Yuval 				   p_params->rel_vf_id, qid, i);
8623da7a37aSMintz, Yuval 	}
8633da7a37aSMintz, Yuval 
8641408cc1fSYuval Mintz 	/* Limit number of queues according to number of CIDs */
8651408cc1fSYuval Mintz 	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
8661408cc1fSYuval Mintz 	DP_VERBOSE(p_hwfn,
8671408cc1fSYuval Mintz 		   QED_MSG_IOV,
8681408cc1fSYuval Mintz 		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
8693da7a37aSMintz, Yuval 		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
8703da7a37aSMintz, Yuval 	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
8711408cc1fSYuval Mintz 
8721408cc1fSYuval Mintz 	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
8731408cc1fSYuval Mintz 							     p_ptt,
8743da7a37aSMintz, Yuval 							     vf, num_irqs);
8751408cc1fSYuval Mintz 	if (!num_of_vf_avaiable_chains) {
8761408cc1fSYuval Mintz 		DP_ERR(p_hwfn, "no available igu sbs\n");
8771408cc1fSYuval Mintz 		return -ENOMEM;
8781408cc1fSYuval Mintz 	}
8791408cc1fSYuval Mintz 
8801408cc1fSYuval Mintz 	/* Choose queue number and index ranges */
8811408cc1fSYuval Mintz 	vf->num_rxqs = num_of_vf_avaiable_chains;
8821408cc1fSYuval Mintz 	vf->num_txqs = num_of_vf_avaiable_chains;
8831408cc1fSYuval Mintz 
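	/* Record the requested Rx/Tx queue-zone ids and assign per-VF CIDs */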
8841408cc1fSYuval Mintz 	for (i = 0; i < vf->num_rxqs; i++) {
8853da7a37aSMintz, Yuval 		struct qed_vf_q_info *p_queue = &vf->vf_queues[i];
8861408cc1fSYuval Mintz 
8873da7a37aSMintz, Yuval 		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
8883da7a37aSMintz, Yuval 		p_queue->fw_tx_qid = p_params->req_tx_queue[i];
8891408cc1fSYuval Mintz 
8901408cc1fSYuval Mintz 		/* CIDs are per-VF, so no problem having them 0-based. */
8913da7a37aSMintz, Yuval 		p_queue->fw_cid = i;
8921408cc1fSYuval Mintz 
8931408cc1fSYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
8943da7a37aSMintz, Yuval 			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]  CID %04x\n",
8953da7a37aSMintz, Yuval 			   vf->relative_vf_id,
8963da7a37aSMintz, Yuval 			   i, vf->igu_sbs[i],
8973da7a37aSMintz, Yuval 			   p_queue->fw_rx_qid,
8983da7a37aSMintz, Yuval 			   p_queue->fw_tx_qid, p_queue->fw_cid);
8991408cc1fSYuval Mintz 	}
9003da7a37aSMintz, Yuval 
9011408cc1fSYuval Mintz 	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
9021408cc1fSYuval Mintz 	if (!rc) {
9031408cc1fSYuval Mintz 		vf->b_init = true;
9041408cc1fSYuval Mintz 
9051408cc1fSYuval Mintz 		if (IS_LEAD_HWFN(p_hwfn))
9061408cc1fSYuval Mintz 			p_hwfn->cdev->p_iov_info->num_vfs++;
9071408cc1fSYuval Mintz 	}
9081408cc1fSYuval Mintz 
9091408cc1fSYuval Mintz 	return rc;
9101408cc1fSYuval Mintz }
9111408cc1fSYuval Mintz 
912079d20a6SManish Chopra static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
913079d20a6SManish Chopra 			     u16 vfid,
914079d20a6SManish Chopra 			     struct qed_mcp_link_params *params,
915079d20a6SManish Chopra 			     struct qed_mcp_link_state *link,
916079d20a6SManish Chopra 			     struct qed_mcp_link_capabilities *p_caps)
917079d20a6SManish Chopra {
918079d20a6SManish Chopra 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
919079d20a6SManish Chopra 						       vfid,
920079d20a6SManish Chopra 						       false);
921079d20a6SManish Chopra 	struct qed_bulletin_content *p_bulletin;
922079d20a6SManish Chopra 
923079d20a6SManish Chopra 	if (!p_vf)
924079d20a6SManish Chopra 		return;
925079d20a6SManish Chopra 
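	/* Mirror the requested link configuration, current link state and
	 * capabilities into the VF's bulletin board.
	 */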
926079d20a6SManish Chopra 	p_bulletin = p_vf->bulletin.p_virt;
927079d20a6SManish Chopra 	p_bulletin->req_autoneg = params->speed.autoneg;
928079d20a6SManish Chopra 	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
929079d20a6SManish Chopra 	p_bulletin->req_forced_speed = params->speed.forced_speed;
930079d20a6SManish Chopra 	p_bulletin->req_autoneg_pause = params->pause.autoneg;
931079d20a6SManish Chopra 	p_bulletin->req_forced_rx = params->pause.forced_rx;
932079d20a6SManish Chopra 	p_bulletin->req_forced_tx = params->pause.forced_tx;
933079d20a6SManish Chopra 	p_bulletin->req_loopback = params->loopback_mode;
934079d20a6SManish Chopra 
935079d20a6SManish Chopra 	p_bulletin->link_up = link->link_up;
936079d20a6SManish Chopra 	p_bulletin->speed = link->speed;
937079d20a6SManish Chopra 	p_bulletin->full_duplex = link->full_duplex;
938079d20a6SManish Chopra 	p_bulletin->autoneg = link->an;
939079d20a6SManish Chopra 	p_bulletin->autoneg_complete = link->an_complete;
940079d20a6SManish Chopra 	p_bulletin->parallel_detection = link->parallel_detection;
941079d20a6SManish Chopra 	p_bulletin->pfc_enabled = link->pfc_enabled;
942079d20a6SManish Chopra 	p_bulletin->partner_adv_speed = link->partner_adv_speed;
943079d20a6SManish Chopra 	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
944079d20a6SManish Chopra 	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
945079d20a6SManish Chopra 	p_bulletin->partner_adv_pause = link->partner_adv_pause;
946079d20a6SManish Chopra 	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
947079d20a6SManish Chopra 
948079d20a6SManish Chopra 	p_bulletin->capability_speed = p_caps->speed_capabilities;
949079d20a6SManish Chopra }
950079d20a6SManish Chopra 
9510b55e27dSYuval Mintz static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
9520b55e27dSYuval Mintz 				     struct qed_ptt *p_ptt, u16 rel_vf_id)
9530b55e27dSYuval Mintz {
954079d20a6SManish Chopra 	struct qed_mcp_link_capabilities caps;
955079d20a6SManish Chopra 	struct qed_mcp_link_params params;
956079d20a6SManish Chopra 	struct qed_mcp_link_state link;
9570b55e27dSYuval Mintz 	struct qed_vf_info *vf = NULL;
9580b55e27dSYuval Mintz 
9590b55e27dSYuval Mintz 	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
9600b55e27dSYuval Mintz 	if (!vf) {
9610b55e27dSYuval Mintz 		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf: vf is NULL\n");
9620b55e27dSYuval Mintz 		return -EINVAL;
9630b55e27dSYuval Mintz 	}
9640b55e27dSYuval Mintz 
96536558c3dSYuval Mintz 	if (vf->bulletin.p_virt)
96636558c3dSYuval Mintz 		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
96736558c3dSYuval Mintz 
96836558c3dSYuval Mintz 	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
96936558c3dSYuval Mintz 
970079d20a6SManish Chopra 	/* Get the link configuration back in bulletin so
971079d20a6SManish Chopra 	 * that when VFs are re-enabled they get the actual
972079d20a6SManish Chopra 	 * link configuration.
973079d20a6SManish Chopra 	 */
974079d20a6SManish Chopra 	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
975079d20a6SManish Chopra 	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
976079d20a6SManish Chopra 	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
977079d20a6SManish Chopra 	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
978079d20a6SManish Chopra 
9791fe614d1SYuval Mintz 	/* Forget the VF's acquisition message */
9801fe614d1SYuval Mintz 	memset(&vf->acquire, 0, sizeof(vf->acquire));
9810b55e27dSYuval Mintz 
9820b55e27dSYuval Mintz 	/* Disabling interrupts and resetting the permission table were done
9830b55e27dSYuval Mintz 	 * during vf-close; however, we could get here without going through vf_close
9840b55e27dSYuval Mintz 	 */
9850b55e27dSYuval Mintz 	/* Disable Interrupts for VF */
9860b55e27dSYuval Mintz 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
9870b55e27dSYuval Mintz 
9880b55e27dSYuval Mintz 	/* Reset Permission table */
9890b55e27dSYuval Mintz 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
9900b55e27dSYuval Mintz 
9910b55e27dSYuval Mintz 	vf->num_rxqs = 0;
9920b55e27dSYuval Mintz 	vf->num_txqs = 0;
9930b55e27dSYuval Mintz 	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
9940b55e27dSYuval Mintz 
9950b55e27dSYuval Mintz 	if (vf->b_init) {
9960b55e27dSYuval Mintz 		vf->b_init = false;
9970b55e27dSYuval Mintz 
9980b55e27dSYuval Mintz 		if (IS_LEAD_HWFN(p_hwfn))
9990b55e27dSYuval Mintz 			p_hwfn->cdev->p_iov_info->num_vfs--;
10000b55e27dSYuval Mintz 	}
10010b55e27dSYuval Mintz 
10020b55e27dSYuval Mintz 	return 0;
10030b55e27dSYuval Mintz }
10040b55e27dSYuval Mintz 
100537bff2b9SYuval Mintz static bool qed_iov_tlv_supported(u16 tlvtype)
100637bff2b9SYuval Mintz {
100737bff2b9SYuval Mintz 	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
100837bff2b9SYuval Mintz }
100937bff2b9SYuval Mintz 
101037bff2b9SYuval Mintz /* place a given tlv on the tlv buffer, continuing current tlv list */
101137bff2b9SYuval Mintz void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
101237bff2b9SYuval Mintz {
101337bff2b9SYuval Mintz 	struct channel_tlv *tl = (struct channel_tlv *)*offset;
101437bff2b9SYuval Mintz 
101537bff2b9SYuval Mintz 	tl->type = type;
101637bff2b9SYuval Mintz 	tl->length = length;
101737bff2b9SYuval Mintz 
101837bff2b9SYuval Mintz 	/* Offset should keep pointing to next TLV (the end of the last) */
101937bff2b9SYuval Mintz 	*offset += length;
102037bff2b9SYuval Mintz 
102137bff2b9SYuval Mintz 	/* Return a pointer to the start of the added tlv */
102237bff2b9SYuval Mintz 	return *offset - length;
102337bff2b9SYuval Mintz }
102437bff2b9SYuval Mintz 
102537bff2b9SYuval Mintz /* list the types and lengths of the tlvs on the buffer */
102637bff2b9SYuval Mintz void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
102737bff2b9SYuval Mintz {
102837bff2b9SYuval Mintz 	u16 i = 1, total_length = 0;
102937bff2b9SYuval Mintz 	struct channel_tlv *tlv;
103037bff2b9SYuval Mintz 
103137bff2b9SYuval Mintz 	do {
103237bff2b9SYuval Mintz 		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
103337bff2b9SYuval Mintz 
103437bff2b9SYuval Mintz 		/* output tlv */
103537bff2b9SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
103637bff2b9SYuval Mintz 			   "TLV number %d: type %d, length %d\n",
103737bff2b9SYuval Mintz 			   i, tlv->type, tlv->length);
103837bff2b9SYuval Mintz 
103937bff2b9SYuval Mintz 		if (tlv->type == CHANNEL_TLV_LIST_END)
104037bff2b9SYuval Mintz 			return;
104137bff2b9SYuval Mintz 
104237bff2b9SYuval Mintz 		/* Validate entry - protect against malicious VFs */
104337bff2b9SYuval Mintz 		if (!tlv->length) {
104437bff2b9SYuval Mintz 			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
104537bff2b9SYuval Mintz 			return;
104637bff2b9SYuval Mintz 		}
104737bff2b9SYuval Mintz 
104837bff2b9SYuval Mintz 		total_length += tlv->length;
104937bff2b9SYuval Mintz 
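		/* Bound the walk by the TLV buffer size to protect against a
		 * corrupt or malicious list.
		 */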
105037bff2b9SYuval Mintz 		if (total_length >= sizeof(struct tlv_buffer_size)) {
105137bff2b9SYuval Mintz 			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
105237bff2b9SYuval Mintz 			return;
105337bff2b9SYuval Mintz 		}
105437bff2b9SYuval Mintz 
105537bff2b9SYuval Mintz 		i++;
105637bff2b9SYuval Mintz 	} while (1);
105737bff2b9SYuval Mintz }
105837bff2b9SYuval Mintz 
105937bff2b9SYuval Mintz static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
106037bff2b9SYuval Mintz 				  struct qed_ptt *p_ptt,
106137bff2b9SYuval Mintz 				  struct qed_vf_info *p_vf,
106237bff2b9SYuval Mintz 				  u16 length, u8 status)
106337bff2b9SYuval Mintz {
106437bff2b9SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
106537bff2b9SYuval Mintz 	struct qed_dmae_params params;
106637bff2b9SYuval Mintz 	u8 eng_vf_id;
106737bff2b9SYuval Mintz 
106837bff2b9SYuval Mintz 	mbx->reply_virt->default_resp.hdr.status = status;
106937bff2b9SYuval Mintz 
107037bff2b9SYuval Mintz 	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
107137bff2b9SYuval Mintz 
107237bff2b9SYuval Mintz 	eng_vf_id = p_vf->abs_vf_id;
107337bff2b9SYuval Mintz 
107437bff2b9SYuval Mintz 	memset(&params, 0, sizeof(struct qed_dmae_params));
107537bff2b9SYuval Mintz 	params.flags = QED_DMAE_FLAG_VF_DST;
107637bff2b9SYuval Mintz 	params.dst_vfid = eng_vf_id;
107737bff2b9SYuval Mintz 
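	/* Copy the reply body first and the leading quadword last, so a VF
	 * polling the reply header never observes a partially written response.
	 */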
107837bff2b9SYuval Mintz 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
107937bff2b9SYuval Mintz 			   mbx->req_virt->first_tlv.reply_address +
108037bff2b9SYuval Mintz 			   sizeof(u64),
108137bff2b9SYuval Mintz 			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
108237bff2b9SYuval Mintz 			   &params);
108337bff2b9SYuval Mintz 
108437bff2b9SYuval Mintz 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
108537bff2b9SYuval Mintz 			   mbx->req_virt->first_tlv.reply_address,
108637bff2b9SYuval Mintz 			   sizeof(u64) / 4, &params);
108737bff2b9SYuval Mintz 
108837bff2b9SYuval Mintz 	REG_WR(p_hwfn,
108937bff2b9SYuval Mintz 	       GTT_BAR0_MAP_REG_USDM_RAM +
109037bff2b9SYuval Mintz 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
109137bff2b9SYuval Mintz }
109237bff2b9SYuval Mintz 
1093dacd88d6SYuval Mintz static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
1094dacd88d6SYuval Mintz 				enum qed_iov_vport_update_flag flag)
1095dacd88d6SYuval Mintz {
1096dacd88d6SYuval Mintz 	switch (flag) {
1097dacd88d6SYuval Mintz 	case QED_IOV_VP_UPDATE_ACTIVATE:
1098dacd88d6SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
109917b235c1SYuval Mintz 	case QED_IOV_VP_UPDATE_VLAN_STRIP:
110017b235c1SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
110117b235c1SYuval Mintz 	case QED_IOV_VP_UPDATE_TX_SWITCH:
110217b235c1SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1103dacd88d6SYuval Mintz 	case QED_IOV_VP_UPDATE_MCAST:
1104dacd88d6SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1105dacd88d6SYuval Mintz 	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
1106dacd88d6SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1107dacd88d6SYuval Mintz 	case QED_IOV_VP_UPDATE_RSS:
1108dacd88d6SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_RSS;
110917b235c1SYuval Mintz 	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
111017b235c1SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
111117b235c1SYuval Mintz 	case QED_IOV_VP_UPDATE_SGE_TPA:
111217b235c1SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1113dacd88d6SYuval Mintz 	default:
1114dacd88d6SYuval Mintz 		return 0;
1115dacd88d6SYuval Mintz 	}
1116dacd88d6SYuval Mintz }
1117dacd88d6SYuval Mintz 
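/* Build the VPORT_UPDATE response: after the header, a default-response-sized
 * TLV is added for every extension the PF recognized in the request
 * (tlvs_mask), with its status set either to the overall status or to
 * PFVF_STATUS_NOT_SUPPORTED if that extension wasn't accepted (tlvs_accepted).
 * For example, tlvs_mask = BIT(QED_IOV_VP_UPDATE_ACTIVATE) |
 * BIT(QED_IOV_VP_UPDATE_RSS) yields an ACTIVATE TLV and an RSS TLV after the
 * header. Returns the total length of the reply.
 */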
1118dacd88d6SYuval Mintz static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
1119dacd88d6SYuval Mintz 					    struct qed_vf_info *p_vf,
1120dacd88d6SYuval Mintz 					    struct qed_iov_vf_mbx *p_mbx,
1121dacd88d6SYuval Mintz 					    u8 status,
1122dacd88d6SYuval Mintz 					    u16 tlvs_mask, u16 tlvs_accepted)
1123dacd88d6SYuval Mintz {
1124dacd88d6SYuval Mintz 	struct pfvf_def_resp_tlv *resp;
1125dacd88d6SYuval Mintz 	u16 size, total_len, i;
1126dacd88d6SYuval Mintz 
1127dacd88d6SYuval Mintz 	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1128dacd88d6SYuval Mintz 	p_mbx->offset = (u8 *)p_mbx->reply_virt;
1129dacd88d6SYuval Mintz 	size = sizeof(struct pfvf_def_resp_tlv);
1130dacd88d6SYuval Mintz 	total_len = size;
1131dacd88d6SYuval Mintz 
1132dacd88d6SYuval Mintz 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1133dacd88d6SYuval Mintz 
1134dacd88d6SYuval Mintz 	/* Prepare a response for each extended TLV recognized by the PF */
1135dacd88d6SYuval Mintz 	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
11361a635e48SYuval Mintz 		if (!(tlvs_mask & BIT(i)))
1137dacd88d6SYuval Mintz 			continue;
1138dacd88d6SYuval Mintz 
1139dacd88d6SYuval Mintz 		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
1140dacd88d6SYuval Mintz 				   qed_iov_vport_to_tlv(p_hwfn, i), size);
1141dacd88d6SYuval Mintz 
11421a635e48SYuval Mintz 		if (tlvs_accepted & BIT(i))
1143dacd88d6SYuval Mintz 			resp->hdr.status = status;
1144dacd88d6SYuval Mintz 		else
1145dacd88d6SYuval Mintz 			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1146dacd88d6SYuval Mintz 
1147dacd88d6SYuval Mintz 		DP_VERBOSE(p_hwfn,
1148dacd88d6SYuval Mintz 			   QED_MSG_IOV,
1149dacd88d6SYuval Mintz 			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
1150dacd88d6SYuval Mintz 			   p_vf->relative_vf_id,
1151dacd88d6SYuval Mintz 			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1152dacd88d6SYuval Mintz 
1153dacd88d6SYuval Mintz 		total_len += size;
1154dacd88d6SYuval Mintz 	}
1155dacd88d6SYuval Mintz 
1156dacd88d6SYuval Mintz 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1157dacd88d6SYuval Mintz 		    sizeof(struct channel_list_end_tlv));
1158dacd88d6SYuval Mintz 
1159dacd88d6SYuval Mintz 	return total_len;
1160dacd88d6SYuval Mintz }
1161dacd88d6SYuval Mintz 
116237bff2b9SYuval Mintz static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
116337bff2b9SYuval Mintz 				 struct qed_ptt *p_ptt,
116437bff2b9SYuval Mintz 				 struct qed_vf_info *vf_info,
116537bff2b9SYuval Mintz 				 u16 type, u16 length, u8 status)
116637bff2b9SYuval Mintz {
116737bff2b9SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
116837bff2b9SYuval Mintz 
116937bff2b9SYuval Mintz 	mbx->offset = (u8 *)mbx->reply_virt;
117037bff2b9SYuval Mintz 
117137bff2b9SYuval Mintz 	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
117237bff2b9SYuval Mintz 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
117337bff2b9SYuval Mintz 		    sizeof(struct channel_list_end_tlv));
117437bff2b9SYuval Mintz 
117537bff2b9SYuval Mintz 	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
117637bff2b9SYuval Mintz }
117737bff2b9SYuval Mintz 
1178ba56947aSBaoyou Xie static struct
1179ba56947aSBaoyou Xie qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
11800b55e27dSYuval Mintz 					       u16 relative_vf_id,
11810b55e27dSYuval Mintz 					       bool b_enabled_only)
11820b55e27dSYuval Mintz {
11830b55e27dSYuval Mintz 	struct qed_vf_info *vf = NULL;
11840b55e27dSYuval Mintz 
11850b55e27dSYuval Mintz 	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
11860b55e27dSYuval Mintz 	if (!vf)
11870b55e27dSYuval Mintz 		return NULL;
11880b55e27dSYuval Mintz 
11890b55e27dSYuval Mintz 	return &vf->p_vf_info;
11900b55e27dSYuval Mintz }
11910b55e27dSYuval Mintz 
1192ba56947aSBaoyou Xie static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
11930b55e27dSYuval Mintz {
11940b55e27dSYuval Mintz 	struct qed_public_vf_info *vf_info;
11950b55e27dSYuval Mintz 
11960b55e27dSYuval Mintz 	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
11970b55e27dSYuval Mintz 
11980b55e27dSYuval Mintz 	if (!vf_info)
11990b55e27dSYuval Mintz 		return;
12000b55e27dSYuval Mintz 
12010b55e27dSYuval Mintz 	/* Clear the VF mac */
12020b55e27dSYuval Mintz 	memset(vf_info->mac, 0, ETH_ALEN);
12030b55e27dSYuval Mintz }
12040b55e27dSYuval Mintz 
12050b55e27dSYuval Mintz static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
12060b55e27dSYuval Mintz 			       struct qed_vf_info *p_vf)
12070b55e27dSYuval Mintz {
12080b55e27dSYuval Mintz 	u32 i;
12090b55e27dSYuval Mintz 
12100b55e27dSYuval Mintz 	p_vf->vf_bulletin = 0;
1211dacd88d6SYuval Mintz 	p_vf->vport_instance = 0;
121208feecd7SYuval Mintz 	p_vf->configured_features = 0;
12130b55e27dSYuval Mintz 
12140b55e27dSYuval Mintz 	/* If VF previously requested fewer resources, go back to default */
12150b55e27dSYuval Mintz 	p_vf->num_rxqs = p_vf->num_sbs;
12160b55e27dSYuval Mintz 	p_vf->num_txqs = p_vf->num_sbs;
12170b55e27dSYuval Mintz 
1218dacd88d6SYuval Mintz 	p_vf->num_active_rxqs = 0;
1219dacd88d6SYuval Mintz 
12203da7a37aSMintz, Yuval 	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
12213da7a37aSMintz, Yuval 		struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];
12223da7a37aSMintz, Yuval 
12233da7a37aSMintz, Yuval 		if (p_queue->p_rx_cid) {
12243da7a37aSMintz, Yuval 			qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
12253da7a37aSMintz, Yuval 			p_queue->p_rx_cid = NULL;
12263da7a37aSMintz, Yuval 		}
12273da7a37aSMintz, Yuval 
12283da7a37aSMintz, Yuval 		if (p_queue->p_tx_cid) {
12293da7a37aSMintz, Yuval 			qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
12303da7a37aSMintz, Yuval 			p_queue->p_tx_cid = NULL;
12313da7a37aSMintz, Yuval 		}
12323da7a37aSMintz, Yuval 	}
12330b55e27dSYuval Mintz 
123408feecd7SYuval Mintz 	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
12351fe614d1SYuval Mintz 	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
12360b55e27dSYuval Mintz 	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
12370b55e27dSYuval Mintz }
12380b55e27dSYuval Mintz 
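/* Negotiate the resources handed to a VF during ACQUIRE. The PF offers what
 * was allocated for the VF at init time; if the VF asked for more than is
 * available, the request fails with PFVF_STATUS_NO_RESOURCE - except for
 * legacy Windows VFs on the pre packet-len/tunnelling HSI, which apparently
 * cannot handle that failure and are instead answered with
 * PFVF_STATUS_SUCCESS and the reduced amounts.
 */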
12391cf2b1a9SYuval Mintz static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
12401cf2b1a9SYuval Mintz 				      struct qed_ptt *p_ptt,
12411cf2b1a9SYuval Mintz 				      struct qed_vf_info *p_vf,
12421cf2b1a9SYuval Mintz 				      struct vf_pf_resc_request *p_req,
12431cf2b1a9SYuval Mintz 				      struct pf_vf_resc *p_resp)
12441cf2b1a9SYuval Mintz {
12451cf2b1a9SYuval Mintz 	int i;
12461cf2b1a9SYuval Mintz 
12471cf2b1a9SYuval Mintz 	/* Queue related information */
12481cf2b1a9SYuval Mintz 	p_resp->num_rxqs = p_vf->num_rxqs;
12491cf2b1a9SYuval Mintz 	p_resp->num_txqs = p_vf->num_txqs;
12501cf2b1a9SYuval Mintz 	p_resp->num_sbs = p_vf->num_sbs;
12511cf2b1a9SYuval Mintz 
12521cf2b1a9SYuval Mintz 	for (i = 0; i < p_resp->num_sbs; i++) {
12531cf2b1a9SYuval Mintz 		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
12541cf2b1a9SYuval Mintz 		p_resp->hw_sbs[i].sb_qid = 0;
12551cf2b1a9SYuval Mintz 	}
12561cf2b1a9SYuval Mintz 
12571cf2b1a9SYuval Mintz 	/* These fields are filled for backward compatibility.
12581cf2b1a9SYuval Mintz 	 * Unused by modern vfs.
12591cf2b1a9SYuval Mintz 	 */
12601cf2b1a9SYuval Mintz 	for (i = 0; i < p_resp->num_rxqs; i++) {
12611cf2b1a9SYuval Mintz 		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
12621cf2b1a9SYuval Mintz 				(u16 *)&p_resp->hw_qid[i]);
12631cf2b1a9SYuval Mintz 		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
12641cf2b1a9SYuval Mintz 	}
12651cf2b1a9SYuval Mintz 
12661cf2b1a9SYuval Mintz 	/* Filter related information */
12671cf2b1a9SYuval Mintz 	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
12681cf2b1a9SYuval Mintz 					p_req->num_mac_filters);
12691cf2b1a9SYuval Mintz 	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
12701cf2b1a9SYuval Mintz 					 p_req->num_vlan_filters);
12711cf2b1a9SYuval Mintz 
12721cf2b1a9SYuval Mintz 	/* This isn't really needed/enforced, but some legacy VFs might depend
12731cf2b1a9SYuval Mintz 	 * on the correct filling of this field.
12741cf2b1a9SYuval Mintz 	 */
12751cf2b1a9SYuval Mintz 	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
12761cf2b1a9SYuval Mintz 
12771cf2b1a9SYuval Mintz 	/* Validate sufficient resources for VF */
12781cf2b1a9SYuval Mintz 	if (p_resp->num_rxqs < p_req->num_rxqs ||
12791cf2b1a9SYuval Mintz 	    p_resp->num_txqs < p_req->num_txqs ||
12801cf2b1a9SYuval Mintz 	    p_resp->num_sbs < p_req->num_sbs ||
12811cf2b1a9SYuval Mintz 	    p_resp->num_mac_filters < p_req->num_mac_filters ||
12821cf2b1a9SYuval Mintz 	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
12831cf2b1a9SYuval Mintz 	    p_resp->num_mc_filters < p_req->num_mc_filters) {
12841cf2b1a9SYuval Mintz 		DP_VERBOSE(p_hwfn,
12851cf2b1a9SYuval Mintz 			   QED_MSG_IOV,
12861cf2b1a9SYuval Mintz 			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
12871cf2b1a9SYuval Mintz 			   p_vf->abs_vf_id,
12881cf2b1a9SYuval Mintz 			   p_req->num_rxqs,
12891cf2b1a9SYuval Mintz 			   p_resp->num_rxqs,
12901cf2b1a9SYuval Mintz 			   p_req->num_txqs,
12911cf2b1a9SYuval Mintz 			   p_resp->num_txqs,
12921cf2b1a9SYuval Mintz 			   p_req->num_sbs,
12931cf2b1a9SYuval Mintz 			   p_resp->num_sbs,
12941cf2b1a9SYuval Mintz 			   p_req->num_mac_filters,
12951cf2b1a9SYuval Mintz 			   p_resp->num_mac_filters,
12961cf2b1a9SYuval Mintz 			   p_req->num_vlan_filters,
12971cf2b1a9SYuval Mintz 			   p_resp->num_vlan_filters,
12981cf2b1a9SYuval Mintz 			   p_req->num_mc_filters, p_resp->num_mc_filters);
1299a044df83SYuval Mintz 
1300a044df83SYuval Mintz 		/* Some legacy OSes are incapable of correctly handling this
1301a044df83SYuval Mintz 		 * failure.
1302a044df83SYuval Mintz 		 */
1303a044df83SYuval Mintz 		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1304a044df83SYuval Mintz 		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1305a044df83SYuval Mintz 		    (p_vf->acquire.vfdev_info.os_type ==
1306a044df83SYuval Mintz 		     VFPF_ACQUIRE_OS_WINDOWS))
1307a044df83SYuval Mintz 			return PFVF_STATUS_SUCCESS;
1308a044df83SYuval Mintz 
13091cf2b1a9SYuval Mintz 		return PFVF_STATUS_NO_RESOURCE;
13101cf2b1a9SYuval Mintz 	}
13111cf2b1a9SYuval Mintz 
13121cf2b1a9SYuval Mintz 	return PFVF_STATUS_SUCCESS;
13131cf2b1a9SYuval Mintz }
13141cf2b1a9SYuval Mintz 
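/* Fill the per-storm statistics addresses handed to the VF; these point at
 * the SDM queue-statistics zones within the VF's own BAR0 (MSTORM, USTORM
 * and PSTORM). TSTORM statistics are not exposed here, hence the zeroed
 * entry.
 */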
13151cf2b1a9SYuval Mintz static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
13161cf2b1a9SYuval Mintz 					 struct pfvf_stats_info *p_stats)
13171cf2b1a9SYuval Mintz {
13181cf2b1a9SYuval Mintz 	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
13191cf2b1a9SYuval Mintz 				  offsetof(struct mstorm_vf_zone,
13201cf2b1a9SYuval Mintz 					   non_trigger.eth_queue_stat);
13211cf2b1a9SYuval Mintz 	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
13221cf2b1a9SYuval Mintz 	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
13231cf2b1a9SYuval Mintz 				  offsetof(struct ustorm_vf_zone,
13241cf2b1a9SYuval Mintz 					   non_trigger.eth_queue_stat);
13251cf2b1a9SYuval Mintz 	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
13261cf2b1a9SYuval Mintz 	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
13271cf2b1a9SYuval Mintz 				  offsetof(struct pstorm_vf_zone,
13281cf2b1a9SYuval Mintz 					   non_trigger.eth_queue_stat);
13291cf2b1a9SYuval Mintz 	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
13301cf2b1a9SYuval Mintz 	p_stats->tstats.address = 0;
13311cf2b1a9SYuval Mintz 	p_stats->tstats.len = 0;
13321cf2b1a9SYuval Mintz }
13331cf2b1a9SYuval Mintz 
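/* Handle a VF ACQUIRE request: validate the VF state and fastpath-HSI
 * compatibility (downgrading pre-fastpath-HSI legacy VFs rather than failing
 * them), store the request, fill the PF/device information and the negotiated
 * resources, start the VF in FW via the VF_START ramrod, post the bulletin
 * board and send back the ACQUIRE response.
 */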
13341408cc1fSYuval Mintz static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
133537bff2b9SYuval Mintz 				   struct qed_ptt *p_ptt,
13361408cc1fSYuval Mintz 				   struct qed_vf_info *vf)
133737bff2b9SYuval Mintz {
13381408cc1fSYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
13391408cc1fSYuval Mintz 	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
13401408cc1fSYuval Mintz 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
13411408cc1fSYuval Mintz 	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
13421cf2b1a9SYuval Mintz 	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
13431408cc1fSYuval Mintz 	struct pf_vf_resc *resc = &resp->resc;
13441fe614d1SYuval Mintz 	int rc;
13451fe614d1SYuval Mintz 
13461fe614d1SYuval Mintz 	memset(resp, 0, sizeof(*resp));
13471408cc1fSYuval Mintz 
134805fafbfbSYuval Mintz 	/* Write the PF version so that the VF knows which version is
134905fafbfbSYuval Mintz 	 * supported - it might be overridden later. This guarantees that the
135005fafbfbSYuval Mintz 	 * VF can recognize a legacy PF based on the lack of versions in the reply.
135105fafbfbSYuval Mintz 	 */
135205fafbfbSYuval Mintz 	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
135305fafbfbSYuval Mintz 	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
135405fafbfbSYuval Mintz 
1355a044df83SYuval Mintz 	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
1356a044df83SYuval Mintz 		DP_VERBOSE(p_hwfn,
1357a044df83SYuval Mintz 			   QED_MSG_IOV,
1358a044df83SYuval Mintz 			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1359a044df83SYuval Mintz 			   vf->abs_vf_id, vf->state);
1360a044df83SYuval Mintz 		goto out;
1361a044df83SYuval Mintz 	}
1362a044df83SYuval Mintz 
13631408cc1fSYuval Mintz 	/* Validate FW compatibility */
13641fe614d1SYuval Mintz 	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1365a044df83SYuval Mintz 		if (req->vfdev_info.capabilities &
1366a044df83SYuval Mintz 		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1367a044df83SYuval Mintz 			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1368a044df83SYuval Mintz 
1369a044df83SYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1370a044df83SYuval Mintz 				   "VF[%d] is pre-fastpath HSI\n",
1371a044df83SYuval Mintz 				   vf->abs_vf_id);
1372a044df83SYuval Mintz 			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1373a044df83SYuval Mintz 			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1374a044df83SYuval Mintz 		} else {
13751408cc1fSYuval Mintz 			DP_INFO(p_hwfn,
13761fe614d1SYuval Mintz 				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
13771408cc1fSYuval Mintz 				vf->abs_vf_id,
13781fe614d1SYuval Mintz 				req->vfdev_info.eth_fp_hsi_major,
13791fe614d1SYuval Mintz 				req->vfdev_info.eth_fp_hsi_minor,
13801fe614d1SYuval Mintz 				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
13811fe614d1SYuval Mintz 
13821408cc1fSYuval Mintz 			goto out;
13831408cc1fSYuval Mintz 		}
1384a044df83SYuval Mintz 	}
13851408cc1fSYuval Mintz 
13861408cc1fSYuval Mintz 	/* On 100g PFs, prevent old VFs from loading */
13871408cc1fSYuval Mintz 	if ((p_hwfn->cdev->num_hwfns > 1) &&
13881408cc1fSYuval Mintz 	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
13891408cc1fSYuval Mintz 		DP_INFO(p_hwfn,
13901408cc1fSYuval Mintz 			"VF[%d] is running an old driver that doesn't support 100g\n",
13911408cc1fSYuval Mintz 			vf->abs_vf_id);
13921408cc1fSYuval Mintz 		goto out;
13931408cc1fSYuval Mintz 	}
13941408cc1fSYuval Mintz 
13951fe614d1SYuval Mintz 	/* Store the acquire message */
13961fe614d1SYuval Mintz 	memcpy(&vf->acquire, req, sizeof(vf->acquire));
13971408cc1fSYuval Mintz 
13981408cc1fSYuval Mintz 	vf->opaque_fid = req->vfdev_info.opaque_fid;
13991408cc1fSYuval Mintz 
14001408cc1fSYuval Mintz 	vf->vf_bulletin = req->bulletin_addr;
14011408cc1fSYuval Mintz 	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
14021408cc1fSYuval Mintz 			    vf->bulletin.size : req->bulletin_size;
14031408cc1fSYuval Mintz 
14041408cc1fSYuval Mintz 	/* fill in pfdev info */
14051408cc1fSYuval Mintz 	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
14061408cc1fSYuval Mintz 	pfdev_info->db_size = 0;
14071408cc1fSYuval Mintz 	pfdev_info->indices_per_sb = PIS_PER_SB;
14081408cc1fSYuval Mintz 
14091408cc1fSYuval Mintz 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
14101408cc1fSYuval Mintz 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
14111408cc1fSYuval Mintz 	if (p_hwfn->cdev->num_hwfns > 1)
14121408cc1fSYuval Mintz 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
14131408cc1fSYuval Mintz 
14141cf2b1a9SYuval Mintz 	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
14151408cc1fSYuval Mintz 
14161408cc1fSYuval Mintz 	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
14171408cc1fSYuval Mintz 
14181408cc1fSYuval Mintz 	pfdev_info->fw_major = FW_MAJOR_VERSION;
14191408cc1fSYuval Mintz 	pfdev_info->fw_minor = FW_MINOR_VERSION;
14201408cc1fSYuval Mintz 	pfdev_info->fw_rev = FW_REVISION_VERSION;
14211408cc1fSYuval Mintz 	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1422a044df83SYuval Mintz 
1423a044df83SYuval Mintz 	/* This is incorrect for legacy VFs, but it doesn't matter since
1424a044df83SYuval Mintz 	 * legacy VFs don't read this field.
1425a044df83SYuval Mintz 	 */
14261a635e48SYuval Mintz 	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
14271fe614d1SYuval Mintz 					 req->vfdev_info.eth_fp_hsi_minor);
14281408cc1fSYuval Mintz 	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
14291408cc1fSYuval Mintz 	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
14301408cc1fSYuval Mintz 
14311408cc1fSYuval Mintz 	pfdev_info->dev_type = p_hwfn->cdev->type;
14321408cc1fSYuval Mintz 	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
14331408cc1fSYuval Mintz 
14341cf2b1a9SYuval Mintz 	/* Fill resources available to VF; make sure there are enough to
14351cf2b1a9SYuval Mintz 	 * satisfy the VF's request.
14361408cc1fSYuval Mintz 	 */
14371cf2b1a9SYuval Mintz 	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
14381cf2b1a9SYuval Mintz 						  &req->resc_request, resc);
14391cf2b1a9SYuval Mintz 	if (vfpf_status != PFVF_STATUS_SUCCESS)
14401cf2b1a9SYuval Mintz 		goto out;
14411408cc1fSYuval Mintz 
14421fe614d1SYuval Mintz 	/* Start the VF in FW */
14431fe614d1SYuval Mintz 	rc = qed_sp_vf_start(p_hwfn, vf);
14441fe614d1SYuval Mintz 	if (rc) {
14451fe614d1SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
14461fe614d1SYuval Mintz 		vfpf_status = PFVF_STATUS_FAILURE;
14471fe614d1SYuval Mintz 		goto out;
14481fe614d1SYuval Mintz 	}
14491fe614d1SYuval Mintz 
14501408cc1fSYuval Mintz 	/* Fill agreed size of bulletin board in response */
14511408cc1fSYuval Mintz 	resp->bulletin_size = vf->bulletin.size;
145236558c3dSYuval Mintz 	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
14531408cc1fSYuval Mintz 
14541408cc1fSYuval Mintz 	DP_VERBOSE(p_hwfn,
14551408cc1fSYuval Mintz 		   QED_MSG_IOV,
14561408cc1fSYuval Mintz 		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
14571408cc1fSYuval Mintz 		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
14581408cc1fSYuval Mintz 		   vf->abs_vf_id,
14591408cc1fSYuval Mintz 		   resp->pfdev_info.chip_num,
14601408cc1fSYuval Mintz 		   resp->pfdev_info.db_size,
14611408cc1fSYuval Mintz 		   resp->pfdev_info.indices_per_sb,
14621408cc1fSYuval Mintz 		   resp->pfdev_info.capabilities,
14631408cc1fSYuval Mintz 		   resc->num_rxqs,
14641408cc1fSYuval Mintz 		   resc->num_txqs,
14651408cc1fSYuval Mintz 		   resc->num_sbs,
14661408cc1fSYuval Mintz 		   resc->num_mac_filters,
14671408cc1fSYuval Mintz 		   resc->num_vlan_filters);
14681408cc1fSYuval Mintz 	vf->state = VF_ACQUIRED;
14691408cc1fSYuval Mintz 
14701408cc1fSYuval Mintz 	/* Prepare Response */
14711408cc1fSYuval Mintz out:
14721408cc1fSYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
14731408cc1fSYuval Mintz 			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
147437bff2b9SYuval Mintz }
147537bff2b9SYuval Mintz 
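/* Toggle anti-spoofing for a VF through a vport-update ramrod. This is a
 * no-op if the requested value is already configured; on success both the
 * current and the requested spoof-check values are cached on the VF.
 */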
14766ddc7608SYuval Mintz static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
14776ddc7608SYuval Mintz 				  struct qed_vf_info *p_vf, bool val)
14786ddc7608SYuval Mintz {
14796ddc7608SYuval Mintz 	struct qed_sp_vport_update_params params;
14806ddc7608SYuval Mintz 	int rc;
14816ddc7608SYuval Mintz 
14826ddc7608SYuval Mintz 	if (val == p_vf->spoof_chk) {
14836ddc7608SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
14846ddc7608SYuval Mintz 			   "Spoofchk value[%d] is already configured\n", val);
14856ddc7608SYuval Mintz 		return 0;
14866ddc7608SYuval Mintz 	}
14876ddc7608SYuval Mintz 
14886ddc7608SYuval Mintz 	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
14896ddc7608SYuval Mintz 	params.opaque_fid = p_vf->opaque_fid;
14906ddc7608SYuval Mintz 	params.vport_id = p_vf->vport_id;
14916ddc7608SYuval Mintz 	params.update_anti_spoofing_en_flg = 1;
14926ddc7608SYuval Mintz 	params.anti_spoofing_en = val;
14936ddc7608SYuval Mintz 
14946ddc7608SYuval Mintz 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1495cb1fa088SYuval Mintz 	if (!rc) {
14966ddc7608SYuval Mintz 		p_vf->spoof_chk = val;
14976ddc7608SYuval Mintz 		p_vf->req_spoofchk_val = p_vf->spoof_chk;
14986ddc7608SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
14996ddc7608SYuval Mintz 			   "Spoofchk val[%d] configured\n", val);
15006ddc7608SYuval Mintz 	} else {
15016ddc7608SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
15026ddc7608SYuval Mintz 			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
15036ddc7608SYuval Mintz 			   val, p_vf->relative_vf_id);
15046ddc7608SYuval Mintz 	}
15056ddc7608SYuval Mintz 
15066ddc7608SYuval Mintz 	return rc;
15076ddc7608SYuval Mintz }
15086ddc7608SYuval Mintz 
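/* Re-add all VLAN filters recorded in the VF's shadow configuration, e.g.
 * after a forced VLAN has been removed and the VF's own filters need to be
 * restored.
 */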
150908feecd7SYuval Mintz static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
151008feecd7SYuval Mintz 					    struct qed_vf_info *p_vf)
151108feecd7SYuval Mintz {
151208feecd7SYuval Mintz 	struct qed_filter_ucast filter;
151308feecd7SYuval Mintz 	int rc = 0;
151408feecd7SYuval Mintz 	int i;
151508feecd7SYuval Mintz 
151608feecd7SYuval Mintz 	memset(&filter, 0, sizeof(filter));
151708feecd7SYuval Mintz 	filter.is_rx_filter = 1;
151808feecd7SYuval Mintz 	filter.is_tx_filter = 1;
151908feecd7SYuval Mintz 	filter.vport_to_add_to = p_vf->vport_id;
152008feecd7SYuval Mintz 	filter.opcode = QED_FILTER_ADD;
152108feecd7SYuval Mintz 
152208feecd7SYuval Mintz 	/* Reconfigure vlans */
152308feecd7SYuval Mintz 	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
152408feecd7SYuval Mintz 		if (!p_vf->shadow_config.vlans[i].used)
152508feecd7SYuval Mintz 			continue;
152608feecd7SYuval Mintz 
152708feecd7SYuval Mintz 		filter.type = QED_FILTER_VLAN;
152808feecd7SYuval Mintz 		filter.vlan = p_vf->shadow_config.vlans[i].vid;
15291a635e48SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
153008feecd7SYuval Mintz 			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
153108feecd7SYuval Mintz 			   filter.vlan, p_vf->relative_vf_id);
15321a635e48SYuval Mintz 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
15331a635e48SYuval Mintz 					     &filter, QED_SPQ_MODE_CB, NULL);
153408feecd7SYuval Mintz 		if (rc) {
153508feecd7SYuval Mintz 			DP_NOTICE(p_hwfn,
153608feecd7SYuval Mintz 				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
153708feecd7SYuval Mintz 				  filter.vlan, p_vf->relative_vf_id);
153808feecd7SYuval Mintz 			break;
153908feecd7SYuval Mintz 		}
154008feecd7SYuval Mintz 	}
154108feecd7SYuval Mintz 
154208feecd7SYuval Mintz 	return rc;
154308feecd7SYuval Mintz }
154408feecd7SYuval Mintz 
154508feecd7SYuval Mintz static int
154608feecd7SYuval Mintz qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
154708feecd7SYuval Mintz 				   struct qed_vf_info *p_vf, u64 events)
154808feecd7SYuval Mintz {
154908feecd7SYuval Mintz 	int rc = 0;
155008feecd7SYuval Mintz 
15511a635e48SYuval Mintz 	if ((events & BIT(VLAN_ADDR_FORCED)) &&
155208feecd7SYuval Mintz 	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
155308feecd7SYuval Mintz 		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
155408feecd7SYuval Mintz 
155508feecd7SYuval Mintz 	return rc;
155608feecd7SYuval Mintz }
155708feecd7SYuval Mintz 
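/* Apply hypervisor-forced features from the bulletin board to the VF's vport.
 * A forced MAC is programmed with a REPLACE filter; a forced VLAN programs
 * the filter, updates the default/silent VLAN stripping on the vport and
 * refreshes every active Rx queue. When forced features change, the shadow
 * (VF-owned) configuration is reconfigured as needed.
 */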
155808feecd7SYuval Mintz static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
155908feecd7SYuval Mintz 					  struct qed_vf_info *p_vf, u64 events)
156008feecd7SYuval Mintz {
156108feecd7SYuval Mintz 	int rc = 0;
156208feecd7SYuval Mintz 	struct qed_filter_ucast filter;
156308feecd7SYuval Mintz 
156408feecd7SYuval Mintz 	if (!p_vf->vport_instance)
156508feecd7SYuval Mintz 		return -EINVAL;
156608feecd7SYuval Mintz 
15671a635e48SYuval Mintz 	if (events & BIT(MAC_ADDR_FORCED)) {
1568eff16960SYuval Mintz 		/* Since there's no way [currently] of removing the MAC,
1569eff16960SYuval Mintz 		 * we can always assume this means we need to force it.
1570eff16960SYuval Mintz 		 */
1571eff16960SYuval Mintz 		memset(&filter, 0, sizeof(filter));
1572eff16960SYuval Mintz 		filter.type = QED_FILTER_MAC;
1573eff16960SYuval Mintz 		filter.opcode = QED_FILTER_REPLACE;
1574eff16960SYuval Mintz 		filter.is_rx_filter = 1;
1575eff16960SYuval Mintz 		filter.is_tx_filter = 1;
1576eff16960SYuval Mintz 		filter.vport_to_add_to = p_vf->vport_id;
1577eff16960SYuval Mintz 		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1578eff16960SYuval Mintz 
1579eff16960SYuval Mintz 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1580eff16960SYuval Mintz 					     &filter, QED_SPQ_MODE_CB, NULL);
1581eff16960SYuval Mintz 		if (rc) {
1582eff16960SYuval Mintz 			DP_NOTICE(p_hwfn,
1583eff16960SYuval Mintz 				  "PF failed to configure MAC for VF\n");
1584eff16960SYuval Mintz 			return rc;
1585eff16960SYuval Mintz 		}
1586eff16960SYuval Mintz 
1587eff16960SYuval Mintz 		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1588eff16960SYuval Mintz 	}
1589eff16960SYuval Mintz 
15901a635e48SYuval Mintz 	if (events & BIT(VLAN_ADDR_FORCED)) {
159108feecd7SYuval Mintz 		struct qed_sp_vport_update_params vport_update;
159208feecd7SYuval Mintz 		u8 removal;
159308feecd7SYuval Mintz 		int i;
159408feecd7SYuval Mintz 
159508feecd7SYuval Mintz 		memset(&filter, 0, sizeof(filter));
159608feecd7SYuval Mintz 		filter.type = QED_FILTER_VLAN;
159708feecd7SYuval Mintz 		filter.is_rx_filter = 1;
159808feecd7SYuval Mintz 		filter.is_tx_filter = 1;
159908feecd7SYuval Mintz 		filter.vport_to_add_to = p_vf->vport_id;
160008feecd7SYuval Mintz 		filter.vlan = p_vf->bulletin.p_virt->pvid;
160108feecd7SYuval Mintz 		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
160208feecd7SYuval Mintz 					      QED_FILTER_FLUSH;
160308feecd7SYuval Mintz 
160408feecd7SYuval Mintz 		/* Send the ramrod */
160508feecd7SYuval Mintz 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
160608feecd7SYuval Mintz 					     &filter, QED_SPQ_MODE_CB, NULL);
160708feecd7SYuval Mintz 		if (rc) {
160808feecd7SYuval Mintz 			DP_NOTICE(p_hwfn,
160908feecd7SYuval Mintz 				  "PF failed to configure VLAN for VF\n");
161008feecd7SYuval Mintz 			return rc;
161108feecd7SYuval Mintz 		}
161208feecd7SYuval Mintz 
161308feecd7SYuval Mintz 		/* Update the default-vlan & silent vlan stripping */
161408feecd7SYuval Mintz 		memset(&vport_update, 0, sizeof(vport_update));
161508feecd7SYuval Mintz 		vport_update.opaque_fid = p_vf->opaque_fid;
161608feecd7SYuval Mintz 		vport_update.vport_id = p_vf->vport_id;
161708feecd7SYuval Mintz 		vport_update.update_default_vlan_enable_flg = 1;
161808feecd7SYuval Mintz 		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
161908feecd7SYuval Mintz 		vport_update.update_default_vlan_flg = 1;
162008feecd7SYuval Mintz 		vport_update.default_vlan = filter.vlan;
162108feecd7SYuval Mintz 
162208feecd7SYuval Mintz 		vport_update.update_inner_vlan_removal_flg = 1;
162308feecd7SYuval Mintz 		removal = filter.vlan ? 1
162408feecd7SYuval Mintz 				      : p_vf->shadow_config.inner_vlan_removal;
162508feecd7SYuval Mintz 		vport_update.inner_vlan_removal_flg = removal;
162608feecd7SYuval Mintz 		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
162708feecd7SYuval Mintz 		rc = qed_sp_vport_update(p_hwfn,
162808feecd7SYuval Mintz 					 &vport_update,
162908feecd7SYuval Mintz 					 QED_SPQ_MODE_EBLOCK, NULL);
163008feecd7SYuval Mintz 		if (rc) {
163108feecd7SYuval Mintz 			DP_NOTICE(p_hwfn,
163208feecd7SYuval Mintz 				  "PF failed to configure VF vport for vlan\n");
163308feecd7SYuval Mintz 			return rc;
163408feecd7SYuval Mintz 		}
163508feecd7SYuval Mintz 
163608feecd7SYuval Mintz 		/* Update all the Rx queues */
163708feecd7SYuval Mintz 		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
16383da7a37aSMintz, Yuval 			struct qed_queue_cid *p_cid;
163908feecd7SYuval Mintz 
16403da7a37aSMintz, Yuval 			p_cid = p_vf->vf_queues[i].p_rx_cid;
16413da7a37aSMintz, Yuval 			if (!p_cid)
164208feecd7SYuval Mintz 				continue;
164308feecd7SYuval Mintz 
16443da7a37aSMintz, Yuval 			rc = qed_sp_eth_rx_queues_update(p_hwfn,
16453da7a37aSMintz, Yuval 							 (void **)&p_cid,
164608feecd7SYuval Mintz 							 1, 0, 1,
164708feecd7SYuval Mintz 							 QED_SPQ_MODE_EBLOCK,
164808feecd7SYuval Mintz 							 NULL);
164908feecd7SYuval Mintz 			if (rc) {
165008feecd7SYuval Mintz 				DP_NOTICE(p_hwfn,
165108feecd7SYuval Mintz 					  "Failed to send Rx update for queue[0x%04x]\n",
16523da7a37aSMintz, Yuval 					  p_cid->rel.queue_id);
165308feecd7SYuval Mintz 				return rc;
165408feecd7SYuval Mintz 			}
165508feecd7SYuval Mintz 		}
165608feecd7SYuval Mintz 
165708feecd7SYuval Mintz 		if (filter.vlan)
165808feecd7SYuval Mintz 			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
165908feecd7SYuval Mintz 		else
16601a635e48SYuval Mintz 			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
166108feecd7SYuval Mintz 	}
166208feecd7SYuval Mintz 
166308feecd7SYuval Mintz 	/* If forced features were removed, we need to reapply the shadow
166408feecd7SYuval Mintz 	 * configuration.
166508feecd7SYuval Mintz 	 */
166608feecd7SYuval Mintz 	if (events)
166708feecd7SYuval Mintz 		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
166808feecd7SYuval Mintz 
166908feecd7SYuval Mintz 	return rc;
167008feecd7SYuval Mintz }
167108feecd7SYuval Mintz 
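/* Handle VPORT_START from a VF: program the CAU status blocks using the
 * addresses supplied by the VF, enable VF traffic, honour any untagged-default
 * forced by the hypervisor, start the FW vport and then apply forced features
 * and the requested spoof-check setting.
 */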
1672dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1673dacd88d6SYuval Mintz 				       struct qed_ptt *p_ptt,
1674dacd88d6SYuval Mintz 				       struct qed_vf_info *vf)
1675dacd88d6SYuval Mintz {
1676dacd88d6SYuval Mintz 	struct qed_sp_vport_start_params params = { 0 };
1677dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1678dacd88d6SYuval Mintz 	struct vfpf_vport_start_tlv *start;
1679dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1680dacd88d6SYuval Mintz 	struct qed_vf_info *vf_info;
168108feecd7SYuval Mintz 	u64 *p_bitmap;
1682dacd88d6SYuval Mintz 	int sb_id;
1683dacd88d6SYuval Mintz 	int rc;
1684dacd88d6SYuval Mintz 
1685dacd88d6SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1686dacd88d6SYuval Mintz 	if (!vf_info) {
1687dacd88d6SYuval Mintz 		DP_NOTICE(p_hwfn->cdev,
1688dacd88d6SYuval Mintz 			  "Failed to get VF info, invalid vfid [%d]\n",
1689dacd88d6SYuval Mintz 			  vf->relative_vf_id);
1690dacd88d6SYuval Mintz 		return;
1691dacd88d6SYuval Mintz 	}
1692dacd88d6SYuval Mintz 
1693dacd88d6SYuval Mintz 	vf->state = VF_ENABLED;
1694dacd88d6SYuval Mintz 	start = &mbx->req_virt->start_vport;
1695dacd88d6SYuval Mintz 
1696dacd88d6SYuval Mintz 	/* Initialize Status block in CAU */
1697dacd88d6SYuval Mintz 	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1698dacd88d6SYuval Mintz 		if (!start->sb_addr[sb_id]) {
1699dacd88d6SYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1700dacd88d6SYuval Mintz 				   "VF[%d] did not fill the address of SB %d\n",
1701dacd88d6SYuval Mintz 				   vf->relative_vf_id, sb_id);
1702dacd88d6SYuval Mintz 			break;
1703dacd88d6SYuval Mintz 		}
1704dacd88d6SYuval Mintz 
1705dacd88d6SYuval Mintz 		qed_int_cau_conf_sb(p_hwfn, p_ptt,
1706dacd88d6SYuval Mintz 				    start->sb_addr[sb_id],
17071a635e48SYuval Mintz 				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
1708dacd88d6SYuval Mintz 	}
1709dacd88d6SYuval Mintz 	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1710dacd88d6SYuval Mintz 
1711dacd88d6SYuval Mintz 	vf->mtu = start->mtu;
171208feecd7SYuval Mintz 	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
171308feecd7SYuval Mintz 
171408feecd7SYuval Mintz 	/* Take into consideration any configuration forced by the hypervisor;
171508feecd7SYuval Mintz 	 * if none is configured, use the values supplied by the VF [for old
171608feecd7SYuval Mintz 	 * VFs this is still fine, since they passed '0' as padding].
171708feecd7SYuval Mintz 	 */
171808feecd7SYuval Mintz 	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
17191a635e48SYuval Mintz 	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
172008feecd7SYuval Mintz 		u8 vf_req = start->only_untagged;
172108feecd7SYuval Mintz 
172208feecd7SYuval Mintz 		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
172308feecd7SYuval Mintz 		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
172408feecd7SYuval Mintz 	}
1725dacd88d6SYuval Mintz 
1726dacd88d6SYuval Mintz 	params.tpa_mode = start->tpa_mode;
1727dacd88d6SYuval Mintz 	params.remove_inner_vlan = start->inner_vlan_removal;
1728831bfb0eSYuval Mintz 	params.tx_switching = true;
1729dacd88d6SYuval Mintz 
173008feecd7SYuval Mintz 	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1731dacd88d6SYuval Mintz 	params.drop_ttl0 = false;
1732dacd88d6SYuval Mintz 	params.concrete_fid = vf->concrete_fid;
1733dacd88d6SYuval Mintz 	params.opaque_fid = vf->opaque_fid;
1734dacd88d6SYuval Mintz 	params.vport_id = vf->vport_id;
1735dacd88d6SYuval Mintz 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1736dacd88d6SYuval Mintz 	params.mtu = vf->mtu;
173711a85d75SYuval Mintz 	params.check_mac = true;
1738dacd88d6SYuval Mintz 
1739dacd88d6SYuval Mintz 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
17401a635e48SYuval Mintz 	if (rc) {
1741dacd88d6SYuval Mintz 		DP_ERR(p_hwfn,
1742dacd88d6SYuval Mintz 		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1743dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1744dacd88d6SYuval Mintz 	} else {
1745dacd88d6SYuval Mintz 		vf->vport_instance++;
174608feecd7SYuval Mintz 
174708feecd7SYuval Mintz 		/* Force configuration if needed on the newly opened vport */
174808feecd7SYuval Mintz 		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
17496ddc7608SYuval Mintz 
17506ddc7608SYuval Mintz 		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1751dacd88d6SYuval Mintz 	}
1752dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1753dacd88d6SYuval Mintz 			     sizeof(struct pfvf_def_resp_tlv), status);
1754dacd88d6SYuval Mintz }
1755dacd88d6SYuval Mintz 
1756dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1757dacd88d6SYuval Mintz 				      struct qed_ptt *p_ptt,
1758dacd88d6SYuval Mintz 				      struct qed_vf_info *vf)
1759dacd88d6SYuval Mintz {
1760dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1761dacd88d6SYuval Mintz 	int rc;
1762dacd88d6SYuval Mintz 
1763dacd88d6SYuval Mintz 	vf->vport_instance--;
17646ddc7608SYuval Mintz 	vf->spoof_chk = false;
1765dacd88d6SYuval Mintz 
1766dacd88d6SYuval Mintz 	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
17671a635e48SYuval Mintz 	if (rc) {
1768dacd88d6SYuval Mintz 		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1769dacd88d6SYuval Mintz 		       rc);
1770dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1771dacd88d6SYuval Mintz 	}
1772dacd88d6SYuval Mintz 
177308feecd7SYuval Mintz 	/* Forget the configuration on the vport */
177408feecd7SYuval Mintz 	vf->configured_features = 0;
177508feecd7SYuval Mintz 	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
177608feecd7SYuval Mintz 
1777dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1778dacd88d6SYuval Mintz 			     sizeof(struct pfvf_def_resp_tlv), status);
1779dacd88d6SYuval Mintz }
1780dacd88d6SYuval Mintz 
1781dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1782dacd88d6SYuval Mintz 					  struct qed_ptt *p_ptt,
1783a044df83SYuval Mintz 					  struct qed_vf_info *vf,
1784a044df83SYuval Mintz 					  u8 status, bool b_legacy)
1785dacd88d6SYuval Mintz {
1786dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1787dacd88d6SYuval Mintz 	struct pfvf_start_queue_resp_tlv *p_tlv;
1788dacd88d6SYuval Mintz 	struct vfpf_start_rxq_tlv *req;
1789a044df83SYuval Mintz 	u16 length;
1790dacd88d6SYuval Mintz 
1791dacd88d6SYuval Mintz 	mbx->offset = (u8 *)mbx->reply_virt;
1792dacd88d6SYuval Mintz 
1793a044df83SYuval Mintz 	/* Taking a bigger struct instead of adding a TLV to the list was a
1794a044df83SYuval Mintz 	 * mistake, but one which we're now stuck with, as some older
1795a044df83SYuval Mintz 	 * clients assume the size of the previous response.
1796a044df83SYuval Mintz 	 */
1797a044df83SYuval Mintz 	if (!b_legacy)
1798a044df83SYuval Mintz 		length = sizeof(*p_tlv);
1799a044df83SYuval Mintz 	else
1800a044df83SYuval Mintz 		length = sizeof(struct pfvf_def_resp_tlv);
1801a044df83SYuval Mintz 
1802dacd88d6SYuval Mintz 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
1803a044df83SYuval Mintz 			    length);
1804dacd88d6SYuval Mintz 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1805dacd88d6SYuval Mintz 		    sizeof(struct channel_list_end_tlv));
1806dacd88d6SYuval Mintz 
1807dacd88d6SYuval Mintz 	/* Update the TLV with the response */
1808a044df83SYuval Mintz 	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
1809dacd88d6SYuval Mintz 		req = &mbx->req_virt->start_rxq;
1810351a4dedSYuval Mintz 		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
1811351a4dedSYuval Mintz 				offsetof(struct mstorm_vf_zone,
1812351a4dedSYuval Mintz 					 non_trigger.eth_rx_queue_producers) +
1813351a4dedSYuval Mintz 				sizeof(struct eth_rx_prod_data) * req->rx_qid;
1814dacd88d6SYuval Mintz 	}
1815dacd88d6SYuval Mintz 
1816a044df83SYuval Mintz 	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
1817dacd88d6SYuval Mintz }
1818dacd88d6SYuval Mintz 
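/* Handle START_RXQ from a VF: validate the queue and SB indices, allocate a
 * queue-cid, clear the MSTORM Rx producer for modern VFs (legacy VFs compute
 * and clean their producer on their own) and issue the Rx queue start ramrod.
 * On failure the queue-cid is released and PFVF_STATUS_FAILURE is returned to
 * the VF.
 */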
1819dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1820dacd88d6SYuval Mintz 				     struct qed_ptt *p_ptt,
1821dacd88d6SYuval Mintz 				     struct qed_vf_info *vf)
1822dacd88d6SYuval Mintz {
1823dacd88d6SYuval Mintz 	struct qed_queue_start_common_params params;
1824dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
182541086467SYuval Mintz 	u8 status = PFVF_STATUS_NO_RESOURCE;
18263da7a37aSMintz, Yuval 	struct qed_vf_q_info *p_queue;
1827dacd88d6SYuval Mintz 	struct vfpf_start_rxq_tlv *req;
1828a044df83SYuval Mintz 	bool b_legacy_vf = false;
1829dacd88d6SYuval Mintz 	int rc;
1830dacd88d6SYuval Mintz 
1831dacd88d6SYuval Mintz 	req = &mbx->req_virt->start_rxq;
183241086467SYuval Mintz 
183341086467SYuval Mintz 	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
183441086467SYuval Mintz 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
183541086467SYuval Mintz 		goto out;
183641086467SYuval Mintz 
18373da7a37aSMintz, Yuval 	/* Acquire a new queue-cid */
18383da7a37aSMintz, Yuval 	p_queue = &vf->vf_queues[req->rx_qid];
18393da7a37aSMintz, Yuval 
18403da7a37aSMintz, Yuval 	memset(&params, 0, sizeof(params));
18413da7a37aSMintz, Yuval 	params.queue_id = p_queue->fw_rx_qid;
1842dacd88d6SYuval Mintz 	params.vport_id = vf->vport_id;
18433da7a37aSMintz, Yuval 	params.stats_id = vf->abs_vf_id + 0x10;
1844dacd88d6SYuval Mintz 	params.sb = req->hw_sb;
1845dacd88d6SYuval Mintz 	params.sb_idx = req->sb_index;
1846dacd88d6SYuval Mintz 
18473da7a37aSMintz, Yuval 	p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
18483da7a37aSMintz, Yuval 						  vf->opaque_fid,
18493da7a37aSMintz, Yuval 						  p_queue->fw_cid,
18503da7a37aSMintz, Yuval 						  req->rx_qid, &params);
18513da7a37aSMintz, Yuval 	if (!p_queue->p_rx_cid)
18523da7a37aSMintz, Yuval 		goto out;
18533da7a37aSMintz, Yuval 
1854a044df83SYuval Mintz 	/* Legacy VFs have their producers in a different location, which they
1855a044df83SYuval Mintz 	 * calculate on their own; they also clean the producer prior to this.
1856a044df83SYuval Mintz 	 */
1857a044df83SYuval Mintz 	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1858a044df83SYuval Mintz 	    ETH_HSI_VER_NO_PKT_LEN_TUNN) {
1859a044df83SYuval Mintz 		b_legacy_vf = true;
1860a044df83SYuval Mintz 	} else {
1861a044df83SYuval Mintz 		REG_WR(p_hwfn,
1862a044df83SYuval Mintz 		       GTT_BAR0_MAP_REG_MSDM_RAM +
1863a044df83SYuval Mintz 		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
1864a044df83SYuval Mintz 		       0);
1865a044df83SYuval Mintz 	}
18663da7a37aSMintz, Yuval 	p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
1867a044df83SYuval Mintz 
18683da7a37aSMintz, Yuval 	rc = qed_eth_rxq_start_ramrod(p_hwfn,
18693da7a37aSMintz, Yuval 				      p_queue->p_rx_cid,
1870dacd88d6SYuval Mintz 				      req->bd_max_bytes,
1871dacd88d6SYuval Mintz 				      req->rxq_addr,
18723da7a37aSMintz, Yuval 				      req->cqe_pbl_addr, req->cqe_pbl_size);
1873dacd88d6SYuval Mintz 	if (rc) {
1874dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
18753da7a37aSMintz, Yuval 		qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
18763da7a37aSMintz, Yuval 		p_queue->p_rx_cid = NULL;
1877dacd88d6SYuval Mintz 	} else {
187841086467SYuval Mintz 		status = PFVF_STATUS_SUCCESS;
1879dacd88d6SYuval Mintz 		vf->num_active_rxqs++;
1880dacd88d6SYuval Mintz 	}
1881dacd88d6SYuval Mintz 
188241086467SYuval Mintz out:
1883a044df83SYuval Mintz 	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
1884dacd88d6SYuval Mintz }
1885dacd88d6SYuval Mintz 
18865040acf5SYuval Mintz static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
18875040acf5SYuval Mintz 					  struct qed_ptt *p_ptt,
18885040acf5SYuval Mintz 					  struct qed_vf_info *p_vf, u8 status)
18895040acf5SYuval Mintz {
18905040acf5SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
18915040acf5SYuval Mintz 	struct pfvf_start_queue_resp_tlv *p_tlv;
1892a044df83SYuval Mintz 	bool b_legacy = false;
1893a044df83SYuval Mintz 	u16 length;
18945040acf5SYuval Mintz 
18955040acf5SYuval Mintz 	mbx->offset = (u8 *)mbx->reply_virt;
18965040acf5SYuval Mintz 
1897a044df83SYuval Mintz 	/* Taking a bigger struct instead of adding a TLV to the list was a
1898a044df83SYuval Mintz 	 * mistake, but one which we're now stuck with, as some older
1899a044df83SYuval Mintz 	 * clients assume the size of the previous response.
1900a044df83SYuval Mintz 	 */
1901a044df83SYuval Mintz 	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1902a044df83SYuval Mintz 	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
1903a044df83SYuval Mintz 		b_legacy = true;
1904a044df83SYuval Mintz 
1905a044df83SYuval Mintz 	if (!b_legacy)
1906a044df83SYuval Mintz 		length = sizeof(*p_tlv);
1907a044df83SYuval Mintz 	else
1908a044df83SYuval Mintz 		length = sizeof(struct pfvf_def_resp_tlv);
1909a044df83SYuval Mintz 
19105040acf5SYuval Mintz 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
1911a044df83SYuval Mintz 			    length);
19125040acf5SYuval Mintz 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
19135040acf5SYuval Mintz 		    sizeof(struct channel_list_end_tlv));
19145040acf5SYuval Mintz 
19155040acf5SYuval Mintz 	/* Update the TLV with the response */
1916a044df83SYuval Mintz 	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
19175040acf5SYuval Mintz 		u16 qid = mbx->req_virt->start_txq.tx_qid;
19185040acf5SYuval Mintz 
191951ff1725SRam Amrani 		p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
19205040acf5SYuval Mintz 					       DQ_DEMS_LEGACY);
19215040acf5SYuval Mintz 	}
19225040acf5SYuval Mintz 
1923a044df83SYuval Mintz 	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
19245040acf5SYuval Mintz }
19255040acf5SYuval Mintz 
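/* Handle START_TXQ from a VF: choose a PQ for the VF's traffic, validate the
 * queue and SB indices, allocate a queue-cid and issue the Tx queue start
 * ramrod. Modern VFs receive the queue's doorbell offset in the response;
 * legacy VFs only get the default-sized response.
 */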
1926dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1927dacd88d6SYuval Mintz 				     struct qed_ptt *p_ptt,
1928dacd88d6SYuval Mintz 				     struct qed_vf_info *vf)
1929dacd88d6SYuval Mintz {
1930dacd88d6SYuval Mintz 	struct qed_queue_start_common_params params;
1931dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
193241086467SYuval Mintz 	u8 status = PFVF_STATUS_NO_RESOURCE;
1933dacd88d6SYuval Mintz 	union qed_qm_pq_params pq_params;
1934dacd88d6SYuval Mintz 	struct vfpf_start_txq_tlv *req;
19353da7a37aSMintz, Yuval 	struct qed_vf_q_info *p_queue;
1936dacd88d6SYuval Mintz 	int rc;
19373da7a37aSMintz, Yuval 	u16 pq;
1938dacd88d6SYuval Mintz 
1939dacd88d6SYuval Mintz 	/* Prepare the parameters which would choose the right PQ */
1940dacd88d6SYuval Mintz 	memset(&pq_params, 0, sizeof(pq_params));
1941dacd88d6SYuval Mintz 	pq_params.eth.is_vf = 1;
1942dacd88d6SYuval Mintz 	pq_params.eth.vf_id = vf->relative_vf_id;
1943dacd88d6SYuval Mintz 
1944dacd88d6SYuval Mintz 	memset(&params, 0, sizeof(params));
1945dacd88d6SYuval Mintz 	req = &mbx->req_virt->start_txq;
194641086467SYuval Mintz 
194741086467SYuval Mintz 	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
194841086467SYuval Mintz 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
194941086467SYuval Mintz 		goto out;
195041086467SYuval Mintz 
19513da7a37aSMintz, Yuval 	/* Acquire a new queue-cid */
19523da7a37aSMintz, Yuval 	p_queue = &vf->vf_queues[req->tx_qid];
19533da7a37aSMintz, Yuval 
19543da7a37aSMintz, Yuval 	params.queue_id = p_queue->fw_tx_qid;
1955dacd88d6SYuval Mintz 	params.vport_id = vf->vport_id;
19563da7a37aSMintz, Yuval 	params.stats_id = vf->abs_vf_id + 0x10;
1957dacd88d6SYuval Mintz 	params.sb = req->hw_sb;
1958dacd88d6SYuval Mintz 	params.sb_idx = req->sb_index;
1959dacd88d6SYuval Mintz 
19603da7a37aSMintz, Yuval 	p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
1961dacd88d6SYuval Mintz 						  vf->opaque_fid,
19623da7a37aSMintz, Yuval 						  p_queue->fw_cid,
19633da7a37aSMintz, Yuval 						  req->tx_qid, &params);
19643da7a37aSMintz, Yuval 	if (!p_queue->p_tx_cid)
19653da7a37aSMintz, Yuval 		goto out;
1966dacd88d6SYuval Mintz 
19673da7a37aSMintz, Yuval 	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
19683da7a37aSMintz, Yuval 	rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
19693da7a37aSMintz, Yuval 				      req->pbl_addr, req->pbl_size, pq);
197041086467SYuval Mintz 	if (rc) {
1971dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
19723da7a37aSMintz, Yuval 		qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
19733da7a37aSMintz, Yuval 		p_queue->p_tx_cid = NULL;
197441086467SYuval Mintz 	} else {
197541086467SYuval Mintz 		status = PFVF_STATUS_SUCCESS;
197641086467SYuval Mintz 	}
1977dacd88d6SYuval Mintz 
197841086467SYuval Mintz out:
19795040acf5SYuval Mintz 	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
1980dacd88d6SYuval Mintz }
1981dacd88d6SYuval Mintz 
1982dacd88d6SYuval Mintz static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
1983dacd88d6SYuval Mintz 				struct qed_vf_info *vf,
1984dacd88d6SYuval Mintz 				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
1985dacd88d6SYuval Mintz {
19863da7a37aSMintz, Yuval 	struct qed_vf_q_info *p_queue;
1987dacd88d6SYuval Mintz 	int rc = 0;
1988dacd88d6SYuval Mintz 	int qid;
1989dacd88d6SYuval Mintz 
1990dacd88d6SYuval Mintz 	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
1991dacd88d6SYuval Mintz 		return -EINVAL;
1992dacd88d6SYuval Mintz 
1993dacd88d6SYuval Mintz 	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
19943da7a37aSMintz, Yuval 		p_queue = &vf->vf_queues[qid];
1995dacd88d6SYuval Mintz 
19963da7a37aSMintz, Yuval 		if (!p_queue->p_rx_cid)
19973da7a37aSMintz, Yuval 			continue;
19983da7a37aSMintz, Yuval 
19993da7a37aSMintz, Yuval 		rc = qed_eth_rx_queue_stop(p_hwfn,
20003da7a37aSMintz, Yuval 					   p_queue->p_rx_cid,
20013da7a37aSMintz, Yuval 					   false, cqe_completion);
2002dacd88d6SYuval Mintz 		if (rc)
2003dacd88d6SYuval Mintz 			return rc;
20043da7a37aSMintz, Yuval 
20053da7a37aSMintz, Yuval 		vf->vf_queues[qid].p_rx_cid = NULL;
2006dacd88d6SYuval Mintz 		vf->num_active_rxqs--;
2007dacd88d6SYuval Mintz 	}
2008dacd88d6SYuval Mintz 
2009dacd88d6SYuval Mintz 	return rc;
2010dacd88d6SYuval Mintz }
2011dacd88d6SYuval Mintz 
2012dacd88d6SYuval Mintz static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2013dacd88d6SYuval Mintz 				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
2014dacd88d6SYuval Mintz {
2015dacd88d6SYuval Mintz 	int rc = 0;
20163da7a37aSMintz, Yuval 	struct qed_vf_q_info *p_queue;
2017dacd88d6SYuval Mintz 	int qid;
2018dacd88d6SYuval Mintz 
2019dacd88d6SYuval Mintz 	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
2020dacd88d6SYuval Mintz 		return -EINVAL;
2021dacd88d6SYuval Mintz 
2022dacd88d6SYuval Mintz 	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
20233da7a37aSMintz, Yuval 		p_queue = &vf->vf_queues[qid];
20243da7a37aSMintz, Yuval 		if (!p_queue->p_tx_cid)
20253da7a37aSMintz, Yuval 			continue;
2026dacd88d6SYuval Mintz 
20273da7a37aSMintz, Yuval 		rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
2028dacd88d6SYuval Mintz 		if (rc)
2029dacd88d6SYuval Mintz 			return rc;
20303da7a37aSMintz, Yuval 
20313da7a37aSMintz, Yuval 		p_queue->p_tx_cid = NULL;
2032dacd88d6SYuval Mintz 	}
20333da7a37aSMintz, Yuval 
2034dacd88d6SYuval Mintz 	return rc;
2035dacd88d6SYuval Mintz }
2036dacd88d6SYuval Mintz 
2037dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2038dacd88d6SYuval Mintz 				     struct qed_ptt *p_ptt,
2039dacd88d6SYuval Mintz 				     struct qed_vf_info *vf)
2040dacd88d6SYuval Mintz {
2041dacd88d6SYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2042dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2043dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
2044dacd88d6SYuval Mintz 	struct vfpf_stop_rxqs_tlv *req;
2045dacd88d6SYuval Mintz 	int rc;
2046dacd88d6SYuval Mintz 
2047dacd88d6SYuval Mintz 	/* We give the option of starting from qid != 0; in this case we
2048dacd88d6SYuval Mintz 	 * need to make sure that qid + num_qs doesn't exceed the actual
2049dacd88d6SYuval Mintz 	 * number of queues that exist.
2050dacd88d6SYuval Mintz 	 */
2051dacd88d6SYuval Mintz 	req = &mbx->req_virt->stop_rxqs;
2052dacd88d6SYuval Mintz 	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2053dacd88d6SYuval Mintz 				  req->num_rxqs, req->cqe_completion);
2054dacd88d6SYuval Mintz 	if (rc)
2055dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
2056dacd88d6SYuval Mintz 
2057dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2058dacd88d6SYuval Mintz 			     length, status);
2059dacd88d6SYuval Mintz }
2060dacd88d6SYuval Mintz 
2061dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2062dacd88d6SYuval Mintz 				     struct qed_ptt *p_ptt,
2063dacd88d6SYuval Mintz 				     struct qed_vf_info *vf)
2064dacd88d6SYuval Mintz {
2065dacd88d6SYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2066dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2067dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
2068dacd88d6SYuval Mintz 	struct vfpf_stop_txqs_tlv *req;
2069dacd88d6SYuval Mintz 	int rc;
2070dacd88d6SYuval Mintz 
2071dacd88d6SYuval Mintz 	/* We give the option of starting from qid != 0; in this case we
2072dacd88d6SYuval Mintz 	 * need to make sure that qid + num_qs doesn't exceed the actual
2073dacd88d6SYuval Mintz 	 * number of queues that exist.
2074dacd88d6SYuval Mintz 	 */
2075dacd88d6SYuval Mintz 	req = &mbx->req_virt->stop_txqs;
2076dacd88d6SYuval Mintz 	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2077dacd88d6SYuval Mintz 	if (rc)
2078dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
2079dacd88d6SYuval Mintz 
2080dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2081dacd88d6SYuval Mintz 			     length, status);
2082dacd88d6SYuval Mintz }
2083dacd88d6SYuval Mintz 
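/* Handle UPDATE_RXQ from a VF: validate that the requested queue range is
 * within bounds and active, collect the matching queue-cid handles and issue
 * a single rx-queues-update ramrod covering the whole batch.
 */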
208417b235c1SYuval Mintz static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
208517b235c1SYuval Mintz 				       struct qed_ptt *p_ptt,
208617b235c1SYuval Mintz 				       struct qed_vf_info *vf)
208717b235c1SYuval Mintz {
20883da7a37aSMintz, Yuval 	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
208917b235c1SYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
209017b235c1SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
209117b235c1SYuval Mintz 	struct vfpf_update_rxq_tlv *req;
20923da7a37aSMintz, Yuval 	u8 status = PFVF_STATUS_FAILURE;
209317b235c1SYuval Mintz 	u8 complete_event_flg;
209417b235c1SYuval Mintz 	u8 complete_cqe_flg;
209517b235c1SYuval Mintz 	u16 qid;
209617b235c1SYuval Mintz 	int rc;
209717b235c1SYuval Mintz 	u8 i;
209817b235c1SYuval Mintz 
209917b235c1SYuval Mintz 	req = &mbx->req_virt->update_rxq;
210017b235c1SYuval Mintz 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
210117b235c1SYuval Mintz 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
210217b235c1SYuval Mintz 
21033da7a37aSMintz, Yuval 	/* Validate inputs */
21043da7a37aSMintz, Yuval 	if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
21053da7a37aSMintz, Yuval 	    !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
21063da7a37aSMintz, Yuval 		DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
21073da7a37aSMintz, Yuval 			vf->relative_vf_id, req->rx_qid, req->num_rxqs);
21083da7a37aSMintz, Yuval 		goto out;
210917b235c1SYuval Mintz 	}
211017b235c1SYuval Mintz 
21113da7a37aSMintz, Yuval 	for (i = 0; i < req->num_rxqs; i++) {
21123da7a37aSMintz, Yuval 		qid = req->rx_qid + i;
21133da7a37aSMintz, Yuval 		if (!vf->vf_queues[qid].p_rx_cid) {
21143da7a37aSMintz, Yuval 			DP_INFO(p_hwfn,
21153da7a37aSMintz, Yuval 				"VF[%d] rx_qid = %d isn't active!\n",
21163da7a37aSMintz, Yuval 				vf->relative_vf_id, qid);
21173da7a37aSMintz, Yuval 			goto out;
21183da7a37aSMintz, Yuval 		}
21193da7a37aSMintz, Yuval 
21203da7a37aSMintz, Yuval 		handlers[i] = vf->vf_queues[qid].p_rx_cid;
21213da7a37aSMintz, Yuval 	}
21223da7a37aSMintz, Yuval 
21233da7a37aSMintz, Yuval 	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
21243da7a37aSMintz, Yuval 					 req->num_rxqs,
212517b235c1SYuval Mintz 					 complete_cqe_flg,
212617b235c1SYuval Mintz 					 complete_event_flg,
212717b235c1SYuval Mintz 					 QED_SPQ_MODE_EBLOCK, NULL);
21283da7a37aSMintz, Yuval 	if (rc)
21293da7a37aSMintz, Yuval 		goto out;
213017b235c1SYuval Mintz 
21313da7a37aSMintz, Yuval 	status = PFVF_STATUS_SUCCESS;
21323da7a37aSMintz, Yuval out:
213317b235c1SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
213417b235c1SYuval Mintz 			     length, status);
213517b235c1SYuval Mintz }
213617b235c1SYuval Mintz 
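/* Search the request TLV chain for an extended TLV of the given type. The
 * chain is a sequence of (type, length) headers laid out back-to-back and
 * terminated by CHANNEL_TLV_LIST_END, roughly:
 *
 *   [ first_tlv | ext TLV | ext TLV | ... | CHANNEL_TLV_LIST_END ]
 *
 * Zero-length TLVs and chains that would overrun TLV_BUFFER_SIZE abort the
 * search, protecting the PF against malformed VF messages.
 */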
2137dacd88d6SYuval Mintz void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2138dacd88d6SYuval Mintz 			       void *p_tlvs_list, u16 req_type)
2139dacd88d6SYuval Mintz {
2140dacd88d6SYuval Mintz 	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2141dacd88d6SYuval Mintz 	int len = 0;
2142dacd88d6SYuval Mintz 
2143dacd88d6SYuval Mintz 	do {
2144dacd88d6SYuval Mintz 		if (!p_tlv->length) {
2145dacd88d6SYuval Mintz 			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2146dacd88d6SYuval Mintz 			return NULL;
2147dacd88d6SYuval Mintz 		}
2148dacd88d6SYuval Mintz 
2149dacd88d6SYuval Mintz 		if (p_tlv->type == req_type) {
2150dacd88d6SYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2151dacd88d6SYuval Mintz 				   "Extended tlv type %d, length %d found\n",
2152dacd88d6SYuval Mintz 				   p_tlv->type, p_tlv->length);
2153dacd88d6SYuval Mintz 			return p_tlv;
2154dacd88d6SYuval Mintz 		}
2155dacd88d6SYuval Mintz 
2156dacd88d6SYuval Mintz 		len += p_tlv->length;
2157dacd88d6SYuval Mintz 		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2158dacd88d6SYuval Mintz 
2159dacd88d6SYuval Mintz 		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2160dacd88d6SYuval Mintz 			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2161dacd88d6SYuval Mintz 			return NULL;
2162dacd88d6SYuval Mintz 		}
2163dacd88d6SYuval Mintz 	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
2164dacd88d6SYuval Mintz 
2165dacd88d6SYuval Mintz 	return NULL;
2166dacd88d6SYuval Mintz }
2167dacd88d6SYuval Mintz 
2168dacd88d6SYuval Mintz static void
2169dacd88d6SYuval Mintz qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2170dacd88d6SYuval Mintz 			    struct qed_sp_vport_update_params *p_data,
2171dacd88d6SYuval Mintz 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2172dacd88d6SYuval Mintz {
2173dacd88d6SYuval Mintz 	struct vfpf_vport_update_activate_tlv *p_act_tlv;
2174dacd88d6SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2175dacd88d6SYuval Mintz 
2176dacd88d6SYuval Mintz 	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2177dacd88d6SYuval Mintz 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2178dacd88d6SYuval Mintz 	if (!p_act_tlv)
2179dacd88d6SYuval Mintz 		return;
2180dacd88d6SYuval Mintz 
2181dacd88d6SYuval Mintz 	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2182dacd88d6SYuval Mintz 	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2183dacd88d6SYuval Mintz 	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2184dacd88d6SYuval Mintz 	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2185dacd88d6SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2186dacd88d6SYuval Mintz }
2187dacd88d6SYuval Mintz 
2188dacd88d6SYuval Mintz static void
218917b235c1SYuval Mintz qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
219017b235c1SYuval Mintz 			     struct qed_sp_vport_update_params *p_data,
219117b235c1SYuval Mintz 			     struct qed_vf_info *p_vf,
219217b235c1SYuval Mintz 			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
219317b235c1SYuval Mintz {
219417b235c1SYuval Mintz 	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
219517b235c1SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
219617b235c1SYuval Mintz 
219717b235c1SYuval Mintz 	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
219817b235c1SYuval Mintz 		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
219917b235c1SYuval Mintz 	if (!p_vlan_tlv)
220017b235c1SYuval Mintz 		return;
220117b235c1SYuval Mintz 
220208feecd7SYuval Mintz 	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
220308feecd7SYuval Mintz 
220408feecd7SYuval Mintz 	/* Ignore the VF request if we're forcing a vlan */
22051a635e48SYuval Mintz 	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
220617b235c1SYuval Mintz 		p_data->update_inner_vlan_removal_flg = 1;
220717b235c1SYuval Mintz 		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
220808feecd7SYuval Mintz 	}
220917b235c1SYuval Mintz 
221017b235c1SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
221117b235c1SYuval Mintz }
221217b235c1SYuval Mintz 
221317b235c1SYuval Mintz static void
221417b235c1SYuval Mintz qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
221517b235c1SYuval Mintz 			    struct qed_sp_vport_update_params *p_data,
221617b235c1SYuval Mintz 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
221717b235c1SYuval Mintz {
221817b235c1SYuval Mintz 	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
221917b235c1SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
222017b235c1SYuval Mintz 
222117b235c1SYuval Mintz 	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
222217b235c1SYuval Mintz 			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
222317b235c1SYuval Mintz 						   tlv);
222417b235c1SYuval Mintz 	if (!p_tx_switch_tlv)
222517b235c1SYuval Mintz 		return;
222617b235c1SYuval Mintz 
222717b235c1SYuval Mintz 	p_data->update_tx_switching_flg = 1;
222817b235c1SYuval Mintz 	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
222917b235c1SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
223017b235c1SYuval Mintz }
223117b235c1SYuval Mintz 
223217b235c1SYuval Mintz static void
2233dacd88d6SYuval Mintz qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2234dacd88d6SYuval Mintz 				  struct qed_sp_vport_update_params *p_data,
2235dacd88d6SYuval Mintz 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2236dacd88d6SYuval Mintz {
2237dacd88d6SYuval Mintz 	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2238dacd88d6SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2239dacd88d6SYuval Mintz 
2240dacd88d6SYuval Mintz 	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2241dacd88d6SYuval Mintz 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2242dacd88d6SYuval Mintz 	if (!p_mcast_tlv)
2243dacd88d6SYuval Mintz 		return;
2244dacd88d6SYuval Mintz 
2245dacd88d6SYuval Mintz 	p_data->update_approx_mcast_flg = 1;
2246dacd88d6SYuval Mintz 	memcpy(p_data->bins, p_mcast_tlv->bins,
2247dacd88d6SYuval Mintz 	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2248dacd88d6SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2249dacd88d6SYuval Mintz }
2250dacd88d6SYuval Mintz 
2251dacd88d6SYuval Mintz static void
2252dacd88d6SYuval Mintz qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2253dacd88d6SYuval Mintz 			      struct qed_sp_vport_update_params *p_data,
2254dacd88d6SYuval Mintz 			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2255dacd88d6SYuval Mintz {
2256dacd88d6SYuval Mintz 	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2257dacd88d6SYuval Mintz 	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2258dacd88d6SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2259dacd88d6SYuval Mintz 
2260dacd88d6SYuval Mintz 	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2261dacd88d6SYuval Mintz 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2262dacd88d6SYuval Mintz 	if (!p_accept_tlv)
2263dacd88d6SYuval Mintz 		return;
2264dacd88d6SYuval Mintz 
2265dacd88d6SYuval Mintz 	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2266dacd88d6SYuval Mintz 	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2267dacd88d6SYuval Mintz 	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2268dacd88d6SYuval Mintz 	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2269dacd88d6SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2270dacd88d6SYuval Mintz }
2271dacd88d6SYuval Mintz 
2272dacd88d6SYuval Mintz static void
227317b235c1SYuval Mintz qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
227417b235c1SYuval Mintz 				  struct qed_sp_vport_update_params *p_data,
227517b235c1SYuval Mintz 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
227617b235c1SYuval Mintz {
227717b235c1SYuval Mintz 	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
227817b235c1SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
227917b235c1SYuval Mintz 
228017b235c1SYuval Mintz 	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
228117b235c1SYuval Mintz 			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
228217b235c1SYuval Mintz 						     tlv);
228317b235c1SYuval Mintz 	if (!p_accept_any_vlan)
228417b235c1SYuval Mintz 		return;
228517b235c1SYuval Mintz 
228617b235c1SYuval Mintz 	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
228717b235c1SYuval Mintz 	p_data->update_accept_any_vlan_flg =
228817b235c1SYuval Mintz 		    p_accept_any_vlan->update_accept_any_vlan_flg;
228917b235c1SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
229017b235c1SYuval Mintz }
229117b235c1SYuval Mintz 
229217b235c1SYuval Mintz static void
2293dacd88d6SYuval Mintz qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2294dacd88d6SYuval Mintz 			    struct qed_vf_info *vf,
2295dacd88d6SYuval Mintz 			    struct qed_sp_vport_update_params *p_data,
2296dacd88d6SYuval Mintz 			    struct qed_rss_params *p_rss,
2297dacd88d6SYuval Mintz 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2298dacd88d6SYuval Mintz {
2299dacd88d6SYuval Mintz 	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2300dacd88d6SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2301dacd88d6SYuval Mintz 	u16 i, q_idx, max_q_idx;
2302dacd88d6SYuval Mintz 	u16 table_size;
2303dacd88d6SYuval Mintz 
2304dacd88d6SYuval Mintz 	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2305dacd88d6SYuval Mintz 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2306dacd88d6SYuval Mintz 	if (!p_rss_tlv) {
2307dacd88d6SYuval Mintz 		p_data->rss_params = NULL;
2308dacd88d6SYuval Mintz 		return;
2309dacd88d6SYuval Mintz 	}
2310dacd88d6SYuval Mintz 
2311dacd88d6SYuval Mintz 	memset(p_rss, 0, sizeof(struct qed_rss_params));
2312dacd88d6SYuval Mintz 
2313dacd88d6SYuval Mintz 	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2314dacd88d6SYuval Mintz 				      VFPF_UPDATE_RSS_CONFIG_FLAG);
2315dacd88d6SYuval Mintz 	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2316dacd88d6SYuval Mintz 					    VFPF_UPDATE_RSS_CAPS_FLAG);
2317dacd88d6SYuval Mintz 	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2318dacd88d6SYuval Mintz 					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2319dacd88d6SYuval Mintz 	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2320dacd88d6SYuval Mintz 				   VFPF_UPDATE_RSS_KEY_FLAG);
2321dacd88d6SYuval Mintz 
2322dacd88d6SYuval Mintz 	p_rss->rss_enable = p_rss_tlv->rss_enable;
2323dacd88d6SYuval Mintz 	p_rss->rss_eng_id = vf->relative_vf_id + 1;
2324dacd88d6SYuval Mintz 	p_rss->rss_caps = p_rss_tlv->rss_caps;
2325dacd88d6SYuval Mintz 	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2326dacd88d6SYuval Mintz 	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
2327dacd88d6SYuval Mintz 	       sizeof(p_rss->rss_ind_table));
2328dacd88d6SYuval Mintz 	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2329dacd88d6SYuval Mintz 
2330dacd88d6SYuval Mintz 	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2331dacd88d6SYuval Mintz 			   (1 << p_rss_tlv->rss_table_size_log));
2332dacd88d6SYuval Mintz 
2333dacd88d6SYuval Mintz 	max_q_idx = ARRAY_SIZE(vf->vf_queues);
2334dacd88d6SYuval Mintz 
2335dacd88d6SYuval Mintz 	for (i = 0; i < table_size; i++) {
2336dacd88d6SYuval Mintz 		u16 index = vf->vf_queues[0].fw_rx_qid;
2337dacd88d6SYuval Mintz 
2338dacd88d6SYuval Mintz 		q_idx = p_rss->rss_ind_table[i];
2339dacd88d6SYuval Mintz 		if (q_idx >= max_q_idx)
2340dacd88d6SYuval Mintz 			DP_NOTICE(p_hwfn,
2341dacd88d6SYuval Mintz 				  "rss_ind_table[%d] = %d, rxq is out of range\n",
2342dacd88d6SYuval Mintz 				  i, q_idx);
23433da7a37aSMintz, Yuval 		else if (!vf->vf_queues[q_idx].p_rx_cid)
2344dacd88d6SYuval Mintz 			DP_NOTICE(p_hwfn,
2345dacd88d6SYuval Mintz 				  "rss_ind_table[%d] = %d, rxq is not active\n",
2346dacd88d6SYuval Mintz 				  i, q_idx);
2347dacd88d6SYuval Mintz 		else
2348dacd88d6SYuval Mintz 			index = vf->vf_queues[q_idx].fw_rx_qid;
2349dacd88d6SYuval Mintz 		p_rss->rss_ind_table[i] = index;
2350dacd88d6SYuval Mintz 	}
2351dacd88d6SYuval Mintz 
2352dacd88d6SYuval Mintz 	p_data->rss_params = p_rss;
2353dacd88d6SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2354dacd88d6SYuval Mintz }
2355dacd88d6SYuval Mintz 
235617b235c1SYuval Mintz static void
235717b235c1SYuval Mintz qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
235817b235c1SYuval Mintz 				struct qed_vf_info *vf,
235917b235c1SYuval Mintz 				struct qed_sp_vport_update_params *p_data,
236017b235c1SYuval Mintz 				struct qed_sge_tpa_params *p_sge_tpa,
236117b235c1SYuval Mintz 				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
236217b235c1SYuval Mintz {
236317b235c1SYuval Mintz 	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
236417b235c1SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
236517b235c1SYuval Mintz 
236617b235c1SYuval Mintz 	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
236717b235c1SYuval Mintz 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
236817b235c1SYuval Mintz 
236917b235c1SYuval Mintz 	if (!p_sge_tpa_tlv) {
237017b235c1SYuval Mintz 		p_data->sge_tpa_params = NULL;
237117b235c1SYuval Mintz 		return;
237217b235c1SYuval Mintz 	}
237317b235c1SYuval Mintz 
237417b235c1SYuval Mintz 	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
237517b235c1SYuval Mintz 
237617b235c1SYuval Mintz 	p_sge_tpa->update_tpa_en_flg =
237717b235c1SYuval Mintz 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
237817b235c1SYuval Mintz 	p_sge_tpa->update_tpa_param_flg =
237917b235c1SYuval Mintz 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
238017b235c1SYuval Mintz 		VFPF_UPDATE_TPA_PARAM_FLAG);
238117b235c1SYuval Mintz 
238217b235c1SYuval Mintz 	p_sge_tpa->tpa_ipv4_en_flg =
238317b235c1SYuval Mintz 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
238417b235c1SYuval Mintz 	p_sge_tpa->tpa_ipv6_en_flg =
238517b235c1SYuval Mintz 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
238617b235c1SYuval Mintz 	p_sge_tpa->tpa_pkt_split_flg =
238717b235c1SYuval Mintz 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
238817b235c1SYuval Mintz 	p_sge_tpa->tpa_hdr_data_split_flg =
238917b235c1SYuval Mintz 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
239017b235c1SYuval Mintz 	p_sge_tpa->tpa_gro_consistent_flg =
239117b235c1SYuval Mintz 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
239217b235c1SYuval Mintz 
239317b235c1SYuval Mintz 	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
239417b235c1SYuval Mintz 	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
239517b235c1SYuval Mintz 	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
239617b235c1SYuval Mintz 	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
239717b235c1SYuval Mintz 	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
239817b235c1SYuval Mintz 
239917b235c1SYuval Mintz 	p_data->sge_tpa_params = p_sge_tpa;
240017b235c1SYuval Mintz 
240117b235c1SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
240217b235c1SYuval Mintz }
240317b235c1SYuval Mintz 
2404dacd88d6SYuval Mintz static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
2405dacd88d6SYuval Mintz 					struct qed_ptt *p_ptt,
2406dacd88d6SYuval Mintz 					struct qed_vf_info *vf)
2407dacd88d6SYuval Mintz {
2408dacd88d6SYuval Mintz 	struct qed_sp_vport_update_params params;
2409dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
241017b235c1SYuval Mintz 	struct qed_sge_tpa_params sge_tpa_params;
2411dacd88d6SYuval Mintz 	struct qed_rss_params rss_params;
2412dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
2413dacd88d6SYuval Mintz 	u16 tlvs_mask = 0;
2414dacd88d6SYuval Mintz 	u16 length;
2415dacd88d6SYuval Mintz 	int rc;
2416dacd88d6SYuval Mintz 
241741086467SYuval Mintz 	/* Validate that the VF can send such a request */
241841086467SYuval Mintz 	if (!vf->vport_instance) {
241941086467SYuval Mintz 		DP_VERBOSE(p_hwfn,
242041086467SYuval Mintz 			   QED_MSG_IOV,
242141086467SYuval Mintz 			   "No VPORT instance available for VF[%d], failing vport update\n",
242241086467SYuval Mintz 			   vf->abs_vf_id);
242341086467SYuval Mintz 		status = PFVF_STATUS_FAILURE;
242441086467SYuval Mintz 		goto out;
242541086467SYuval Mintz 	}
242641086467SYuval Mintz 
2427dacd88d6SYuval Mintz 	memset(&params, 0, sizeof(params));
2428dacd88d6SYuval Mintz 	params.opaque_fid = vf->opaque_fid;
2429dacd88d6SYuval Mintz 	params.vport_id = vf->vport_id;
2430dacd88d6SYuval Mintz 	params.rss_params = NULL;
2431dacd88d6SYuval Mintz 
2432dacd88d6SYuval Mintz 	/* Search for extended tlvs list and update values
2433dacd88d6SYuval Mintz 	 * from VF in struct qed_sp_vport_update_params.
2434dacd88d6SYuval Mintz 	 */
2435dacd88d6SYuval Mintz 	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
243617b235c1SYuval Mintz 	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
243717b235c1SYuval Mintz 	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
2438dacd88d6SYuval Mintz 	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2439dacd88d6SYuval Mintz 	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2440dacd88d6SYuval Mintz 	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
2441dacd88d6SYuval Mintz 				    mbx, &tlvs_mask);
244217b235c1SYuval Mintz 	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
244317b235c1SYuval Mintz 	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
244417b235c1SYuval Mintz 					&sge_tpa_params, mbx, &tlvs_mask);
2445dacd88d6SYuval Mintz 
2446dacd88d6SYuval Mintz 	/* Just log a message if no extended TLV is present in the buffer.
2447dacd88d6SYuval Mintz 	 * Once every vport-update ramrod feature is requested by the VF as
2448dacd88d6SYuval Mintz 	 * an extended TLV, an error could be returned in the response
2449dacd88d6SYuval Mintz 	 * instead.
2450dacd88d6SYuval Mintz 	 */
2451dacd88d6SYuval Mintz 	if (!tlvs_mask) {
2452dacd88d6SYuval Mintz 		DP_NOTICE(p_hwfn,
2453dacd88d6SYuval Mintz 			  "No feature tlvs found for vport update\n");
2454dacd88d6SYuval Mintz 		status = PFVF_STATUS_NOT_SUPPORTED;
2455dacd88d6SYuval Mintz 		goto out;
2456dacd88d6SYuval Mintz 	}
2457dacd88d6SYuval Mintz 
2458dacd88d6SYuval Mintz 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
2459dacd88d6SYuval Mintz 
2460dacd88d6SYuval Mintz 	if (rc)
2461dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
2462dacd88d6SYuval Mintz 
2463dacd88d6SYuval Mintz out:
2464dacd88d6SYuval Mintz 	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2465dacd88d6SYuval Mintz 						  tlvs_mask, tlvs_mask);
2466dacd88d6SYuval Mintz 	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2467dacd88d6SYuval Mintz }
2468dacd88d6SYuval Mintz 
24698246d0b4SYuval Mintz static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
247008feecd7SYuval Mintz 					 struct qed_vf_info *p_vf,
247108feecd7SYuval Mintz 					 struct qed_filter_ucast *p_params)
247208feecd7SYuval Mintz {
247308feecd7SYuval Mintz 	int i;
247408feecd7SYuval Mintz 
247508feecd7SYuval Mintz 	/* First remove entries and then add new ones */
247608feecd7SYuval Mintz 	if (p_params->opcode == QED_FILTER_REMOVE) {
247708feecd7SYuval Mintz 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
247808feecd7SYuval Mintz 			if (p_vf->shadow_config.vlans[i].used &&
247908feecd7SYuval Mintz 			    p_vf->shadow_config.vlans[i].vid ==
248008feecd7SYuval Mintz 			    p_params->vlan) {
248108feecd7SYuval Mintz 				p_vf->shadow_config.vlans[i].used = false;
248208feecd7SYuval Mintz 				break;
248308feecd7SYuval Mintz 			}
248408feecd7SYuval Mintz 		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
248508feecd7SYuval Mintz 			DP_VERBOSE(p_hwfn,
248608feecd7SYuval Mintz 				   QED_MSG_IOV,
248708feecd7SYuval Mintz 				   "VF [%d] - Tries to remove a non-existent vlan\n",
248808feecd7SYuval Mintz 				   p_vf->relative_vf_id);
248908feecd7SYuval Mintz 			return -EINVAL;
249008feecd7SYuval Mintz 		}
249108feecd7SYuval Mintz 	} else if (p_params->opcode == QED_FILTER_REPLACE ||
249208feecd7SYuval Mintz 		   p_params->opcode == QED_FILTER_FLUSH) {
249308feecd7SYuval Mintz 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
249408feecd7SYuval Mintz 			p_vf->shadow_config.vlans[i].used = false;
249508feecd7SYuval Mintz 	}
249608feecd7SYuval Mintz 
249708feecd7SYuval Mintz 	/* In forced mode, we're willing to remove entries - but we don't add
249808feecd7SYuval Mintz 	 * new ones.
249908feecd7SYuval Mintz 	 */
25001a635e48SYuval Mintz 	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
250108feecd7SYuval Mintz 		return 0;
250208feecd7SYuval Mintz 
250308feecd7SYuval Mintz 	if (p_params->opcode == QED_FILTER_ADD ||
250408feecd7SYuval Mintz 	    p_params->opcode == QED_FILTER_REPLACE) {
250508feecd7SYuval Mintz 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
250608feecd7SYuval Mintz 			if (p_vf->shadow_config.vlans[i].used)
250708feecd7SYuval Mintz 				continue;
250808feecd7SYuval Mintz 
250908feecd7SYuval Mintz 			p_vf->shadow_config.vlans[i].used = true;
251008feecd7SYuval Mintz 			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
251108feecd7SYuval Mintz 			break;
251208feecd7SYuval Mintz 		}
251308feecd7SYuval Mintz 
251408feecd7SYuval Mintz 		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
251508feecd7SYuval Mintz 			DP_VERBOSE(p_hwfn,
251608feecd7SYuval Mintz 				   QED_MSG_IOV,
251708feecd7SYuval Mintz 				   "VF [%d] - Tries to configure more than %d vlan filters\n",
251808feecd7SYuval Mintz 				   p_vf->relative_vf_id,
251908feecd7SYuval Mintz 				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
252008feecd7SYuval Mintz 			return -EINVAL;
252108feecd7SYuval Mintz 		}
252208feecd7SYuval Mintz 	}
252308feecd7SYuval Mintz 
252408feecd7SYuval Mintz 	return 0;
252508feecd7SYuval Mintz }
252608feecd7SYuval Mintz 
25278246d0b4SYuval Mintz static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
25288246d0b4SYuval Mintz 					struct qed_vf_info *p_vf,
25298246d0b4SYuval Mintz 					struct qed_filter_ucast *p_params)
25308246d0b4SYuval Mintz {
25318246d0b4SYuval Mintz 	int i;
25328246d0b4SYuval Mintz 
25338246d0b4SYuval Mintz 	/* If we're in forced-mode, we don't allow any change */
25341a635e48SYuval Mintz 	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
25358246d0b4SYuval Mintz 		return 0;
25368246d0b4SYuval Mintz 
25378246d0b4SYuval Mintz 	/* First remove entries and then add new ones */
25388246d0b4SYuval Mintz 	if (p_params->opcode == QED_FILTER_REMOVE) {
25398246d0b4SYuval Mintz 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
25408246d0b4SYuval Mintz 			if (ether_addr_equal(p_vf->shadow_config.macs[i],
25418246d0b4SYuval Mintz 					     p_params->mac)) {
25428246d0b4SYuval Mintz 				memset(p_vf->shadow_config.macs[i], 0,
25438246d0b4SYuval Mintz 				       ETH_ALEN);
25448246d0b4SYuval Mintz 				break;
25458246d0b4SYuval Mintz 			}
25468246d0b4SYuval Mintz 		}
25478246d0b4SYuval Mintz 
25488246d0b4SYuval Mintz 		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
25498246d0b4SYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
25508246d0b4SYuval Mintz 				   "MAC isn't configured\n");
25518246d0b4SYuval Mintz 			return -EINVAL;
25528246d0b4SYuval Mintz 		}
25538246d0b4SYuval Mintz 	} else if (p_params->opcode == QED_FILTER_REPLACE ||
25548246d0b4SYuval Mintz 		   p_params->opcode == QED_FILTER_FLUSH) {
25558246d0b4SYuval Mintz 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
25568246d0b4SYuval Mintz 			memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
25578246d0b4SYuval Mintz 	}
25588246d0b4SYuval Mintz 
25598246d0b4SYuval Mintz 	/* List the new MAC address */
25608246d0b4SYuval Mintz 	if (p_params->opcode != QED_FILTER_ADD &&
25618246d0b4SYuval Mintz 	    p_params->opcode != QED_FILTER_REPLACE)
25628246d0b4SYuval Mintz 		return 0;
25638246d0b4SYuval Mintz 
25648246d0b4SYuval Mintz 	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
25658246d0b4SYuval Mintz 		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
25668246d0b4SYuval Mintz 			ether_addr_copy(p_vf->shadow_config.macs[i],
25678246d0b4SYuval Mintz 					p_params->mac);
25688246d0b4SYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
25698246d0b4SYuval Mintz 				   "Added MAC at entry %d in shadow\n", i);
25708246d0b4SYuval Mintz 			break;
25718246d0b4SYuval Mintz 		}
25728246d0b4SYuval Mintz 	}
25738246d0b4SYuval Mintz 
25748246d0b4SYuval Mintz 	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
25758246d0b4SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
25768246d0b4SYuval Mintz 		return -EINVAL;
25778246d0b4SYuval Mintz 	}
25788246d0b4SYuval Mintz 
25798246d0b4SYuval Mintz 	return 0;
25808246d0b4SYuval Mintz }
25818246d0b4SYuval Mintz 
25828246d0b4SYuval Mintz static int
25838246d0b4SYuval Mintz qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
25848246d0b4SYuval Mintz 				 struct qed_vf_info *p_vf,
25858246d0b4SYuval Mintz 				 struct qed_filter_ucast *p_params)
25868246d0b4SYuval Mintz {
25878246d0b4SYuval Mintz 	int rc = 0;
25888246d0b4SYuval Mintz 
25898246d0b4SYuval Mintz 	if (p_params->type == QED_FILTER_MAC) {
25908246d0b4SYuval Mintz 		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
25918246d0b4SYuval Mintz 		if (rc)
25928246d0b4SYuval Mintz 			return rc;
25938246d0b4SYuval Mintz 	}
25948246d0b4SYuval Mintz 
25958246d0b4SYuval Mintz 	if (p_params->type == QED_FILTER_VLAN)
25968246d0b4SYuval Mintz 		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
25978246d0b4SYuval Mintz 
25988246d0b4SYuval Mintz 	return rc;
25998246d0b4SYuval Mintz }
26008246d0b4SYuval Mintz 
2601ba56947aSBaoyou Xie static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
2602dacd88d6SYuval Mintz 			     int vfid, struct qed_filter_ucast *params)
2603dacd88d6SYuval Mintz {
2604dacd88d6SYuval Mintz 	struct qed_public_vf_info *vf;
2605dacd88d6SYuval Mintz 
2606dacd88d6SYuval Mintz 	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
2607dacd88d6SYuval Mintz 	if (!vf)
2608dacd88d6SYuval Mintz 		return -EINVAL;
2609dacd88d6SYuval Mintz 
2610dacd88d6SYuval Mintz 	/* No real decision to make; Store the configured MAC */
2611dacd88d6SYuval Mintz 	if (params->type == QED_FILTER_MAC ||
2612dacd88d6SYuval Mintz 	    params->type == QED_FILTER_MAC_VLAN)
2613dacd88d6SYuval Mintz 		ether_addr_copy(vf->mac, params->mac);
2614dacd88d6SYuval Mintz 
2615dacd88d6SYuval Mintz 	return 0;
2616dacd88d6SYuval Mintz }
2617dacd88d6SYuval Mintz 
2618dacd88d6SYuval Mintz static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
2619dacd88d6SYuval Mintz 					struct qed_ptt *p_ptt,
2620dacd88d6SYuval Mintz 					struct qed_vf_info *vf)
2621dacd88d6SYuval Mintz {
262208feecd7SYuval Mintz 	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
2623dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2624dacd88d6SYuval Mintz 	struct vfpf_ucast_filter_tlv *req;
2625dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
2626dacd88d6SYuval Mintz 	struct qed_filter_ucast params;
2627dacd88d6SYuval Mintz 	int rc;
2628dacd88d6SYuval Mintz 
2629dacd88d6SYuval Mintz 	/* Prepare the unicast filter params */
2630dacd88d6SYuval Mintz 	memset(&params, 0, sizeof(struct qed_filter_ucast));
2631dacd88d6SYuval Mintz 	req = &mbx->req_virt->ucast_filter;
2632dacd88d6SYuval Mintz 	params.opcode = (enum qed_filter_opcode)req->opcode;
2633dacd88d6SYuval Mintz 	params.type = (enum qed_filter_ucast_type)req->type;
2634dacd88d6SYuval Mintz 
2635dacd88d6SYuval Mintz 	params.is_rx_filter = 1;
2636dacd88d6SYuval Mintz 	params.is_tx_filter = 1;
2637dacd88d6SYuval Mintz 	params.vport_to_remove_from = vf->vport_id;
2638dacd88d6SYuval Mintz 	params.vport_to_add_to = vf->vport_id;
2639dacd88d6SYuval Mintz 	memcpy(params.mac, req->mac, ETH_ALEN);
2640dacd88d6SYuval Mintz 	params.vlan = req->vlan;
2641dacd88d6SYuval Mintz 
2642dacd88d6SYuval Mintz 	DP_VERBOSE(p_hwfn,
2643dacd88d6SYuval Mintz 		   QED_MSG_IOV,
2644dacd88d6SYuval Mintz 		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
2645dacd88d6SYuval Mintz 		   vf->abs_vf_id, params.opcode, params.type,
2646dacd88d6SYuval Mintz 		   params.is_rx_filter ? "RX" : "",
2647dacd88d6SYuval Mintz 		   params.is_tx_filter ? "TX" : "",
2648dacd88d6SYuval Mintz 		   params.vport_to_add_to,
2649dacd88d6SYuval Mintz 		   params.mac[0], params.mac[1],
2650dacd88d6SYuval Mintz 		   params.mac[2], params.mac[3],
2651dacd88d6SYuval Mintz 		   params.mac[4], params.mac[5], params.vlan);
2652dacd88d6SYuval Mintz 
2653dacd88d6SYuval Mintz 	if (!vf->vport_instance) {
2654dacd88d6SYuval Mintz 		DP_VERBOSE(p_hwfn,
2655dacd88d6SYuval Mintz 			   QED_MSG_IOV,
2656dacd88d6SYuval Mintz 			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
2657dacd88d6SYuval Mintz 			   vf->abs_vf_id);
2658dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
2659dacd88d6SYuval Mintz 		goto out;
2660dacd88d6SYuval Mintz 	}
2661dacd88d6SYuval Mintz 
266208feecd7SYuval Mintz 	/* Update shadow copy of the VF configuration */
266308feecd7SYuval Mintz 	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
266408feecd7SYuval Mintz 		status = PFVF_STATUS_FAILURE;
266508feecd7SYuval Mintz 		goto out;
266608feecd7SYuval Mintz 	}
266708feecd7SYuval Mintz 
266808feecd7SYuval Mintz 	/* Determine if the unicast filtering is acceptable to the PF */
26691a635e48SYuval Mintz 	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
267008feecd7SYuval Mintz 	    (params.type == QED_FILTER_VLAN ||
267108feecd7SYuval Mintz 	     params.type == QED_FILTER_MAC_VLAN)) {
267208feecd7SYuval Mintz 		/* Once a VLAN is forced or a PVID is set, do not allow
267308feecd7SYuval Mintz 		 * any further VLANs to be added or replaced.
267408feecd7SYuval Mintz 		 */
267508feecd7SYuval Mintz 		if (params.opcode == QED_FILTER_ADD ||
267608feecd7SYuval Mintz 		    params.opcode == QED_FILTER_REPLACE)
267708feecd7SYuval Mintz 			status = PFVF_STATUS_FORCED;
267808feecd7SYuval Mintz 		goto out;
267908feecd7SYuval Mintz 	}
268008feecd7SYuval Mintz 
26811a635e48SYuval Mintz 	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
2682eff16960SYuval Mintz 	    (params.type == QED_FILTER_MAC ||
2683eff16960SYuval Mintz 	     params.type == QED_FILTER_MAC_VLAN)) {
2684eff16960SYuval Mintz 		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
2685eff16960SYuval Mintz 		    (params.opcode != QED_FILTER_ADD &&
2686eff16960SYuval Mintz 		     params.opcode != QED_FILTER_REPLACE))
2687eff16960SYuval Mintz 			status = PFVF_STATUS_FORCED;
2688eff16960SYuval Mintz 		goto out;
2689eff16960SYuval Mintz 	}
2690eff16960SYuval Mintz 
2691dacd88d6SYuval Mintz 	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
2692dacd88d6SYuval Mintz 	if (rc) {
2693dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
2694dacd88d6SYuval Mintz 		goto out;
2695dacd88d6SYuval Mintz 	}
2696dacd88d6SYuval Mintz 
2697dacd88d6SYuval Mintz 	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
2698dacd88d6SYuval Mintz 				     QED_SPQ_MODE_CB, NULL);
2699dacd88d6SYuval Mintz 	if (rc)
2700dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
2701dacd88d6SYuval Mintz 
2702dacd88d6SYuval Mintz out:
2703dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
2704dacd88d6SYuval Mintz 			     sizeof(struct pfvf_def_resp_tlv), status);
2705dacd88d6SYuval Mintz }
2706dacd88d6SYuval Mintz 
27070b55e27dSYuval Mintz static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
27080b55e27dSYuval Mintz 				       struct qed_ptt *p_ptt,
27090b55e27dSYuval Mintz 				       struct qed_vf_info *vf)
27100b55e27dSYuval Mintz {
27110b55e27dSYuval Mintz 	int i;
27120b55e27dSYuval Mintz 
27130b55e27dSYuval Mintz 	/* Reset the SBs */
27140b55e27dSYuval Mintz 	for (i = 0; i < vf->num_sbs; i++)
27150b55e27dSYuval Mintz 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
27160b55e27dSYuval Mintz 						vf->igu_sbs[i],
27170b55e27dSYuval Mintz 						vf->opaque_fid, false);
27180b55e27dSYuval Mintz 
27190b55e27dSYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
27200b55e27dSYuval Mintz 			     sizeof(struct pfvf_def_resp_tlv),
27210b55e27dSYuval Mintz 			     PFVF_STATUS_SUCCESS);
27220b55e27dSYuval Mintz }
27230b55e27dSYuval Mintz 
27240b55e27dSYuval Mintz static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
27250b55e27dSYuval Mintz 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
27260b55e27dSYuval Mintz {
27270b55e27dSYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
27280b55e27dSYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
27290b55e27dSYuval Mintz 
27300b55e27dSYuval Mintz 	/* Disable Interrupts for VF */
27310b55e27dSYuval Mintz 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
27320b55e27dSYuval Mintz 
27330b55e27dSYuval Mintz 	/* Reset Permission table */
27340b55e27dSYuval Mintz 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
27350b55e27dSYuval Mintz 
27360b55e27dSYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
27370b55e27dSYuval Mintz 			     length, status);
27380b55e27dSYuval Mintz }
27390b55e27dSYuval Mintz 
27400b55e27dSYuval Mintz static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
27410b55e27dSYuval Mintz 				   struct qed_ptt *p_ptt,
27420b55e27dSYuval Mintz 				   struct qed_vf_info *p_vf)
27430b55e27dSYuval Mintz {
27440b55e27dSYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
27451fe614d1SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
27461fe614d1SYuval Mintz 	int rc = 0;
27470b55e27dSYuval Mintz 
27480b55e27dSYuval Mintz 	qed_iov_vf_cleanup(p_hwfn, p_vf);
27490b55e27dSYuval Mintz 
27501fe614d1SYuval Mintz 	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
27511fe614d1SYuval Mintz 		/* Stopping the VF */
27521fe614d1SYuval Mintz 		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
27531fe614d1SYuval Mintz 				    p_vf->opaque_fid);
27541fe614d1SYuval Mintz 
27551fe614d1SYuval Mintz 		if (rc) {
27561fe614d1SYuval Mintz 			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
27571fe614d1SYuval Mintz 			       rc);
27581fe614d1SYuval Mintz 			status = PFVF_STATUS_FAILURE;
27591fe614d1SYuval Mintz 		}
27601fe614d1SYuval Mintz 
27611fe614d1SYuval Mintz 		p_vf->state = VF_STOPPED;
27621fe614d1SYuval Mintz 	}
27631fe614d1SYuval Mintz 
27640b55e27dSYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
27651fe614d1SYuval Mintz 			     length, status);
27660b55e27dSYuval Mintz }
27670b55e27dSYuval Mintz 
27680b55e27dSYuval Mintz static int
27690b55e27dSYuval Mintz qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
27700b55e27dSYuval Mintz 			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
27710b55e27dSYuval Mintz {
27720b55e27dSYuval Mintz 	int cnt;
27730b55e27dSYuval Mintz 	u32 val;
27740b55e27dSYuval Mintz 
27750b55e27dSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
27760b55e27dSYuval Mintz 
27770b55e27dSYuval Mintz 	for (cnt = 0; cnt < 50; cnt++) {
27780b55e27dSYuval Mintz 		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
27790b55e27dSYuval Mintz 		if (!val)
27800b55e27dSYuval Mintz 			break;
27810b55e27dSYuval Mintz 		msleep(20);
27820b55e27dSYuval Mintz 	}
27830b55e27dSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
27840b55e27dSYuval Mintz 
27850b55e27dSYuval Mintz 	if (cnt == 50) {
27860b55e27dSYuval Mintz 		DP_ERR(p_hwfn,
27870b55e27dSYuval Mintz 		       "VF[%d] - dorq failed to clean up [usage 0x%08x]\n",
27880b55e27dSYuval Mintz 		       p_vf->abs_vf_id, val);
27890b55e27dSYuval Mintz 		return -EBUSY;
27900b55e27dSYuval Mintz 	}
27910b55e27dSYuval Mintz 
27920b55e27dSYuval Mintz 	return 0;
27930b55e27dSYuval Mintz }
27940b55e27dSYuval Mintz 
27950b55e27dSYuval Mintz static int
27960b55e27dSYuval Mintz qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
27970b55e27dSYuval Mintz 			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
27980b55e27dSYuval Mintz {
27990b55e27dSYuval Mintz 	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
28000b55e27dSYuval Mintz 	int i, cnt;
28010b55e27dSYuval Mintz 
28020b55e27dSYuval Mintz 	/* Read initial consumers & producers */
28030b55e27dSYuval Mintz 	for (i = 0; i < MAX_NUM_VOQS; i++) {
28040b55e27dSYuval Mintz 		u32 prod;
28050b55e27dSYuval Mintz 
28060b55e27dSYuval Mintz 		cons[i] = qed_rd(p_hwfn, p_ptt,
28070b55e27dSYuval Mintz 				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
28080b55e27dSYuval Mintz 				 i * 0x40);
28090b55e27dSYuval Mintz 		prod = qed_rd(p_hwfn, p_ptt,
28100b55e27dSYuval Mintz 			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
28110b55e27dSYuval Mintz 			      i * 0x40);
28120b55e27dSYuval Mintz 		distance[i] = prod - cons[i];
28130b55e27dSYuval Mintz 	}
28140b55e27dSYuval Mintz 
28150b55e27dSYuval Mintz 	/* Wait for consumers to pass the producers */
28160b55e27dSYuval Mintz 	i = 0;
28170b55e27dSYuval Mintz 	for (cnt = 0; cnt < 50; cnt++) {
28180b55e27dSYuval Mintz 		for (; i < MAX_NUM_VOQS; i++) {
28190b55e27dSYuval Mintz 			u32 tmp;
28200b55e27dSYuval Mintz 
28210b55e27dSYuval Mintz 			tmp = qed_rd(p_hwfn, p_ptt,
28220b55e27dSYuval Mintz 				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
28230b55e27dSYuval Mintz 				     i * 0x40);
28240b55e27dSYuval Mintz 			if (distance[i] > tmp - cons[i])
28250b55e27dSYuval Mintz 				break;
28260b55e27dSYuval Mintz 		}
28270b55e27dSYuval Mintz 
28280b55e27dSYuval Mintz 		if (i == MAX_NUM_VOQS)
28290b55e27dSYuval Mintz 			break;
28300b55e27dSYuval Mintz 
28310b55e27dSYuval Mintz 		msleep(20);
28320b55e27dSYuval Mintz 	}
28330b55e27dSYuval Mintz 
28340b55e27dSYuval Mintz 	if (cnt == 50) {
28350b55e27dSYuval Mintz 		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
28360b55e27dSYuval Mintz 		       p_vf->abs_vf_id, i);
28370b55e27dSYuval Mintz 		return -EBUSY;
28380b55e27dSYuval Mintz 	}
28390b55e27dSYuval Mintz 
28400b55e27dSYuval Mintz 	return 0;
28410b55e27dSYuval Mintz }
28420b55e27dSYuval Mintz 
28430b55e27dSYuval Mintz static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
28440b55e27dSYuval Mintz 			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
28450b55e27dSYuval Mintz {
28460b55e27dSYuval Mintz 	int rc;
28470b55e27dSYuval Mintz 
28480b55e27dSYuval Mintz 	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
28490b55e27dSYuval Mintz 	if (rc)
28500b55e27dSYuval Mintz 		return rc;
28510b55e27dSYuval Mintz 
28520b55e27dSYuval Mintz 	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
28530b55e27dSYuval Mintz 	if (rc)
28540b55e27dSYuval Mintz 		return rc;
28550b55e27dSYuval Mintz 
28560b55e27dSYuval Mintz 	return 0;
28570b55e27dSYuval Mintz }
28580b55e27dSYuval Mintz 
28590b55e27dSYuval Mintz static int
28600b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
28610b55e27dSYuval Mintz 			       struct qed_ptt *p_ptt,
28620b55e27dSYuval Mintz 			       u16 rel_vf_id, u32 *ack_vfs)
28630b55e27dSYuval Mintz {
28640b55e27dSYuval Mintz 	struct qed_vf_info *p_vf;
28650b55e27dSYuval Mintz 	int rc = 0;
28660b55e27dSYuval Mintz 
28670b55e27dSYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
28680b55e27dSYuval Mintz 	if (!p_vf)
28690b55e27dSYuval Mintz 		return 0;
28700b55e27dSYuval Mintz 
28710b55e27dSYuval Mintz 	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
28720b55e27dSYuval Mintz 	    (1ULL << (rel_vf_id % 64))) {
28730b55e27dSYuval Mintz 		u16 vfid = p_vf->abs_vf_id;
28740b55e27dSYuval Mintz 
28750b55e27dSYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
28760b55e27dSYuval Mintz 			   "VF[%d] - Handling FLR\n", vfid);
28770b55e27dSYuval Mintz 
28780b55e27dSYuval Mintz 		qed_iov_vf_cleanup(p_hwfn, p_vf);
28790b55e27dSYuval Mintz 
28800b55e27dSYuval Mintz 		/* If VF isn't active, no need for anything but SW */
28810b55e27dSYuval Mintz 		if (!p_vf->b_init)
28820b55e27dSYuval Mintz 			goto cleanup;
28830b55e27dSYuval Mintz 
28840b55e27dSYuval Mintz 		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
28850b55e27dSYuval Mintz 		if (rc)
28860b55e27dSYuval Mintz 			goto cleanup;
28870b55e27dSYuval Mintz 
28880b55e27dSYuval Mintz 		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
28890b55e27dSYuval Mintz 		if (rc) {
28900b55e27dSYuval Mintz 			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
28910b55e27dSYuval Mintz 			return rc;
28920b55e27dSYuval Mintz 		}
28930b55e27dSYuval Mintz 
28947eff82b0SYuval Mintz 		/* Workaround to make VF-PF channel ready, as FW
28957eff82b0SYuval Mintz 		 * doesn't do that as a part of FLR.
28967eff82b0SYuval Mintz 		 */
28977eff82b0SYuval Mintz 		REG_WR(p_hwfn,
28987eff82b0SYuval Mintz 		       GTT_BAR0_MAP_REG_USDM_RAM +
28997eff82b0SYuval Mintz 		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
29007eff82b0SYuval Mintz 
29010b55e27dSYuval Mintz 		/* VF_STOPPED has to be set only after final cleanup
29020b55e27dSYuval Mintz 		 * but prior to re-enabling the VF.
29030b55e27dSYuval Mintz 		 */
29040b55e27dSYuval Mintz 		p_vf->state = VF_STOPPED;
29050b55e27dSYuval Mintz 
29060b55e27dSYuval Mintz 		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
29070b55e27dSYuval Mintz 		if (rc) {
29080b55e27dSYuval Mintz 			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
29090b55e27dSYuval Mintz 			       vfid);
29100b55e27dSYuval Mintz 			return rc;
29110b55e27dSYuval Mintz 		}
29120b55e27dSYuval Mintz cleanup:
29130b55e27dSYuval Mintz 		/* Mark VF for ack and clean pending state */
29140b55e27dSYuval Mintz 		if (p_vf->state == VF_RESET)
29150b55e27dSYuval Mintz 			p_vf->state = VF_STOPPED;
29161a635e48SYuval Mintz 		ack_vfs[vfid / 32] |= BIT((vfid % 32));
29170b55e27dSYuval Mintz 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
29180b55e27dSYuval Mintz 		    ~(1ULL << (rel_vf_id % 64));
29190b55e27dSYuval Mintz 		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
29200b55e27dSYuval Mintz 		    ~(1ULL << (rel_vf_id % 64));
29210b55e27dSYuval Mintz 	}
29220b55e27dSYuval Mintz 
29230b55e27dSYuval Mintz 	return rc;
29240b55e27dSYuval Mintz }
29250b55e27dSYuval Mintz 
2926ba56947aSBaoyou Xie static int
2927ba56947aSBaoyou Xie qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
29280b55e27dSYuval Mintz {
29290b55e27dSYuval Mintz 	u32 ack_vfs[VF_MAX_STATIC / 32];
29300b55e27dSYuval Mintz 	int rc = 0;
29310b55e27dSYuval Mintz 	u16 i;
29320b55e27dSYuval Mintz 
29330b55e27dSYuval Mintz 	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
29340b55e27dSYuval Mintz 
29350b55e27dSYuval Mintz 	/* Since BRB <-> PRS interface can't be tested as part of the flr
29360b55e27dSYuval Mintz 	 * polling due to HW limitations, simply sleep a bit. And since
29370b55e27dSYuval Mintz 	 * there's no need to wait per-vf, do it before looping.
29380b55e27dSYuval Mintz 	 */
29390b55e27dSYuval Mintz 	msleep(100);
29400b55e27dSYuval Mintz 
29410b55e27dSYuval Mintz 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
29420b55e27dSYuval Mintz 		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
29430b55e27dSYuval Mintz 
29440b55e27dSYuval Mintz 	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
29450b55e27dSYuval Mintz 	return rc;
29460b55e27dSYuval Mintz }
29470b55e27dSYuval Mintz 
29480b55e27dSYuval Mintz int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
29490b55e27dSYuval Mintz {
29500b55e27dSYuval Mintz 	u16 i, found = 0;
29510b55e27dSYuval Mintz 
29520b55e27dSYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
29530b55e27dSYuval Mintz 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
29540b55e27dSYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
29550b55e27dSYuval Mintz 			   "[%08x,...,%08x]: %08x\n",
29560b55e27dSYuval Mintz 			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
29570b55e27dSYuval Mintz 
29580b55e27dSYuval Mintz 	if (!p_hwfn->cdev->p_iov_info) {
29590b55e27dSYuval Mintz 		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
29600b55e27dSYuval Mintz 		return 0;
29610b55e27dSYuval Mintz 	}
29620b55e27dSYuval Mintz 
29630b55e27dSYuval Mintz 	/* Mark VFs */
29640b55e27dSYuval Mintz 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
29650b55e27dSYuval Mintz 		struct qed_vf_info *p_vf;
29660b55e27dSYuval Mintz 		u8 vfid;
29670b55e27dSYuval Mintz 
29680b55e27dSYuval Mintz 		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
29690b55e27dSYuval Mintz 		if (!p_vf)
29700b55e27dSYuval Mintz 			continue;
29710b55e27dSYuval Mintz 
29720b55e27dSYuval Mintz 		vfid = p_vf->abs_vf_id;
29731a635e48SYuval Mintz 		if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
29740b55e27dSYuval Mintz 			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
29750b55e27dSYuval Mintz 			u16 rel_vf_id = p_vf->relative_vf_id;
29760b55e27dSYuval Mintz 
29770b55e27dSYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
29780b55e27dSYuval Mintz 				   "VF[%d] [rel %d] got FLR-ed\n",
29790b55e27dSYuval Mintz 				   vfid, rel_vf_id);
29800b55e27dSYuval Mintz 
29810b55e27dSYuval Mintz 			p_vf->state = VF_RESET;
29820b55e27dSYuval Mintz 
29830b55e27dSYuval Mintz 			/* No need to lock here, since pending_flr should
29840b55e27dSYuval Mintz 			 * only change here and before ACKing the MFW. Since
29850b55e27dSYuval Mintz 			 * the MFW will not trigger an additional attention for
29860b55e27dSYuval Mintz 			 * VF flr until we ACK, we're safe.
29870b55e27dSYuval Mintz 			 */
29880b55e27dSYuval Mintz 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
29890b55e27dSYuval Mintz 			found = 1;
29900b55e27dSYuval Mintz 		}
29910b55e27dSYuval Mintz 	}
29920b55e27dSYuval Mintz 
29930b55e27dSYuval Mintz 	return found;
29940b55e27dSYuval Mintz }
29950b55e27dSYuval Mintz 
299673390ac9SYuval Mintz static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
299773390ac9SYuval Mintz 			     u16 vfid,
299873390ac9SYuval Mintz 			     struct qed_mcp_link_params *p_params,
299973390ac9SYuval Mintz 			     struct qed_mcp_link_state *p_link,
300073390ac9SYuval Mintz 			     struct qed_mcp_link_capabilities *p_caps)
300173390ac9SYuval Mintz {
300273390ac9SYuval Mintz 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
300373390ac9SYuval Mintz 						       vfid,
300473390ac9SYuval Mintz 						       false);
300573390ac9SYuval Mintz 	struct qed_bulletin_content *p_bulletin;
300673390ac9SYuval Mintz 
300773390ac9SYuval Mintz 	if (!p_vf)
300873390ac9SYuval Mintz 		return;
300973390ac9SYuval Mintz 
301073390ac9SYuval Mintz 	p_bulletin = p_vf->bulletin.p_virt;
301173390ac9SYuval Mintz 
301273390ac9SYuval Mintz 	if (p_params)
301373390ac9SYuval Mintz 		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
301473390ac9SYuval Mintz 	if (p_link)
301573390ac9SYuval Mintz 		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
301673390ac9SYuval Mintz 	if (p_caps)
301773390ac9SYuval Mintz 		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
301873390ac9SYuval Mintz }
301973390ac9SYuval Mintz 
302037bff2b9SYuval Mintz static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
302137bff2b9SYuval Mintz 				    struct qed_ptt *p_ptt, int vfid)
302237bff2b9SYuval Mintz {
302337bff2b9SYuval Mintz 	struct qed_iov_vf_mbx *mbx;
302437bff2b9SYuval Mintz 	struct qed_vf_info *p_vf;
302537bff2b9SYuval Mintz 
302637bff2b9SYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
302737bff2b9SYuval Mintz 	if (!p_vf)
302837bff2b9SYuval Mintz 		return;
302937bff2b9SYuval Mintz 
303037bff2b9SYuval Mintz 	mbx = &p_vf->vf_mbx;
303137bff2b9SYuval Mintz 
303237bff2b9SYuval Mintz 	/* qed_iov_process_mbx_request */
303354fdd80fSYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
303454fdd80fSYuval Mintz 		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
303537bff2b9SYuval Mintz 
303637bff2b9SYuval Mintz 	mbx->first_tlv = mbx->req_virt->first_tlv;
303737bff2b9SYuval Mintz 
303837bff2b9SYuval Mintz 	/* check if tlv type is known */
30397eff82b0SYuval Mintz 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
30407eff82b0SYuval Mintz 	    !p_vf->b_malicious) {
30411408cc1fSYuval Mintz 		switch (mbx->first_tlv.tl.type) {
30421408cc1fSYuval Mintz 		case CHANNEL_TLV_ACQUIRE:
30431408cc1fSYuval Mintz 			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
30441408cc1fSYuval Mintz 			break;
3045dacd88d6SYuval Mintz 		case CHANNEL_TLV_VPORT_START:
3046dacd88d6SYuval Mintz 			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3047dacd88d6SYuval Mintz 			break;
3048dacd88d6SYuval Mintz 		case CHANNEL_TLV_VPORT_TEARDOWN:
3049dacd88d6SYuval Mintz 			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3050dacd88d6SYuval Mintz 			break;
3051dacd88d6SYuval Mintz 		case CHANNEL_TLV_START_RXQ:
3052dacd88d6SYuval Mintz 			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3053dacd88d6SYuval Mintz 			break;
3054dacd88d6SYuval Mintz 		case CHANNEL_TLV_START_TXQ:
3055dacd88d6SYuval Mintz 			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3056dacd88d6SYuval Mintz 			break;
3057dacd88d6SYuval Mintz 		case CHANNEL_TLV_STOP_RXQS:
3058dacd88d6SYuval Mintz 			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3059dacd88d6SYuval Mintz 			break;
3060dacd88d6SYuval Mintz 		case CHANNEL_TLV_STOP_TXQS:
3061dacd88d6SYuval Mintz 			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3062dacd88d6SYuval Mintz 			break;
306317b235c1SYuval Mintz 		case CHANNEL_TLV_UPDATE_RXQ:
306417b235c1SYuval Mintz 			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
306517b235c1SYuval Mintz 			break;
3066dacd88d6SYuval Mintz 		case CHANNEL_TLV_VPORT_UPDATE:
3067dacd88d6SYuval Mintz 			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3068dacd88d6SYuval Mintz 			break;
3069dacd88d6SYuval Mintz 		case CHANNEL_TLV_UCAST_FILTER:
3070dacd88d6SYuval Mintz 			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3071dacd88d6SYuval Mintz 			break;
30720b55e27dSYuval Mintz 		case CHANNEL_TLV_CLOSE:
30730b55e27dSYuval Mintz 			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
30740b55e27dSYuval Mintz 			break;
30750b55e27dSYuval Mintz 		case CHANNEL_TLV_INT_CLEANUP:
30760b55e27dSYuval Mintz 			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
30770b55e27dSYuval Mintz 			break;
30780b55e27dSYuval Mintz 		case CHANNEL_TLV_RELEASE:
30790b55e27dSYuval Mintz 			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
30800b55e27dSYuval Mintz 			break;
30811408cc1fSYuval Mintz 		}
30827eff82b0SYuval Mintz 	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
30837eff82b0SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
30847eff82b0SYuval Mintz 			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
30857eff82b0SYuval Mintz 			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
30867eff82b0SYuval Mintz 
30877eff82b0SYuval Mintz 		qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
30887eff82b0SYuval Mintz 				     mbx->first_tlv.tl.type,
30897eff82b0SYuval Mintz 				     sizeof(struct pfvf_def_resp_tlv),
30907eff82b0SYuval Mintz 				     PFVF_STATUS_MALICIOUS);
309137bff2b9SYuval Mintz 	} else {
309237bff2b9SYuval Mintz 		/* unknown TLV - this may belong to a VF driver from the future
309337bff2b9SYuval Mintz 		 * - a version written after this PF driver was written, which
309437bff2b9SYuval Mintz 		 * supports features unknown as of yet. Too bad since we don't
309537bff2b9SYuval Mintz 		 * support them. Or this may be because someone wrote a crappy
309637bff2b9SYuval Mintz 		 * VF driver and is sending garbage over the channel.
309737bff2b9SYuval Mintz 		 */
309854fdd80fSYuval Mintz 		DP_NOTICE(p_hwfn,
309954fdd80fSYuval Mintz 			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
310054fdd80fSYuval Mintz 			  p_vf->abs_vf_id,
310154fdd80fSYuval Mintz 			  mbx->first_tlv.tl.type,
310254fdd80fSYuval Mintz 			  mbx->first_tlv.tl.length,
310354fdd80fSYuval Mintz 			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);
310437bff2b9SYuval Mintz 
310554fdd80fSYuval Mintz 		/* Try replying in case reply address matches the acquisition's
310654fdd80fSYuval Mintz 		 * posted address.
310754fdd80fSYuval Mintz 		 */
310854fdd80fSYuval Mintz 		if (p_vf->acquire.first_tlv.reply_address &&
310954fdd80fSYuval Mintz 		    (mbx->first_tlv.reply_address ==
311054fdd80fSYuval Mintz 		     p_vf->acquire.first_tlv.reply_address)) {
311154fdd80fSYuval Mintz 			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
311254fdd80fSYuval Mintz 					     mbx->first_tlv.tl.type,
311354fdd80fSYuval Mintz 					     sizeof(struct pfvf_def_resp_tlv),
311454fdd80fSYuval Mintz 					     PFVF_STATUS_NOT_SUPPORTED);
311554fdd80fSYuval Mintz 		} else {
311637bff2b9SYuval Mintz 			DP_VERBOSE(p_hwfn,
311737bff2b9SYuval Mintz 				   QED_MSG_IOV,
311854fdd80fSYuval Mintz 				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
311954fdd80fSYuval Mintz 				   p_vf->abs_vf_id);
312037bff2b9SYuval Mintz 		}
312137bff2b9SYuval Mintz 	}
312237bff2b9SYuval Mintz }
312337bff2b9SYuval Mintz 
3124ba56947aSBaoyou Xie static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
312537bff2b9SYuval Mintz {
312637bff2b9SYuval Mintz 	u64 add_bit = 1ULL << (vfid % 64);
312737bff2b9SYuval Mintz 
312837bff2b9SYuval Mintz 	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
312937bff2b9SYuval Mintz }
313037bff2b9SYuval Mintz 
313137bff2b9SYuval Mintz static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
313237bff2b9SYuval Mintz 						    u64 *events)
313337bff2b9SYuval Mintz {
313437bff2b9SYuval Mintz 	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
313537bff2b9SYuval Mintz 
313637bff2b9SYuval Mintz 	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
313737bff2b9SYuval Mintz 	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
313837bff2b9SYuval Mintz }
313937bff2b9SYuval Mintz 
31407eff82b0SYuval Mintz static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
31417eff82b0SYuval Mintz 						       u16 abs_vfid)
31427eff82b0SYuval Mintz {
31437eff82b0SYuval Mintz 	u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
31447eff82b0SYuval Mintz 
31457eff82b0SYuval Mintz 	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
31467eff82b0SYuval Mintz 		DP_VERBOSE(p_hwfn,
31477eff82b0SYuval Mintz 			   QED_MSG_IOV,
31487eff82b0SYuval Mintz 			   "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
31497eff82b0SYuval Mintz 			   abs_vfid);
31507eff82b0SYuval Mintz 		return NULL;
31517eff82b0SYuval Mintz 	}
31527eff82b0SYuval Mintz 
31537eff82b0SYuval Mintz 	return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
31547eff82b0SYuval Mintz }
31557eff82b0SYuval Mintz 
315637bff2b9SYuval Mintz static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
315737bff2b9SYuval Mintz 			      u16 abs_vfid, struct regpair *vf_msg)
315837bff2b9SYuval Mintz {
31597eff82b0SYuval Mintz 	struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
316037bff2b9SYuval Mintz 			   abs_vfid);
31617eff82b0SYuval Mintz 
31627eff82b0SYuval Mintz 	if (!p_vf)
316337bff2b9SYuval Mintz 		return 0;
316437bff2b9SYuval Mintz 
316537bff2b9SYuval Mintz 	/* Record the physical address of the request so that the handler
316637bff2b9SYuval Mintz 	 * can later copy the message from it.
316737bff2b9SYuval Mintz 	 */
316837bff2b9SYuval Mintz 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
316937bff2b9SYuval Mintz 
317037bff2b9SYuval Mintz 	/* Mark the event and schedule the workqueue */
317137bff2b9SYuval Mintz 	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
317237bff2b9SYuval Mintz 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
317337bff2b9SYuval Mintz 
317437bff2b9SYuval Mintz 	return 0;
317537bff2b9SYuval Mintz }
317637bff2b9SYuval Mintz 
31777eff82b0SYuval Mintz static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
31787eff82b0SYuval Mintz 				     struct malicious_vf_eqe_data *p_data)
31797eff82b0SYuval Mintz {
31807eff82b0SYuval Mintz 	struct qed_vf_info *p_vf;
31817eff82b0SYuval Mintz 
31827eff82b0SYuval Mintz 	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
31837eff82b0SYuval Mintz 
31847eff82b0SYuval Mintz 	if (!p_vf)
31857eff82b0SYuval Mintz 		return;
31867eff82b0SYuval Mintz 
31877eff82b0SYuval Mintz 	DP_INFO(p_hwfn,
31887eff82b0SYuval Mintz 		"VF [%d] - Malicious behavior [%02x]\n",
31897eff82b0SYuval Mintz 		p_vf->abs_vf_id, p_data->err_id);
31907eff82b0SYuval Mintz 
31917eff82b0SYuval Mintz 	p_vf->b_malicious = true;
31927eff82b0SYuval Mintz }
31937eff82b0SYuval Mintz 
319437bff2b9SYuval Mintz int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
319537bff2b9SYuval Mintz 			u8 opcode, __le16 echo, union event_ring_data *data)
319637bff2b9SYuval Mintz {
319737bff2b9SYuval Mintz 	switch (opcode) {
319837bff2b9SYuval Mintz 	case COMMON_EVENT_VF_PF_CHANNEL:
319937bff2b9SYuval Mintz 		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
320037bff2b9SYuval Mintz 					  &data->vf_pf_channel.msg_addr);
32017eff82b0SYuval Mintz 	case COMMON_EVENT_MALICIOUS_VF:
32027eff82b0SYuval Mintz 		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
32037eff82b0SYuval Mintz 		return 0;
320437bff2b9SYuval Mintz 	default:
320537bff2b9SYuval Mintz 		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
320637bff2b9SYuval Mintz 			opcode);
320737bff2b9SYuval Mintz 		return -EINVAL;
320837bff2b9SYuval Mintz 	}
320937bff2b9SYuval Mintz }
321037bff2b9SYuval Mintz 
321132a47e72SYuval Mintz u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
321232a47e72SYuval Mintz {
321332a47e72SYuval Mintz 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
321432a47e72SYuval Mintz 	u16 i;
321532a47e72SYuval Mintz 
321632a47e72SYuval Mintz 	if (!p_iov)
321732a47e72SYuval Mintz 		goto out;
321832a47e72SYuval Mintz 
321932a47e72SYuval Mintz 	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
32207eff82b0SYuval Mintz 		if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
322132a47e72SYuval Mintz 			return i;
322232a47e72SYuval Mintz 
322332a47e72SYuval Mintz out:
322432a47e72SYuval Mintz 	return MAX_NUM_VFS;
322532a47e72SYuval Mintz }
322637bff2b9SYuval Mintz 
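/* DMA the VF's mailbox request from the address it posted over the EQ into
 * the PF's pre-allocated per-VF request buffer. The transfer length is
 * expressed in dwords, hence the division of sizeof(union vfpf_tlvs) by 4.
 */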
322737bff2b9SYuval Mintz static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
322837bff2b9SYuval Mintz 			       int vfid)
322937bff2b9SYuval Mintz {
323037bff2b9SYuval Mintz 	struct qed_dmae_params params;
323137bff2b9SYuval Mintz 	struct qed_vf_info *vf_info;
323237bff2b9SYuval Mintz 
323337bff2b9SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
323437bff2b9SYuval Mintz 	if (!vf_info)
323537bff2b9SYuval Mintz 		return -EINVAL;
323637bff2b9SYuval Mintz 
323737bff2b9SYuval Mintz 	memset(&params, 0, sizeof(struct qed_dmae_params));
323837bff2b9SYuval Mintz 	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
323937bff2b9SYuval Mintz 	params.src_vfid = vf_info->abs_vf_id;
324037bff2b9SYuval Mintz 
324137bff2b9SYuval Mintz 	if (qed_dmae_host2host(p_hwfn, ptt,
324237bff2b9SYuval Mintz 			       vf_info->vf_mbx.pending_req,
324337bff2b9SYuval Mintz 			       vf_info->vf_mbx.req_phys,
324437bff2b9SYuval Mintz 			       sizeof(union vfpf_tlvs) / 4, &params)) {
324537bff2b9SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
324637bff2b9SYuval Mintz 			   "Failed to copy message from VF 0x%02x\n", vfid);
324737bff2b9SYuval Mintz 
324837bff2b9SYuval Mintz 		return -EIO;
324937bff2b9SYuval Mintz 	}
325037bff2b9SYuval Mintz 
325137bff2b9SYuval Mintz 	return 0;
325237bff2b9SYuval Mintz }
325337bff2b9SYuval Mintz 
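/* Publish a PF-forced MAC address through the VF's bulletin board. A forced
 * MAC supersedes any MAC the VF asked for, so VFPF_BULLETIN_MAC_ADDR is
 * cleared while MAC_ADDR_FORCED is set.
 */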
3254eff16960SYuval Mintz static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
3255eff16960SYuval Mintz 					    u8 *mac, int vfid)
3256eff16960SYuval Mintz {
3257eff16960SYuval Mintz 	struct qed_vf_info *vf_info;
3258eff16960SYuval Mintz 	u64 feature;
3259eff16960SYuval Mintz 
3260eff16960SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3261eff16960SYuval Mintz 	if (!vf_info) {
3262eff16960SYuval Mintz 		DP_NOTICE(p_hwfn->cdev,
3263eff16960SYuval Mintz 			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3264eff16960SYuval Mintz 		return;
3265eff16960SYuval Mintz 	}
3266eff16960SYuval Mintz 
32677eff82b0SYuval Mintz 	if (vf_info->b_malicious) {
32687eff82b0SYuval Mintz 		DP_NOTICE(p_hwfn->cdev,
32697eff82b0SYuval Mintz 			  "Can't set forced MAC to malicious VF [%d]\n", vfid);
32707eff82b0SYuval Mintz 		return;
32717eff82b0SYuval Mintz 	}
32727eff82b0SYuval Mintz 
3273eff16960SYuval Mintz 	feature = 1 << MAC_ADDR_FORCED;
3274eff16960SYuval Mintz 	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3275eff16960SYuval Mintz 
3276eff16960SYuval Mintz 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
3277eff16960SYuval Mintz 	/* Forced MAC will disable MAC_ADDR */
32781a635e48SYuval Mintz 	vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
3279eff16960SYuval Mintz 
3280eff16960SYuval Mintz 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3281eff16960SYuval Mintz }
3282eff16960SYuval Mintz 
3283ba56947aSBaoyou Xie static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
328408feecd7SYuval Mintz 					     u16 pvid, int vfid)
328508feecd7SYuval Mintz {
328608feecd7SYuval Mintz 	struct qed_vf_info *vf_info;
328708feecd7SYuval Mintz 	u64 feature;
328808feecd7SYuval Mintz 
328908feecd7SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
329008feecd7SYuval Mintz 	if (!vf_info) {
329108feecd7SYuval Mintz 		DP_NOTICE(p_hwfn->cdev,
329208feecd7SYuval Mintz 			  "Can not set forced vlan, invalid vfid [%d]\n", vfid);
329308feecd7SYuval Mintz 		return;
329408feecd7SYuval Mintz 	}
329508feecd7SYuval Mintz 
32967eff82b0SYuval Mintz 	if (vf_info->b_malicious) {
32977eff82b0SYuval Mintz 		DP_NOTICE(p_hwfn->cdev,
32987eff82b0SYuval Mintz 			  "Can't set forced vlan to malicious VF [%d]\n", vfid);
32997eff82b0SYuval Mintz 		return;
33007eff82b0SYuval Mintz 	}
33017eff82b0SYuval Mintz 
330208feecd7SYuval Mintz 	feature = 1 << VLAN_ADDR_FORCED;
330308feecd7SYuval Mintz 	vf_info->bulletin.p_virt->pvid = pvid;
330408feecd7SYuval Mintz 	if (pvid)
330508feecd7SYuval Mintz 		vf_info->bulletin.p_virt->valid_bitmap |= feature;
330608feecd7SYuval Mintz 	else
330708feecd7SYuval Mintz 		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
330808feecd7SYuval Mintz 
330908feecd7SYuval Mintz 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
331008feecd7SYuval Mintz }
331108feecd7SYuval Mintz 
33126ddc7608SYuval Mintz static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
33136ddc7608SYuval Mintz {
33146ddc7608SYuval Mintz 	struct qed_vf_info *p_vf_info;
33156ddc7608SYuval Mintz 
33166ddc7608SYuval Mintz 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
33176ddc7608SYuval Mintz 	if (!p_vf_info)
33186ddc7608SYuval Mintz 		return false;
33196ddc7608SYuval Mintz 
33206ddc7608SYuval Mintz 	return !!p_vf_info->vport_instance;
33216ddc7608SYuval Mintz }
33226ddc7608SYuval Mintz 
3323ba56947aSBaoyou Xie static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
33240b55e27dSYuval Mintz {
33250b55e27dSYuval Mintz 	struct qed_vf_info *p_vf_info;
33260b55e27dSYuval Mintz 
33270b55e27dSYuval Mintz 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
33280b55e27dSYuval Mintz 	if (!p_vf_info)
33290b55e27dSYuval Mintz 		return true;
33300b55e27dSYuval Mintz 
33310b55e27dSYuval Mintz 	return p_vf_info->state == VF_STOPPED;
33320b55e27dSYuval Mintz }
33330b55e27dSYuval Mintz 
333473390ac9SYuval Mintz static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
333573390ac9SYuval Mintz {
333673390ac9SYuval Mintz 	struct qed_vf_info *vf_info;
333773390ac9SYuval Mintz 
333873390ac9SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
333973390ac9SYuval Mintz 	if (!vf_info)
334073390ac9SYuval Mintz 		return false;
334173390ac9SYuval Mintz 
334273390ac9SYuval Mintz 	return vf_info->spoof_chk;
334373390ac9SYuval Mintz }
334473390ac9SYuval Mintz 
3345ba56947aSBaoyou Xie static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
33466ddc7608SYuval Mintz {
33476ddc7608SYuval Mintz 	struct qed_vf_info *vf;
33486ddc7608SYuval Mintz 	int rc = -EINVAL;
33496ddc7608SYuval Mintz 
33506ddc7608SYuval Mintz 	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
33516ddc7608SYuval Mintz 		DP_NOTICE(p_hwfn,
33526ddc7608SYuval Mintz 			  "SR-IOV sanity check failed, can't set spoofchk\n");
33536ddc7608SYuval Mintz 		goto out;
33546ddc7608SYuval Mintz 	}
33556ddc7608SYuval Mintz 
33566ddc7608SYuval Mintz 	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
33576ddc7608SYuval Mintz 	if (!vf)
33586ddc7608SYuval Mintz 		goto out;
33596ddc7608SYuval Mintz 
33606ddc7608SYuval Mintz 	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
33616ddc7608SYuval Mintz 		/* After VF VPORT start PF will configure spoof check */
33626ddc7608SYuval Mintz 		vf->req_spoofchk_val = val;
33636ddc7608SYuval Mintz 		rc = 0;
33646ddc7608SYuval Mintz 		goto out;
33656ddc7608SYuval Mintz 	}
33666ddc7608SYuval Mintz 
33676ddc7608SYuval Mintz 	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
33686ddc7608SYuval Mintz 
33696ddc7608SYuval Mintz out:
33706ddc7608SYuval Mintz 	return rc;
33716ddc7608SYuval Mintz }
33726ddc7608SYuval Mintz 
3373eff16960SYuval Mintz static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
3374eff16960SYuval Mintz 					   u16 rel_vf_id)
3375eff16960SYuval Mintz {
3376eff16960SYuval Mintz 	struct qed_vf_info *p_vf;
3377eff16960SYuval Mintz 
3378eff16960SYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3379eff16960SYuval Mintz 	if (!p_vf || !p_vf->bulletin.p_virt)
3380eff16960SYuval Mintz 		return NULL;
3381eff16960SYuval Mintz 
33821a635e48SYuval Mintz 	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
3383eff16960SYuval Mintz 		return NULL;
3384eff16960SYuval Mintz 
3385eff16960SYuval Mintz 	return p_vf->bulletin.p_virt->mac;
3386eff16960SYuval Mintz }
3387eff16960SYuval Mintz 
3388ba56947aSBaoyou Xie static u16
3389ba56947aSBaoyou Xie qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
339008feecd7SYuval Mintz {
339108feecd7SYuval Mintz 	struct qed_vf_info *p_vf;
339208feecd7SYuval Mintz 
339308feecd7SYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
339408feecd7SYuval Mintz 	if (!p_vf || !p_vf->bulletin.p_virt)
339508feecd7SYuval Mintz 		return 0;
339608feecd7SYuval Mintz 
33971a635e48SYuval Mintz 	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
339808feecd7SYuval Mintz 		return 0;
339908feecd7SYuval Mintz 
340008feecd7SYuval Mintz 	return p_vf->bulletin.p_virt->pvid;
340108feecd7SYuval Mintz }
340208feecd7SYuval Mintz 
3403733def6aSYuval Mintz static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
3404733def6aSYuval Mintz 				     struct qed_ptt *p_ptt, int vfid, int val)
3405733def6aSYuval Mintz {
3406733def6aSYuval Mintz 	struct qed_vf_info *vf;
3407733def6aSYuval Mintz 	u8 abs_vp_id = 0;
3408733def6aSYuval Mintz 	int rc;
3409733def6aSYuval Mintz 
3410733def6aSYuval Mintz 	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3411733def6aSYuval Mintz 	if (!vf)
3412733def6aSYuval Mintz 		return -EINVAL;
3413733def6aSYuval Mintz 
3414733def6aSYuval Mintz 	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
3415733def6aSYuval Mintz 	if (rc)
3416733def6aSYuval Mintz 		return rc;
3417733def6aSYuval Mintz 
3418733def6aSYuval Mintz 	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
3419733def6aSYuval Mintz }
3420733def6aSYuval Mintz 
3421ba56947aSBaoyou Xie static int
3422ba56947aSBaoyou Xie qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
3423733def6aSYuval Mintz {
3424733def6aSYuval Mintz 	struct qed_vf_info *vf;
3425733def6aSYuval Mintz 	u8 vport_id;
3426733def6aSYuval Mintz 	int i;
3427733def6aSYuval Mintz 
3428733def6aSYuval Mintz 	for_each_hwfn(cdev, i) {
3429733def6aSYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3430733def6aSYuval Mintz 
3431733def6aSYuval Mintz 		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3432733def6aSYuval Mintz 			DP_NOTICE(p_hwfn,
3433733def6aSYuval Mintz 				  "SR-IOV sanity check failed, can't set min rate\n");
3434733def6aSYuval Mintz 			return -EINVAL;
3435733def6aSYuval Mintz 		}
3436733def6aSYuval Mintz 	}
3437733def6aSYuval Mintz 
3438733def6aSYuval Mintz 	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
3439733def6aSYuval Mintz 	vport_id = vf->vport_id;
3440733def6aSYuval Mintz 
3441733def6aSYuval Mintz 	return qed_configure_vport_wfq(cdev, vport_id, rate);
3442733def6aSYuval Mintz }
3443733def6aSYuval Mintz 
344473390ac9SYuval Mintz static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
344573390ac9SYuval Mintz {
344673390ac9SYuval Mintz 	struct qed_wfq_data *vf_vp_wfq;
344773390ac9SYuval Mintz 	struct qed_vf_info *vf_info;
344873390ac9SYuval Mintz 
344973390ac9SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
345073390ac9SYuval Mintz 	if (!vf_info)
345173390ac9SYuval Mintz 		return 0;
345273390ac9SYuval Mintz 
345373390ac9SYuval Mintz 	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
345473390ac9SYuval Mintz 
345573390ac9SYuval Mintz 	if (vf_vp_wfq->configured)
345673390ac9SYuval Mintz 		return vf_vp_wfq->min_speed;
345773390ac9SYuval Mintz 	else
345873390ac9SYuval Mintz 		return 0;
345973390ac9SYuval Mintz }
346073390ac9SYuval Mintz 
346137bff2b9SYuval Mintz /**
346237bff2b9SYuval Mintz  * qed_schedule_iov - schedules IOV task for VF and PF
346337bff2b9SYuval Mintz  * @hwfn: hardware function pointer
346437bff2b9SYuval Mintz  * @flag: IOV flag for VF/PF
346537bff2b9SYuval Mintz  */
346637bff2b9SYuval Mintz void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
346737bff2b9SYuval Mintz {
346837bff2b9SYuval Mintz 	smp_mb__before_atomic();
346937bff2b9SYuval Mintz 	set_bit(flag, &hwfn->iov_task_flags);
347037bff2b9SYuval Mintz 	smp_mb__after_atomic();
347137bff2b9SYuval Mintz 	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
347237bff2b9SYuval Mintz 	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
347337bff2b9SYuval Mintz }
347437bff2b9SYuval Mintz 
34751408cc1fSYuval Mintz void qed_vf_start_iov_wq(struct qed_dev *cdev)
34761408cc1fSYuval Mintz {
34771408cc1fSYuval Mintz 	int i;
34781408cc1fSYuval Mintz 
34791408cc1fSYuval Mintz 	for_each_hwfn(cdev, i)
34801408cc1fSYuval Mintz 	    queue_delayed_work(cdev->hwfns[i].iov_wq,
34811408cc1fSYuval Mintz 			       &cdev->hwfns[i].iov_task, 0);
34821408cc1fSYuval Mintz }
34831408cc1fSYuval Mintz 
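/* Tear down SR-IOV: flush the per-hwfn IOV workqueues, mark all VFs for
 * disablement, optionally disable the PCIe SR-IOV capability, and release
 * each VF's HW resources once it has stopped (polled for up to ~2 seconds
 * per VF).
 */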
34840b55e27dSYuval Mintz int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
34850b55e27dSYuval Mintz {
34860b55e27dSYuval Mintz 	int i, j;
34870b55e27dSYuval Mintz 
34880b55e27dSYuval Mintz 	for_each_hwfn(cdev, i)
34890b55e27dSYuval Mintz 	    if (cdev->hwfns[i].iov_wq)
34900b55e27dSYuval Mintz 		flush_workqueue(cdev->hwfns[i].iov_wq);
34910b55e27dSYuval Mintz 
34920b55e27dSYuval Mintz 	/* Mark VFs for disablement */
34930b55e27dSYuval Mintz 	qed_iov_set_vfs_to_disable(cdev, true);
34940b55e27dSYuval Mintz 
34950b55e27dSYuval Mintz 	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
34960b55e27dSYuval Mintz 		pci_disable_sriov(cdev->pdev);
34970b55e27dSYuval Mintz 
34980b55e27dSYuval Mintz 	for_each_hwfn(cdev, i) {
34990b55e27dSYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
35000b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
35010b55e27dSYuval Mintz 
35020b55e27dSYuval Mintz 		/* Failure to acquire the ptt in 100g creates an odd error
35030b55e27dSYuval Mintz 		 * where the first engine has already released IOV.
35040b55e27dSYuval Mintz 		 */
35050b55e27dSYuval Mintz 		if (!ptt) {
35060b55e27dSYuval Mintz 			DP_ERR(hwfn, "Failed to acquire ptt\n");
35070b55e27dSYuval Mintz 			return -EBUSY;
35080b55e27dSYuval Mintz 		}
35090b55e27dSYuval Mintz 
3510733def6aSYuval Mintz 		/* Clean WFQ db and configure equal weight for all vports */
3511733def6aSYuval Mintz 		qed_clean_wfq_db(hwfn, ptt);
3512733def6aSYuval Mintz 
35130b55e27dSYuval Mintz 		qed_for_each_vf(hwfn, j) {
35140b55e27dSYuval Mintz 			int k;
35150b55e27dSYuval Mintz 
35167eff82b0SYuval Mintz 			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
35170b55e27dSYuval Mintz 				continue;
35180b55e27dSYuval Mintz 
35190b55e27dSYuval Mintz 			/* Wait until VF is disabled before releasing */
35200b55e27dSYuval Mintz 			for (k = 0; k < 100; k++) {
35210b55e27dSYuval Mintz 				if (!qed_iov_is_vf_stopped(hwfn, j))
35220b55e27dSYuval Mintz 					msleep(20);
35230b55e27dSYuval Mintz 				else
35240b55e27dSYuval Mintz 					break;
35250b55e27dSYuval Mintz 			}
35260b55e27dSYuval Mintz 
35270b55e27dSYuval Mintz 			if (k < 100)
35280b55e27dSYuval Mintz 				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
35290b55e27dSYuval Mintz 							  ptt, j);
35300b55e27dSYuval Mintz 			else
35310b55e27dSYuval Mintz 				DP_ERR(hwfn,
35320b55e27dSYuval Mintz 				       "Timeout waiting for VF's FLR to end\n");
35330b55e27dSYuval Mintz 		}
35340b55e27dSYuval Mintz 
35350b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
35360b55e27dSYuval Mintz 	}
35370b55e27dSYuval Mintz 
35380b55e27dSYuval Mintz 	qed_iov_set_vfs_to_disable(cdev, false);
35390b55e27dSYuval Mintz 
35400b55e27dSYuval Mintz 	return 0;
35410b55e27dSYuval Mintz }
35420b55e27dSYuval Mintz 
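/* Assign a contiguous queue range to a VF, starting right after the queues
 * reserved for the PF itself. As a purely hypothetical example, if
 * FEAT_NUM(QED_PF_L2_QUE) were 16 and each VF got 4 queues, VF 2 would be
 * handed queues 24..27 for both Rx and Tx.
 */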
35433da7a37aSMintz, Yuval static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
35443da7a37aSMintz, Yuval 					u16 vfid,
35453da7a37aSMintz, Yuval 					struct qed_iov_vf_init_params *params)
35463da7a37aSMintz, Yuval {
35473da7a37aSMintz, Yuval 	u16 base, i;
35483da7a37aSMintz, Yuval 
35493da7a37aSMintz, Yuval 	/* Since we have an equal resource distribution per-VF, and we assume
35503da7a37aSMintz, Yuval 	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
35513da7a37aSMintz, Yuval 	 * sequentially from there.
35523da7a37aSMintz, Yuval 	 */
35533da7a37aSMintz, Yuval 	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
35543da7a37aSMintz, Yuval 
35553da7a37aSMintz, Yuval 	params->rel_vf_id = vfid;
35563da7a37aSMintz, Yuval 	for (i = 0; i < params->num_queues; i++) {
35573da7a37aSMintz, Yuval 		params->req_rx_queue[i] = base + i;
35583da7a37aSMintz, Yuval 		params->req_tx_queue[i] = base + i;
35593da7a37aSMintz, Yuval 	}
35603da7a37aSMintz, Yuval }
35613da7a37aSMintz, Yuval 
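/* Enable SR-IOV for 'num' VFs: per hwfn, acquire a PTT, derive the per-VF
 * queue allocation (capped at 16 L2 queues per VF) and initialize the HW for
 * each VF, and only then enable the SR-IOV PCIe capability. Any failure
 * unwinds via qed_sriov_disable().
 */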
35620b55e27dSYuval Mintz static int qed_sriov_enable(struct qed_dev *cdev, int num)
35630b55e27dSYuval Mintz {
35643da7a37aSMintz, Yuval 	struct qed_iov_vf_init_params params;
35650b55e27dSYuval Mintz 	int i, j, rc;
35660b55e27dSYuval Mintz 
35670b55e27dSYuval Mintz 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
35680b55e27dSYuval Mintz 		DP_NOTICE(cdev, "Can start at most %d VFs\n",
35690b55e27dSYuval Mintz 			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
35700b55e27dSYuval Mintz 		return -EINVAL;
35710b55e27dSYuval Mintz 	}
35720b55e27dSYuval Mintz 
35733da7a37aSMintz, Yuval 	memset(&params, 0, sizeof(params));
35743da7a37aSMintz, Yuval 
35750b55e27dSYuval Mintz 	/* Initialize HW for VF access */
35760b55e27dSYuval Mintz 	for_each_hwfn(cdev, j) {
35770b55e27dSYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[j];
35780b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
35795a1f965aSMintz, Yuval 
35805a1f965aSMintz, Yuval 		/* Make sure not to use more than 16 queues per VF */
35813da7a37aSMintz, Yuval 		params.num_queues = min_t(int,
35823da7a37aSMintz, Yuval 					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
35833da7a37aSMintz, Yuval 					  16);
35840b55e27dSYuval Mintz 
35850b55e27dSYuval Mintz 		if (!ptt) {
35860b55e27dSYuval Mintz 			DP_ERR(hwfn, "Failed to acquire ptt\n");
35870b55e27dSYuval Mintz 			rc = -EBUSY;
35880b55e27dSYuval Mintz 			goto err;
35890b55e27dSYuval Mintz 		}
35900b55e27dSYuval Mintz 
35910b55e27dSYuval Mintz 		for (i = 0; i < num; i++) {
35927eff82b0SYuval Mintz 			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
35930b55e27dSYuval Mintz 				continue;
35940b55e27dSYuval Mintz 
35953da7a37aSMintz, Yuval 			qed_sriov_enable_qid_config(hwfn, i, &params);
35963da7a37aSMintz, Yuval 			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
35970b55e27dSYuval Mintz 			if (rc) {
35980b55e27dSYuval Mintz 				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
35990b55e27dSYuval Mintz 				qed_ptt_release(hwfn, ptt);
36000b55e27dSYuval Mintz 				goto err;
36010b55e27dSYuval Mintz 			}
36020b55e27dSYuval Mintz 		}
36030b55e27dSYuval Mintz 
36040b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
36050b55e27dSYuval Mintz 	}
36060b55e27dSYuval Mintz 
36070b55e27dSYuval Mintz 	/* Enable SRIOV PCIe functions */
36080b55e27dSYuval Mintz 	rc = pci_enable_sriov(cdev->pdev, num);
36090b55e27dSYuval Mintz 	if (rc) {
36100b55e27dSYuval Mintz 		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
36110b55e27dSYuval Mintz 		goto err;
36120b55e27dSYuval Mintz 	}
36130b55e27dSYuval Mintz 
36140b55e27dSYuval Mintz 	return num;
36150b55e27dSYuval Mintz 
36160b55e27dSYuval Mintz err:
36170b55e27dSYuval Mintz 	qed_sriov_disable(cdev, false);
36180b55e27dSYuval Mintz 	return rc;
36190b55e27dSYuval Mintz }
36200b55e27dSYuval Mintz 
36210b55e27dSYuval Mintz static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
36220b55e27dSYuval Mintz {
36230b55e27dSYuval Mintz 	if (!IS_QED_SRIOV(cdev)) {
36240b55e27dSYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
36250b55e27dSYuval Mintz 		return -EOPNOTSUPP;
36260b55e27dSYuval Mintz 	}
36270b55e27dSYuval Mintz 
36280b55e27dSYuval Mintz 	if (num_vfs_param)
36290b55e27dSYuval Mintz 		return qed_sriov_enable(cdev, num_vfs_param);
36300b55e27dSYuval Mintz 	else
36310b55e27dSYuval Mintz 		return qed_sriov_disable(cdev, true);
36320b55e27dSYuval Mintz }
36330b55e27dSYuval Mintz 
3634eff16960SYuval Mintz static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
3635eff16960SYuval Mintz {
3636eff16960SYuval Mintz 	int i;
3637eff16960SYuval Mintz 
3638eff16960SYuval Mintz 	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3639eff16960SYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV,
3640eff16960SYuval Mintz 			   "Cannot set a VF MAC; Sriov is not enabled\n");
3641eff16960SYuval Mintz 		return -EINVAL;
3642eff16960SYuval Mintz 	}
3643eff16960SYuval Mintz 
36447eff82b0SYuval Mintz 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
3645eff16960SYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV,
3646eff16960SYuval Mintz 			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
3647eff16960SYuval Mintz 		return -EINVAL;
3648eff16960SYuval Mintz 	}
3649eff16960SYuval Mintz 
3650eff16960SYuval Mintz 	for_each_hwfn(cdev, i) {
3651eff16960SYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3652eff16960SYuval Mintz 		struct qed_public_vf_info *vf_info;
3653eff16960SYuval Mintz 
3654eff16960SYuval Mintz 		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3655eff16960SYuval Mintz 		if (!vf_info)
3656eff16960SYuval Mintz 			continue;
3657eff16960SYuval Mintz 
3658eff16960SYuval Mintz 		/* Set the forced MAC, and schedule the IOV task */
3659eff16960SYuval Mintz 		ether_addr_copy(vf_info->forced_mac, mac);
3660eff16960SYuval Mintz 		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3661eff16960SYuval Mintz 	}
3662eff16960SYuval Mintz 
3663eff16960SYuval Mintz 	return 0;
3664eff16960SYuval Mintz }
3665eff16960SYuval Mintz 
366608feecd7SYuval Mintz static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
366708feecd7SYuval Mintz {
366808feecd7SYuval Mintz 	int i;
366908feecd7SYuval Mintz 
367008feecd7SYuval Mintz 	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
367108feecd7SYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV,
367208feecd7SYuval Mintz 			   "Cannot set a VF VLAN; Sriov is not enabled\n");
367308feecd7SYuval Mintz 		return -EINVAL;
367408feecd7SYuval Mintz 	}
367508feecd7SYuval Mintz 
36767eff82b0SYuval Mintz 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
367708feecd7SYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV,
367808feecd7SYuval Mintz 			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
367908feecd7SYuval Mintz 		return -EINVAL;
368008feecd7SYuval Mintz 	}
368108feecd7SYuval Mintz 
368208feecd7SYuval Mintz 	for_each_hwfn(cdev, i) {
368308feecd7SYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
368408feecd7SYuval Mintz 		struct qed_public_vf_info *vf_info;
368508feecd7SYuval Mintz 
368608feecd7SYuval Mintz 		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
368708feecd7SYuval Mintz 		if (!vf_info)
368808feecd7SYuval Mintz 			continue;
368908feecd7SYuval Mintz 
369008feecd7SYuval Mintz 		/* Set the forced vlan, and schedule the IOV task */
369108feecd7SYuval Mintz 		vf_info->forced_vlan = vid;
369208feecd7SYuval Mintz 		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
369308feecd7SYuval Mintz 	}
369408feecd7SYuval Mintz 
369508feecd7SYuval Mintz 	return 0;
369608feecd7SYuval Mintz }
369708feecd7SYuval Mintz 
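/* Fill an ifla_vf_info structure for the given VF: report the forced MAC if
 * one is configured (otherwise the VF's own MAC), the forced VLAN, spoofchk
 * state, administrative link state and the min/max Tx rates.
 */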
369873390ac9SYuval Mintz static int qed_get_vf_config(struct qed_dev *cdev,
369973390ac9SYuval Mintz 			     int vf_id, struct ifla_vf_info *ivi)
370073390ac9SYuval Mintz {
370173390ac9SYuval Mintz 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
370273390ac9SYuval Mintz 	struct qed_public_vf_info *vf_info;
370373390ac9SYuval Mintz 	struct qed_mcp_link_state link;
370473390ac9SYuval Mintz 	u32 tx_rate;
370573390ac9SYuval Mintz 
370673390ac9SYuval Mintz 	/* Sanitize request */
370773390ac9SYuval Mintz 	if (IS_VF(cdev))
370873390ac9SYuval Mintz 		return -EINVAL;
370973390ac9SYuval Mintz 
37107eff82b0SYuval Mintz 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
371173390ac9SYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV,
371273390ac9SYuval Mintz 			   "VF index [%d] isn't active\n", vf_id);
371373390ac9SYuval Mintz 		return -EINVAL;
371473390ac9SYuval Mintz 	}
371573390ac9SYuval Mintz 
371673390ac9SYuval Mintz 	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
371773390ac9SYuval Mintz 
371873390ac9SYuval Mintz 	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
371973390ac9SYuval Mintz 
372073390ac9SYuval Mintz 	/* Fill information about VF */
372173390ac9SYuval Mintz 	ivi->vf = vf_id;
372273390ac9SYuval Mintz 
372373390ac9SYuval Mintz 	if (is_valid_ether_addr(vf_info->forced_mac))
372473390ac9SYuval Mintz 		ether_addr_copy(ivi->mac, vf_info->forced_mac);
372573390ac9SYuval Mintz 	else
372673390ac9SYuval Mintz 		ether_addr_copy(ivi->mac, vf_info->mac);
372773390ac9SYuval Mintz 
372873390ac9SYuval Mintz 	ivi->vlan = vf_info->forced_vlan;
372973390ac9SYuval Mintz 	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
373073390ac9SYuval Mintz 	ivi->linkstate = vf_info->link_state;
373173390ac9SYuval Mintz 	tx_rate = vf_info->tx_rate;
373273390ac9SYuval Mintz 	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
373373390ac9SYuval Mintz 	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
373473390ac9SYuval Mintz 
373573390ac9SYuval Mintz 	return 0;
373673390ac9SYuval Mintz }
373773390ac9SYuval Mintz 
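/* Propagate the PF's current link parameters, state and capabilities into
 * every VF bulletin, after adjusting them for the per-VF administrative link
 * state and, when a Tx rate limit is set, configuring the vport rate limiter
 * accordingly.
 */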
373836558c3dSYuval Mintz void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
373936558c3dSYuval Mintz {
374036558c3dSYuval Mintz 	struct qed_mcp_link_capabilities caps;
374136558c3dSYuval Mintz 	struct qed_mcp_link_params params;
374236558c3dSYuval Mintz 	struct qed_mcp_link_state link;
374336558c3dSYuval Mintz 	int i;
374436558c3dSYuval Mintz 
374536558c3dSYuval Mintz 	if (!hwfn->pf_iov_info)
374636558c3dSYuval Mintz 		return;
374736558c3dSYuval Mintz 
374836558c3dSYuval Mintz 	/* Update bulletin of all future possible VFs with link configuration */
374936558c3dSYuval Mintz 	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
3750733def6aSYuval Mintz 		struct qed_public_vf_info *vf_info;
3751733def6aSYuval Mintz 
3752733def6aSYuval Mintz 		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
3753733def6aSYuval Mintz 		if (!vf_info)
3754733def6aSYuval Mintz 			continue;
3755733def6aSYuval Mintz 
375636558c3dSYuval Mintz 		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
375736558c3dSYuval Mintz 		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
375836558c3dSYuval Mintz 		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
375936558c3dSYuval Mintz 		       sizeof(caps));
376036558c3dSYuval Mintz 
3761733def6aSYuval Mintz 		/* Modify link according to the VF's configured link state */
3762733def6aSYuval Mintz 		switch (vf_info->link_state) {
3763733def6aSYuval Mintz 		case IFLA_VF_LINK_STATE_DISABLE:
3764733def6aSYuval Mintz 			link.link_up = false;
3765733def6aSYuval Mintz 			break;
3766733def6aSYuval Mintz 		case IFLA_VF_LINK_STATE_ENABLE:
3767733def6aSYuval Mintz 			link.link_up = true;
3768733def6aSYuval Mintz 			/* Set speed according to the maximum supported by HW;
3769733def6aSYuval Mintz 			 * that is 40G for regular devices and 100G for CMT
3770733def6aSYuval Mintz 			 * mode devices.
3771733def6aSYuval Mintz 			 */
3772733def6aSYuval Mintz 			link.speed = (hwfn->cdev->num_hwfns > 1) ?
3773733def6aSYuval Mintz 				     100000 : 40000;
3774733def6aSYuval Mintz 		default:
3775733def6aSYuval Mintz 			/* In auto mode pass PF link image to VF */
3776733def6aSYuval Mintz 			break;
3777733def6aSYuval Mintz 		}
3778733def6aSYuval Mintz 
3779733def6aSYuval Mintz 		if (link.link_up && vf_info->tx_rate) {
3780733def6aSYuval Mintz 			struct qed_ptt *ptt;
3781733def6aSYuval Mintz 			int rate;
3782733def6aSYuval Mintz 
3783733def6aSYuval Mintz 			rate = min_t(int, vf_info->tx_rate, link.speed);
3784733def6aSYuval Mintz 
3785733def6aSYuval Mintz 			ptt = qed_ptt_acquire(hwfn);
3786733def6aSYuval Mintz 			if (!ptt) {
3787733def6aSYuval Mintz 				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
3788733def6aSYuval Mintz 				return;
3789733def6aSYuval Mintz 			}
3790733def6aSYuval Mintz 
3791733def6aSYuval Mintz 			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
3792733def6aSYuval Mintz 				vf_info->tx_rate = rate;
3793733def6aSYuval Mintz 				link.speed = rate;
3794733def6aSYuval Mintz 			}
3795733def6aSYuval Mintz 
3796733def6aSYuval Mintz 			qed_ptt_release(hwfn, ptt);
3797733def6aSYuval Mintz 		}
3798733def6aSYuval Mintz 
379936558c3dSYuval Mintz 		qed_iov_set_link(hwfn, i, &params, &link, &caps);
380036558c3dSYuval Mintz 	}
380136558c3dSYuval Mintz 
380236558c3dSYuval Mintz 	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
380336558c3dSYuval Mintz }
380436558c3dSYuval Mintz 
3805733def6aSYuval Mintz static int qed_set_vf_link_state(struct qed_dev *cdev,
3806733def6aSYuval Mintz 				 int vf_id, int link_state)
3807733def6aSYuval Mintz {
3808733def6aSYuval Mintz 	int i;
3809733def6aSYuval Mintz 
3810733def6aSYuval Mintz 	/* Sanitize request */
3811733def6aSYuval Mintz 	if (IS_VF(cdev))
3812733def6aSYuval Mintz 		return -EINVAL;
3813733def6aSYuval Mintz 
38147eff82b0SYuval Mintz 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
3815733def6aSYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV,
3816733def6aSYuval Mintz 			   "VF index [%d] isn't active\n", vf_id);
3817733def6aSYuval Mintz 		return -EINVAL;
3818733def6aSYuval Mintz 	}
3819733def6aSYuval Mintz 
3820733def6aSYuval Mintz 	/* Handle configuration of link state */
3821733def6aSYuval Mintz 	for_each_hwfn(cdev, i) {
3822733def6aSYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3823733def6aSYuval Mintz 		struct qed_public_vf_info *vf;
3824733def6aSYuval Mintz 
3825733def6aSYuval Mintz 		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3826733def6aSYuval Mintz 		if (!vf)
3827733def6aSYuval Mintz 			continue;
3828733def6aSYuval Mintz 
3829733def6aSYuval Mintz 		if (vf->link_state == link_state)
3830733def6aSYuval Mintz 			continue;
3831733def6aSYuval Mintz 
3832733def6aSYuval Mintz 		vf->link_state = link_state;
3833733def6aSYuval Mintz 		qed_inform_vf_link_state(&cdev->hwfns[i]);
3834733def6aSYuval Mintz 	}
3835733def6aSYuval Mintz 
3836733def6aSYuval Mintz 	return 0;
3837733def6aSYuval Mintz }
3838733def6aSYuval Mintz 
38396ddc7608SYuval Mintz static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
38406ddc7608SYuval Mintz {
38416ddc7608SYuval Mintz 	int i, rc = -EINVAL;
38426ddc7608SYuval Mintz 
38436ddc7608SYuval Mintz 	for_each_hwfn(cdev, i) {
38446ddc7608SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
38456ddc7608SYuval Mintz 
38466ddc7608SYuval Mintz 		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
38476ddc7608SYuval Mintz 		if (rc)
38486ddc7608SYuval Mintz 			break;
38496ddc7608SYuval Mintz 	}
38506ddc7608SYuval Mintz 
38516ddc7608SYuval Mintz 	return rc;
38526ddc7608SYuval Mintz }
38536ddc7608SYuval Mintz 
3854733def6aSYuval Mintz static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
3855733def6aSYuval Mintz {
3856733def6aSYuval Mintz 	int i;
3857733def6aSYuval Mintz 
3858733def6aSYuval Mintz 	for_each_hwfn(cdev, i) {
3859733def6aSYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3860733def6aSYuval Mintz 		struct qed_public_vf_info *vf;
3861733def6aSYuval Mintz 
3862733def6aSYuval Mintz 		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3863733def6aSYuval Mintz 			DP_NOTICE(p_hwfn,
3864733def6aSYuval Mintz 				  "SR-IOV sanity check failed, can't set tx rate\n");
3865733def6aSYuval Mintz 			return -EINVAL;
3866733def6aSYuval Mintz 		}
3867733def6aSYuval Mintz 
3868733def6aSYuval Mintz 		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
3869733def6aSYuval Mintz 
3870733def6aSYuval Mintz 		vf->tx_rate = rate;
3871733def6aSYuval Mintz 
3872733def6aSYuval Mintz 		qed_inform_vf_link_state(p_hwfn);
3873733def6aSYuval Mintz 	}
3874733def6aSYuval Mintz 
3875733def6aSYuval Mintz 	return 0;
3876733def6aSYuval Mintz }
3877733def6aSYuval Mintz 
3878733def6aSYuval Mintz static int qed_set_vf_rate(struct qed_dev *cdev,
3879733def6aSYuval Mintz 			   int vfid, u32 min_rate, u32 max_rate)
3880733def6aSYuval Mintz {
3881733def6aSYuval Mintz 	int rc_min = 0, rc_max = 0;
3882733def6aSYuval Mintz 
3883733def6aSYuval Mintz 	if (max_rate)
3884733def6aSYuval Mintz 		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
3885733def6aSYuval Mintz 
3886733def6aSYuval Mintz 	if (min_rate)
3887733def6aSYuval Mintz 		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
3888733def6aSYuval Mintz 
3889733def6aSYuval Mintz 	if (rc_max | rc_min)
3890733def6aSYuval Mintz 		return -EINVAL;
3891733def6aSYuval Mintz 
3892733def6aSYuval Mintz 	return 0;
3893733def6aSYuval Mintz }
3894733def6aSYuval Mintz 
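/* Workqueue handler for QED_IOV_WQ_MSG_FLAG: snapshot and clear the pending
 * events bitmap, then copy and process the mailbox request of every VF that
 * has one outstanding.
 */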
389537bff2b9SYuval Mintz static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
389637bff2b9SYuval Mintz {
389737bff2b9SYuval Mintz 	u64 events[QED_VF_ARRAY_LENGTH];
389837bff2b9SYuval Mintz 	struct qed_ptt *ptt;
389937bff2b9SYuval Mintz 	int i;
390037bff2b9SYuval Mintz 
390137bff2b9SYuval Mintz 	ptt = qed_ptt_acquire(hwfn);
390237bff2b9SYuval Mintz 	if (!ptt) {
390337bff2b9SYuval Mintz 		DP_VERBOSE(hwfn, QED_MSG_IOV,
390437bff2b9SYuval Mintz 			   "Can't acquire PTT; re-scheduling\n");
390537bff2b9SYuval Mintz 		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
390637bff2b9SYuval Mintz 		return;
390737bff2b9SYuval Mintz 	}
390837bff2b9SYuval Mintz 
390937bff2b9SYuval Mintz 	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
391037bff2b9SYuval Mintz 
391137bff2b9SYuval Mintz 	DP_VERBOSE(hwfn, QED_MSG_IOV,
391237bff2b9SYuval Mintz 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
391337bff2b9SYuval Mintz 		   events[0], events[1], events[2]);
391437bff2b9SYuval Mintz 
391537bff2b9SYuval Mintz 	qed_for_each_vf(hwfn, i) {
391637bff2b9SYuval Mintz 		/* Skip VFs with no pending messages */
391737bff2b9SYuval Mintz 		if (!(events[i / 64] & (1ULL << (i % 64))))
391837bff2b9SYuval Mintz 			continue;
391937bff2b9SYuval Mintz 
392037bff2b9SYuval Mintz 		DP_VERBOSE(hwfn, QED_MSG_IOV,
392137bff2b9SYuval Mintz 			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
392237bff2b9SYuval Mintz 			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
392337bff2b9SYuval Mintz 
392437bff2b9SYuval Mintz 		/* Copy VF's message to PF's request buffer for that VF */
392537bff2b9SYuval Mintz 		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
392637bff2b9SYuval Mintz 			continue;
392737bff2b9SYuval Mintz 
392837bff2b9SYuval Mintz 		qed_iov_process_mbx_req(hwfn, ptt, i);
392937bff2b9SYuval Mintz 	}
393037bff2b9SYuval Mintz 
393137bff2b9SYuval Mintz 	qed_ptt_release(hwfn, ptt);
393237bff2b9SYuval Mintz }
393337bff2b9SYuval Mintz 
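/* Push PF-administered unicast configuration (forced MAC / forced VLAN) from
 * the public VF info into the bulletin boards, and request a bulletin post
 * if anything actually changed.
 */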
393408feecd7SYuval Mintz static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
393508feecd7SYuval Mintz {
393608feecd7SYuval Mintz 	int i;
393708feecd7SYuval Mintz 
393808feecd7SYuval Mintz 	qed_for_each_vf(hwfn, i) {
393908feecd7SYuval Mintz 		struct qed_public_vf_info *info;
394008feecd7SYuval Mintz 		bool update = false;
3941eff16960SYuval Mintz 		u8 *mac;
394208feecd7SYuval Mintz 
394308feecd7SYuval Mintz 		info = qed_iov_get_public_vf_info(hwfn, i, true);
394408feecd7SYuval Mintz 		if (!info)
394508feecd7SYuval Mintz 			continue;
394608feecd7SYuval Mintz 
394708feecd7SYuval Mintz 		/* Update data on bulletin board */
3948eff16960SYuval Mintz 		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
3949eff16960SYuval Mintz 		if (is_valid_ether_addr(info->forced_mac) &&
3950eff16960SYuval Mintz 		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
3951eff16960SYuval Mintz 			DP_VERBOSE(hwfn,
3952eff16960SYuval Mintz 				   QED_MSG_IOV,
3953eff16960SYuval Mintz 				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
3954eff16960SYuval Mintz 				   i,
3955eff16960SYuval Mintz 				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3956eff16960SYuval Mintz 
3957eff16960SYuval Mintz 			/* Update bulletin board with forced MAC */
3958eff16960SYuval Mintz 			qed_iov_bulletin_set_forced_mac(hwfn,
3959eff16960SYuval Mintz 							info->forced_mac, i);
3960eff16960SYuval Mintz 			update = true;
3961eff16960SYuval Mintz 		}
396208feecd7SYuval Mintz 
396308feecd7SYuval Mintz 		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
396408feecd7SYuval Mintz 		    info->forced_vlan) {
396508feecd7SYuval Mintz 			DP_VERBOSE(hwfn,
396608feecd7SYuval Mintz 				   QED_MSG_IOV,
396708feecd7SYuval Mintz 				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
396808feecd7SYuval Mintz 				   info->forced_vlan,
396908feecd7SYuval Mintz 				   i,
397008feecd7SYuval Mintz 				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
397108feecd7SYuval Mintz 			qed_iov_bulletin_set_forced_vlan(hwfn,
397208feecd7SYuval Mintz 							 info->forced_vlan, i);
397308feecd7SYuval Mintz 			update = true;
397408feecd7SYuval Mintz 		}
397508feecd7SYuval Mintz 
397608feecd7SYuval Mintz 		if (update)
397708feecd7SYuval Mintz 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
397808feecd7SYuval Mintz 	}
397908feecd7SYuval Mintz }
398008feecd7SYuval Mintz 
398136558c3dSYuval Mintz static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
398236558c3dSYuval Mintz {
398336558c3dSYuval Mintz 	struct qed_ptt *ptt;
398436558c3dSYuval Mintz 	int i;
398536558c3dSYuval Mintz 
398636558c3dSYuval Mintz 	ptt = qed_ptt_acquire(hwfn);
398736558c3dSYuval Mintz 	if (!ptt) {
398836558c3dSYuval Mintz 		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
398936558c3dSYuval Mintz 		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
399036558c3dSYuval Mintz 		return;
399136558c3dSYuval Mintz 	}
399236558c3dSYuval Mintz 
399336558c3dSYuval Mintz 	qed_for_each_vf(hwfn, i)
399436558c3dSYuval Mintz 	    qed_iov_post_vf_bulletin(hwfn, i, ptt);
399536558c3dSYuval Mintz 
399636558c3dSYuval Mintz 	qed_ptt_release(hwfn, ptt);
399736558c3dSYuval Mintz }
399836558c3dSYuval Mintz 
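/* The PF's delayed-work handler: dispatches whichever IOV flags are pending,
 * i.e. FLR cleanup, VF mailbox messages, PF-set unicast filters and bulletin
 * board posting. Flags that cannot be served (no PTT available) are
 * re-scheduled.
 */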
3999ba56947aSBaoyou Xie static void qed_iov_pf_task(struct work_struct *work)
400137bff2b9SYuval Mintz {
400237bff2b9SYuval Mintz 	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
400337bff2b9SYuval Mintz 					     iov_task.work);
40040b55e27dSYuval Mintz 	int rc;
400537bff2b9SYuval Mintz 
400637bff2b9SYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
400737bff2b9SYuval Mintz 		return;
400837bff2b9SYuval Mintz 
40090b55e27dSYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
40100b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
40110b55e27dSYuval Mintz 
40120b55e27dSYuval Mintz 		if (!ptt) {
40130b55e27dSYuval Mintz 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
40140b55e27dSYuval Mintz 			return;
40150b55e27dSYuval Mintz 		}
40160b55e27dSYuval Mintz 
40170b55e27dSYuval Mintz 		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
40180b55e27dSYuval Mintz 		if (rc)
40190b55e27dSYuval Mintz 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
40200b55e27dSYuval Mintz 
40210b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
40220b55e27dSYuval Mintz 	}
40230b55e27dSYuval Mintz 
402437bff2b9SYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
402537bff2b9SYuval Mintz 		qed_handle_vf_msg(hwfn);
402608feecd7SYuval Mintz 
402708feecd7SYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
402808feecd7SYuval Mintz 			       &hwfn->iov_task_flags))
402908feecd7SYuval Mintz 		qed_handle_pf_set_vf_unicast(hwfn);
403008feecd7SYuval Mintz 
403136558c3dSYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
403236558c3dSYuval Mintz 			       &hwfn->iov_task_flags))
403336558c3dSYuval Mintz 		qed_handle_bulletin_post(hwfn);
403437bff2b9SYuval Mintz }
403537bff2b9SYuval Mintz 
403637bff2b9SYuval Mintz void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
403737bff2b9SYuval Mintz {
403837bff2b9SYuval Mintz 	int i;
403937bff2b9SYuval Mintz 
404037bff2b9SYuval Mintz 	for_each_hwfn(cdev, i) {
404137bff2b9SYuval Mintz 		if (!cdev->hwfns[i].iov_wq)
404237bff2b9SYuval Mintz 			continue;
404337bff2b9SYuval Mintz 
404437bff2b9SYuval Mintz 		if (schedule_first) {
404537bff2b9SYuval Mintz 			qed_schedule_iov(&cdev->hwfns[i],
404637bff2b9SYuval Mintz 					 QED_IOV_WQ_STOP_WQ_FLAG);
404737bff2b9SYuval Mintz 			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
404837bff2b9SYuval Mintz 		}
404937bff2b9SYuval Mintz 
405037bff2b9SYuval Mintz 		flush_workqueue(cdev->hwfns[i].iov_wq);
405137bff2b9SYuval Mintz 		destroy_workqueue(cdev->hwfns[i].iov_wq);
405237bff2b9SYuval Mintz 	}
405337bff2b9SYuval Mintz }
405437bff2b9SYuval Mintz 
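/* Create one single-threaded "iov-xx:xx.xx" workqueue per hwfn that needs
 * one and arm its delayed work: PFs run qed_iov_pf_task(), VFs run
 * qed_iov_vf_task().
 */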
405537bff2b9SYuval Mintz int qed_iov_wq_start(struct qed_dev *cdev)
405637bff2b9SYuval Mintz {
405737bff2b9SYuval Mintz 	char name[NAME_SIZE];
405837bff2b9SYuval Mintz 	int i;
405937bff2b9SYuval Mintz 
406037bff2b9SYuval Mintz 	for_each_hwfn(cdev, i) {
406137bff2b9SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
406237bff2b9SYuval Mintz 
406336558c3dSYuval Mintz 		/* PFs need a dedicated workqueue only if they support IOV.
406436558c3dSYuval Mintz 		 * VFs always require one.
406536558c3dSYuval Mintz 		 */
406636558c3dSYuval Mintz 		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
406737bff2b9SYuval Mintz 			continue;
406837bff2b9SYuval Mintz 
406937bff2b9SYuval Mintz 		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
407037bff2b9SYuval Mintz 			 cdev->pdev->bus->number,
407137bff2b9SYuval Mintz 			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
407237bff2b9SYuval Mintz 
407337bff2b9SYuval Mintz 		p_hwfn->iov_wq = create_singlethread_workqueue(name);
407437bff2b9SYuval Mintz 		if (!p_hwfn->iov_wq) {
407537bff2b9SYuval Mintz 			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
407637bff2b9SYuval Mintz 			return -ENOMEM;
407737bff2b9SYuval Mintz 		}
407837bff2b9SYuval Mintz 
407936558c3dSYuval Mintz 		if (IS_PF(cdev))
408037bff2b9SYuval Mintz 			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
408136558c3dSYuval Mintz 		else
408236558c3dSYuval Mintz 			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
408337bff2b9SYuval Mintz 	}
408437bff2b9SYuval Mintz 
408537bff2b9SYuval Mintz 	return 0;
408637bff2b9SYuval Mintz }
40870b55e27dSYuval Mintz 
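/* PF-side SR-IOV callbacks exported as a qed_iov_hv_ops structure; the
 * Ethernet driver above qed (qede) is expected to hook these into its
 * ndo_set_vf_* / ndo_get_vf_config operations.
 */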
40880b55e27dSYuval Mintz const struct qed_iov_hv_ops qed_iov_ops_pass = {
40890b55e27dSYuval Mintz 	.configure = &qed_sriov_configure,
4090eff16960SYuval Mintz 	.set_mac = &qed_sriov_pf_set_mac,
409108feecd7SYuval Mintz 	.set_vlan = &qed_sriov_pf_set_vlan,
409273390ac9SYuval Mintz 	.get_config = &qed_get_vf_config,
4093733def6aSYuval Mintz 	.set_link_state = &qed_set_vf_link_state,
40946ddc7608SYuval Mintz 	.set_spoof = &qed_spoof_configure,
4095733def6aSYuval Mintz 	.set_rate = &qed_set_vf_rate,
40960b55e27dSYuval Mintz };
4097