/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
			   u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);

	p_ramrod->personality = PERSONALITY_ETH;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d; setting num_vfs to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

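	/* Carve a per-VF slice out of the shared request/reply mailboxes and
	 * the bulletin board, and derive each VF's absolute, concrete and
	 * opaque FIDs from its relative index.
	 */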
	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;
	}
}

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
		return -ENOMEM;
	}
	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 igu_sb_id;
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "value in VF_CONFIGURATION of vf %d after write %x\n",
		   vf->abs_vf_id,
		   qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++) {
		igu_sb_id = vf->igu_sbs[i];
		/* Set then clear... */
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
				       vf->opaque_fid);
		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
				       vf->opaque_fid);
	}
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (vf->state != VF_STOPPED) {
		DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
			  vf->abs_vf_id);
		return -EINVAL;
	}

	/* Start VF */
	rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

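	/* For each of the VF's Rx queue zones, program {valid, abs VF id}
	 * into the permission table, or clear the entry when disabling.
	 */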
	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

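	/* Build the IGU CAM line template once - function number, valid bit
	 * and PF-valid bit; only the vector number changes per status block.
	 */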
	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
		return -EINVAL;
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16) cids);
	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf,
							      num_rx_queues);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
							   vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
			DP_NOTICE(p_hwfn,
				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			return -EINVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}
	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_vf_info *vf = NULL;
	int rc = 0;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->state != VF_STOPPED) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);

		if (rc != 0) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			return rc;
		}

		vf->state = VF_STOPPED;
	}

	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

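	/* Copy the reply header (first quadword, containing the status) only
	 * after the rest of the reply, so the VF never observes a completed
	 * status with a partially written response.
	 */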
90837bff2b9SYuval Mintz 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
90937bff2b9SYuval Mintz 			   mbx->req_virt->first_tlv.reply_address,
91037bff2b9SYuval Mintz 			   sizeof(u64) / 4, &params);
91137bff2b9SYuval Mintz 
91237bff2b9SYuval Mintz 	REG_WR(p_hwfn,
91337bff2b9SYuval Mintz 	       GTT_BAR0_MAP_REG_USDM_RAM +
91437bff2b9SYuval Mintz 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
91537bff2b9SYuval Mintz }
91637bff2b9SYuval Mintz 
917dacd88d6SYuval Mintz static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
918dacd88d6SYuval Mintz 				enum qed_iov_vport_update_flag flag)
919dacd88d6SYuval Mintz {
920dacd88d6SYuval Mintz 	switch (flag) {
921dacd88d6SYuval Mintz 	case QED_IOV_VP_UPDATE_ACTIVATE:
922dacd88d6SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
923dacd88d6SYuval Mintz 	case QED_IOV_VP_UPDATE_MCAST:
924dacd88d6SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
925dacd88d6SYuval Mintz 	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
926dacd88d6SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
927dacd88d6SYuval Mintz 	case QED_IOV_VP_UPDATE_RSS:
928dacd88d6SYuval Mintz 		return CHANNEL_TLV_VPORT_UPDATE_RSS;
929dacd88d6SYuval Mintz 	default:
930dacd88d6SYuval Mintz 		return 0;
931dacd88d6SYuval Mintz 	}
932dacd88d6SYuval Mintz }
933dacd88d6SYuval Mintz 
934dacd88d6SYuval Mintz static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
935dacd88d6SYuval Mintz 					    struct qed_vf_info *p_vf,
936dacd88d6SYuval Mintz 					    struct qed_iov_vf_mbx *p_mbx,
937dacd88d6SYuval Mintz 					    u8 status,
938dacd88d6SYuval Mintz 					    u16 tlvs_mask, u16 tlvs_accepted)
939dacd88d6SYuval Mintz {
940dacd88d6SYuval Mintz 	struct pfvf_def_resp_tlv *resp;
941dacd88d6SYuval Mintz 	u16 size, total_len, i;
942dacd88d6SYuval Mintz 
943dacd88d6SYuval Mintz 	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
944dacd88d6SYuval Mintz 	p_mbx->offset = (u8 *)p_mbx->reply_virt;
945dacd88d6SYuval Mintz 	size = sizeof(struct pfvf_def_resp_tlv);
946dacd88d6SYuval Mintz 	total_len = size;
947dacd88d6SYuval Mintz 
948dacd88d6SYuval Mintz 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
949dacd88d6SYuval Mintz 
950dacd88d6SYuval Mintz 	/* Prepare response for all extended tlvs if they are found by PF */
951dacd88d6SYuval Mintz 	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
952dacd88d6SYuval Mintz 		if (!(tlvs_mask & (1 << i)))
953dacd88d6SYuval Mintz 			continue;
954dacd88d6SYuval Mintz 
955dacd88d6SYuval Mintz 		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
956dacd88d6SYuval Mintz 				   qed_iov_vport_to_tlv(p_hwfn, i), size);
957dacd88d6SYuval Mintz 
958dacd88d6SYuval Mintz 		if (tlvs_accepted & (1 << i))
959dacd88d6SYuval Mintz 			resp->hdr.status = status;
960dacd88d6SYuval Mintz 		else
961dacd88d6SYuval Mintz 			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
962dacd88d6SYuval Mintz 
963dacd88d6SYuval Mintz 		DP_VERBOSE(p_hwfn,
964dacd88d6SYuval Mintz 			   QED_MSG_IOV,
965dacd88d6SYuval Mintz 			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
966dacd88d6SYuval Mintz 			   p_vf->relative_vf_id,
967dacd88d6SYuval Mintz 			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
968dacd88d6SYuval Mintz 
969dacd88d6SYuval Mintz 		total_len += size;
970dacd88d6SYuval Mintz 	}
971dacd88d6SYuval Mintz 
972dacd88d6SYuval Mintz 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
973dacd88d6SYuval Mintz 		    sizeof(struct channel_list_end_tlv));
974dacd88d6SYuval Mintz 
975dacd88d6SYuval Mintz 	return total_len;
976dacd88d6SYuval Mintz }
977dacd88d6SYuval Mintz 
97837bff2b9SYuval Mintz static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
97937bff2b9SYuval Mintz 				 struct qed_ptt *p_ptt,
98037bff2b9SYuval Mintz 				 struct qed_vf_info *vf_info,
98137bff2b9SYuval Mintz 				 u16 type, u16 length, u8 status)
98237bff2b9SYuval Mintz {
98337bff2b9SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
98437bff2b9SYuval Mintz 
98537bff2b9SYuval Mintz 	mbx->offset = (u8 *)mbx->reply_virt;
98637bff2b9SYuval Mintz 
98737bff2b9SYuval Mintz 	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
98837bff2b9SYuval Mintz 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
98937bff2b9SYuval Mintz 		    sizeof(struct channel_list_end_tlv));
99037bff2b9SYuval Mintz 
99137bff2b9SYuval Mintz 	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
99237bff2b9SYuval Mintz }
99337bff2b9SYuval Mintz 
9940b55e27dSYuval Mintz struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
9950b55e27dSYuval Mintz 						      u16 relative_vf_id,
9960b55e27dSYuval Mintz 						      bool b_enabled_only)
9970b55e27dSYuval Mintz {
9980b55e27dSYuval Mintz 	struct qed_vf_info *vf = NULL;
9990b55e27dSYuval Mintz 
10000b55e27dSYuval Mintz 	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
10010b55e27dSYuval Mintz 	if (!vf)
10020b55e27dSYuval Mintz 		return NULL;
10030b55e27dSYuval Mintz 
10040b55e27dSYuval Mintz 	return &vf->p_vf_info;
10050b55e27dSYuval Mintz }
10060b55e27dSYuval Mintz 
10070b55e27dSYuval Mintz void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
10080b55e27dSYuval Mintz {
10090b55e27dSYuval Mintz 	struct qed_public_vf_info *vf_info;
10100b55e27dSYuval Mintz 
10110b55e27dSYuval Mintz 	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
10120b55e27dSYuval Mintz 
10130b55e27dSYuval Mintz 	if (!vf_info)
10140b55e27dSYuval Mintz 		return;
10150b55e27dSYuval Mintz 
10160b55e27dSYuval Mintz 	/* Clear the VF mac */
10170b55e27dSYuval Mintz 	memset(vf_info->mac, 0, ETH_ALEN);
10180b55e27dSYuval Mintz }
10190b55e27dSYuval Mintz 
10200b55e27dSYuval Mintz static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
10210b55e27dSYuval Mintz 			       struct qed_vf_info *p_vf)
10220b55e27dSYuval Mintz {
10230b55e27dSYuval Mintz 	u32 i;
10240b55e27dSYuval Mintz 
10250b55e27dSYuval Mintz 	p_vf->vf_bulletin = 0;
1026dacd88d6SYuval Mintz 	p_vf->vport_instance = 0;
10270b55e27dSYuval Mintz 	p_vf->num_mac_filters = 0;
10280b55e27dSYuval Mintz 	p_vf->num_vlan_filters = 0;
10290b55e27dSYuval Mintz 
10300b55e27dSYuval Mintz 	/* If VF previously requested less resources, go back to default */
10310b55e27dSYuval Mintz 	p_vf->num_rxqs = p_vf->num_sbs;
10320b55e27dSYuval Mintz 	p_vf->num_txqs = p_vf->num_sbs;
10330b55e27dSYuval Mintz 
1034dacd88d6SYuval Mintz 	p_vf->num_active_rxqs = 0;
1035dacd88d6SYuval Mintz 
10360b55e27dSYuval Mintz 	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
10370b55e27dSYuval Mintz 		p_vf->vf_queues[i].rxq_active = 0;
10380b55e27dSYuval Mintz 
10390b55e27dSYuval Mintz 	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
10400b55e27dSYuval Mintz }
10410b55e27dSYuval Mintz 
10421408cc1fSYuval Mintz static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
104337bff2b9SYuval Mintz 				   struct qed_ptt *p_ptt,
10441408cc1fSYuval Mintz 				   struct qed_vf_info *vf)
104537bff2b9SYuval Mintz {
10461408cc1fSYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
10471408cc1fSYuval Mintz 	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
10481408cc1fSYuval Mintz 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
10491408cc1fSYuval Mintz 	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
10501408cc1fSYuval Mintz 	u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
10511408cc1fSYuval Mintz 	struct pf_vf_resc *resc = &resp->resc;
10521408cc1fSYuval Mintz 
10531408cc1fSYuval Mintz 	/* Validate FW compatibility */
10541408cc1fSYuval Mintz 	if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
10551408cc1fSYuval Mintz 	    req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
10561408cc1fSYuval Mintz 	    req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
10571408cc1fSYuval Mintz 	    req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
10581408cc1fSYuval Mintz 		DP_INFO(p_hwfn,
10591408cc1fSYuval Mintz 			"VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
10601408cc1fSYuval Mintz 			vf->abs_vf_id,
10611408cc1fSYuval Mintz 			req->vfdev_info.fw_major,
10621408cc1fSYuval Mintz 			req->vfdev_info.fw_minor,
10631408cc1fSYuval Mintz 			req->vfdev_info.fw_revision,
10641408cc1fSYuval Mintz 			req->vfdev_info.fw_engineering,
10651408cc1fSYuval Mintz 			FW_MAJOR_VERSION,
10661408cc1fSYuval Mintz 			FW_MINOR_VERSION,
10671408cc1fSYuval Mintz 			FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
10681408cc1fSYuval Mintz 		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
10691408cc1fSYuval Mintz 		goto out;
10701408cc1fSYuval Mintz 	}
10711408cc1fSYuval Mintz 
10721408cc1fSYuval Mintz 	/* On 100g PFs, prevent old VFs from loading */
10731408cc1fSYuval Mintz 	if ((p_hwfn->cdev->num_hwfns > 1) &&
10741408cc1fSYuval Mintz 	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
10751408cc1fSYuval Mintz 		DP_INFO(p_hwfn,
10761408cc1fSYuval Mintz 			"VF[%d] is running an old driver that doesn't support 100g\n",
10771408cc1fSYuval Mintz 			vf->abs_vf_id);
10781408cc1fSYuval Mintz 		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
10791408cc1fSYuval Mintz 		goto out;
10801408cc1fSYuval Mintz 	}
10811408cc1fSYuval Mintz 
10821408cc1fSYuval Mintz 	memset(resp, 0, sizeof(*resp));
10831408cc1fSYuval Mintz 
10841408cc1fSYuval Mintz 	/* Fill in vf info stuff */
10851408cc1fSYuval Mintz 	vf->opaque_fid = req->vfdev_info.opaque_fid;
10861408cc1fSYuval Mintz 	vf->num_mac_filters = 1;
10871408cc1fSYuval Mintz 	vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
10881408cc1fSYuval Mintz 
10891408cc1fSYuval Mintz 	vf->vf_bulletin = req->bulletin_addr;
10901408cc1fSYuval Mintz 	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
10911408cc1fSYuval Mintz 			    vf->bulletin.size : req->bulletin_size;
10921408cc1fSYuval Mintz 
10931408cc1fSYuval Mintz 	/* fill in pfdev info */
10941408cc1fSYuval Mintz 	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
10951408cc1fSYuval Mintz 	pfdev_info->db_size = 0;
10961408cc1fSYuval Mintz 	pfdev_info->indices_per_sb = PIS_PER_SB;
10971408cc1fSYuval Mintz 
10981408cc1fSYuval Mintz 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
10991408cc1fSYuval Mintz 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
11001408cc1fSYuval Mintz 	if (p_hwfn->cdev->num_hwfns > 1)
11011408cc1fSYuval Mintz 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
11021408cc1fSYuval Mintz 
11031408cc1fSYuval Mintz 	pfdev_info->stats_info.mstats.address =
11041408cc1fSYuval Mintz 	    PXP_VF_BAR0_START_MSDM_ZONE_B +
11051408cc1fSYuval Mintz 	    offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
11061408cc1fSYuval Mintz 	pfdev_info->stats_info.mstats.len =
11071408cc1fSYuval Mintz 	    sizeof(struct eth_mstorm_per_queue_stat);
11081408cc1fSYuval Mintz 
11091408cc1fSYuval Mintz 	pfdev_info->stats_info.ustats.address =
11101408cc1fSYuval Mintz 	    PXP_VF_BAR0_START_USDM_ZONE_B +
11111408cc1fSYuval Mintz 	    offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
11121408cc1fSYuval Mintz 	pfdev_info->stats_info.ustats.len =
11131408cc1fSYuval Mintz 	    sizeof(struct eth_ustorm_per_queue_stat);
11141408cc1fSYuval Mintz 
11151408cc1fSYuval Mintz 	pfdev_info->stats_info.pstats.address =
11161408cc1fSYuval Mintz 	    PXP_VF_BAR0_START_PSDM_ZONE_B +
11171408cc1fSYuval Mintz 	    offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
11181408cc1fSYuval Mintz 	pfdev_info->stats_info.pstats.len =
11191408cc1fSYuval Mintz 	    sizeof(struct eth_pstorm_per_queue_stat);
11201408cc1fSYuval Mintz 
11211408cc1fSYuval Mintz 	pfdev_info->stats_info.tstats.address = 0;
11221408cc1fSYuval Mintz 	pfdev_info->stats_info.tstats.len = 0;
11231408cc1fSYuval Mintz 
11241408cc1fSYuval Mintz 	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
11251408cc1fSYuval Mintz 
11261408cc1fSYuval Mintz 	pfdev_info->fw_major = FW_MAJOR_VERSION;
11271408cc1fSYuval Mintz 	pfdev_info->fw_minor = FW_MINOR_VERSION;
11281408cc1fSYuval Mintz 	pfdev_info->fw_rev = FW_REVISION_VERSION;
11291408cc1fSYuval Mintz 	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
11301408cc1fSYuval Mintz 	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
11311408cc1fSYuval Mintz 	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
11321408cc1fSYuval Mintz 
11331408cc1fSYuval Mintz 	pfdev_info->dev_type = p_hwfn->cdev->type;
11341408cc1fSYuval Mintz 	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
11351408cc1fSYuval Mintz 
11361408cc1fSYuval Mintz 	resc->num_rxqs = vf->num_rxqs;
11371408cc1fSYuval Mintz 	resc->num_txqs = vf->num_txqs;
11381408cc1fSYuval Mintz 	resc->num_sbs = vf->num_sbs;
11391408cc1fSYuval Mintz 	for (i = 0; i < resc->num_sbs; i++) {
11401408cc1fSYuval Mintz 		resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
11411408cc1fSYuval Mintz 		resc->hw_sbs[i].sb_qid = 0;
11421408cc1fSYuval Mintz 	}
11431408cc1fSYuval Mintz 
11441408cc1fSYuval Mintz 	for (i = 0; i < resc->num_rxqs; i++) {
11451408cc1fSYuval Mintz 		qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
11461408cc1fSYuval Mintz 				(u16 *)&resc->hw_qid[i]);
11471408cc1fSYuval Mintz 		resc->cid[i] = vf->vf_queues[i].fw_cid;
11481408cc1fSYuval Mintz 	}
11491408cc1fSYuval Mintz 
11501408cc1fSYuval Mintz 	resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
11511408cc1fSYuval Mintz 				      req->resc_request.num_mac_filters);
11521408cc1fSYuval Mintz 	resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
11531408cc1fSYuval Mintz 				       req->resc_request.num_vlan_filters);
11541408cc1fSYuval Mintz 
11551408cc1fSYuval Mintz 	/* This isn't really required, as the VF isn't limited, but some VFs
11561408cc1fSYuval Mintz 	 * might actually test this value, so we need to provide it.
11571408cc1fSYuval Mintz 	 */
11581408cc1fSYuval Mintz 	resc->num_mc_filters = req->resc_request.num_mc_filters;
11591408cc1fSYuval Mintz 
11601408cc1fSYuval Mintz 	/* Fill agreed size of bulletin board in response */
11611408cc1fSYuval Mintz 	resp->bulletin_size = vf->bulletin.size;
11621408cc1fSYuval Mintz 
11631408cc1fSYuval Mintz 	DP_VERBOSE(p_hwfn,
11641408cc1fSYuval Mintz 		   QED_MSG_IOV,
11651408cc1fSYuval Mintz 		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
11661408cc1fSYuval Mintz 		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
11671408cc1fSYuval Mintz 		   vf->abs_vf_id,
11681408cc1fSYuval Mintz 		   resp->pfdev_info.chip_num,
11691408cc1fSYuval Mintz 		   resp->pfdev_info.db_size,
11701408cc1fSYuval Mintz 		   resp->pfdev_info.indices_per_sb,
11711408cc1fSYuval Mintz 		   resp->pfdev_info.capabilities,
11721408cc1fSYuval Mintz 		   resc->num_rxqs,
11731408cc1fSYuval Mintz 		   resc->num_txqs,
11741408cc1fSYuval Mintz 		   resc->num_sbs,
11751408cc1fSYuval Mintz 		   resc->num_mac_filters,
11761408cc1fSYuval Mintz 		   resc->num_vlan_filters);
11771408cc1fSYuval Mintz 	vf->state = VF_ACQUIRED;
11781408cc1fSYuval Mintz 
11791408cc1fSYuval Mintz 	/* Prepare Response */
11801408cc1fSYuval Mintz out:
11811408cc1fSYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
11821408cc1fSYuval Mintz 			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
118337bff2b9SYuval Mintz }
118437bff2b9SYuval Mintz 
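/* Handle a VPORT_START request from a VF: program the CAU with the status
 * block addresses supplied by the VF, re-enable VF traffic, and start the
 * vport on the VF's behalf using the parameters from the request TLV.
 */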
1185dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1186dacd88d6SYuval Mintz 				       struct qed_ptt *p_ptt,
1187dacd88d6SYuval Mintz 				       struct qed_vf_info *vf)
1188dacd88d6SYuval Mintz {
1189dacd88d6SYuval Mintz 	struct qed_sp_vport_start_params params = { 0 };
1190dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1191dacd88d6SYuval Mintz 	struct vfpf_vport_start_tlv *start;
1192dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1193dacd88d6SYuval Mintz 	struct qed_vf_info *vf_info;
1194dacd88d6SYuval Mintz 	int sb_id;
1195dacd88d6SYuval Mintz 	int rc;
1196dacd88d6SYuval Mintz 
1197dacd88d6SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1198dacd88d6SYuval Mintz 	if (!vf_info) {
1199dacd88d6SYuval Mintz 		DP_NOTICE(p_hwfn->cdev,
1200dacd88d6SYuval Mintz 			  "Failed to get VF info, invalid vfid [%d]\n",
1201dacd88d6SYuval Mintz 			  vf->relative_vf_id);
1202dacd88d6SYuval Mintz 		return;
1203dacd88d6SYuval Mintz 	}
1204dacd88d6SYuval Mintz 
1205dacd88d6SYuval Mintz 	vf->state = VF_ENABLED;
1206dacd88d6SYuval Mintz 	start = &mbx->req_virt->start_vport;
1207dacd88d6SYuval Mintz 
1208dacd88d6SYuval Mintz 	/* Initialize Status block in CAU */
1209dacd88d6SYuval Mintz 	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1210dacd88d6SYuval Mintz 		if (!start->sb_addr[sb_id]) {
1211dacd88d6SYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1212dacd88d6SYuval Mintz 				   "VF[%d] did not fill the address of SB %d\n",
1213dacd88d6SYuval Mintz 				   vf->relative_vf_id, sb_id);
1214dacd88d6SYuval Mintz 			break;
1215dacd88d6SYuval Mintz 		}
1216dacd88d6SYuval Mintz 
1217dacd88d6SYuval Mintz 		qed_int_cau_conf_sb(p_hwfn, p_ptt,
1218dacd88d6SYuval Mintz 				    start->sb_addr[sb_id],
1219dacd88d6SYuval Mintz 				    vf->igu_sbs[sb_id],
1220dacd88d6SYuval Mintz 				    vf->abs_vf_id, 1);
1221dacd88d6SYuval Mintz 	}
1222dacd88d6SYuval Mintz 	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1223dacd88d6SYuval Mintz 
1224dacd88d6SYuval Mintz 	vf->mtu = start->mtu;
1225dacd88d6SYuval Mintz 
1226dacd88d6SYuval Mintz 	params.tpa_mode = start->tpa_mode;
1227dacd88d6SYuval Mintz 	params.remove_inner_vlan = start->inner_vlan_removal;
1228dacd88d6SYuval Mintz 
1229dacd88d6SYuval Mintz 	params.drop_ttl0 = false;
1230dacd88d6SYuval Mintz 	params.concrete_fid = vf->concrete_fid;
1231dacd88d6SYuval Mintz 	params.opaque_fid = vf->opaque_fid;
1232dacd88d6SYuval Mintz 	params.vport_id = vf->vport_id;
1233dacd88d6SYuval Mintz 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1234dacd88d6SYuval Mintz 	params.mtu = vf->mtu;
1235dacd88d6SYuval Mintz 
1236dacd88d6SYuval Mintz 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
1237dacd88d6SYuval Mintz 	if (rc != 0) {
1238dacd88d6SYuval Mintz 		DP_ERR(p_hwfn,
1239dacd88d6SYuval Mintz 		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1240dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1241dacd88d6SYuval Mintz 	} else {
1242dacd88d6SYuval Mintz 		vf->vport_instance++;
1243dacd88d6SYuval Mintz 	}
1244dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1245dacd88d6SYuval Mintz 			     sizeof(struct pfvf_def_resp_tlv), status);
1246dacd88d6SYuval Mintz }
1247dacd88d6SYuval Mintz 
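/* Handle a VPORT_TEARDOWN request from a VF - stop the VF's vport and
 * report success or failure back over the PF->VF channel.
 */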
1248dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1249dacd88d6SYuval Mintz 				      struct qed_ptt *p_ptt,
1250dacd88d6SYuval Mintz 				      struct qed_vf_info *vf)
1251dacd88d6SYuval Mintz {
1252dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1253dacd88d6SYuval Mintz 	int rc;
1254dacd88d6SYuval Mintz 
1255dacd88d6SYuval Mintz 	vf->vport_instance--;
1256dacd88d6SYuval Mintz 
1257dacd88d6SYuval Mintz 	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
1258dacd88d6SYuval Mintz 	if (rc != 0) {
1259dacd88d6SYuval Mintz 		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1260dacd88d6SYuval Mintz 		       rc);
1261dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1262dacd88d6SYuval Mintz 	}
1263dacd88d6SYuval Mintz 
1264dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1265dacd88d6SYuval Mintz 			     sizeof(struct pfvf_def_resp_tlv), status);
1266dacd88d6SYuval Mintz }
1267dacd88d6SYuval Mintz 
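/* The Mstorm queue-zones in the VF BAR follow all of the Tstorm queue-zones;
 * the Rx producer offset returned in the START_RXQ response below is
 * computed relative to this base.
 */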
1268dacd88d6SYuval Mintz #define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A
1269dacd88d6SYuval Mintz #define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START +	\
1270dacd88d6SYuval Mintz 				   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
1271dacd88d6SYuval Mintz 
1272dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1273dacd88d6SYuval Mintz 					  struct qed_ptt *p_ptt,
1274dacd88d6SYuval Mintz 					  struct qed_vf_info *vf, u8 status)
1275dacd88d6SYuval Mintz {
1276dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1277dacd88d6SYuval Mintz 	struct pfvf_start_queue_resp_tlv *p_tlv;
1278dacd88d6SYuval Mintz 	struct vfpf_start_rxq_tlv *req;
1279dacd88d6SYuval Mintz 
1280dacd88d6SYuval Mintz 	mbx->offset = (u8 *)mbx->reply_virt;
1281dacd88d6SYuval Mintz 
1282dacd88d6SYuval Mintz 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
1283dacd88d6SYuval Mintz 			    sizeof(*p_tlv));
1284dacd88d6SYuval Mintz 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1285dacd88d6SYuval Mintz 		    sizeof(struct channel_list_end_tlv));
1286dacd88d6SYuval Mintz 
1287dacd88d6SYuval Mintz 	/* Update the TLV with the response */
1288dacd88d6SYuval Mintz 	if (status == PFVF_STATUS_SUCCESS) {
1289dacd88d6SYuval Mintz 		u16 hw_qid = 0;
1290dacd88d6SYuval Mintz 
1291dacd88d6SYuval Mintz 		req = &mbx->req_virt->start_rxq;
1292dacd88d6SYuval Mintz 		qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
1293dacd88d6SYuval Mintz 				&hw_qid);
1294dacd88d6SYuval Mintz 
1295dacd88d6SYuval Mintz 		p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
1296dacd88d6SYuval Mintz 				hw_qid * MSTORM_QZONE_SIZE +
1297dacd88d6SYuval Mintz 				offsetof(struct mstorm_eth_queue_zone,
1298dacd88d6SYuval Mintz 					 rx_producers);
1299dacd88d6SYuval Mintz 	}
1300dacd88d6SYuval Mintz 
1301dacd88d6SYuval Mintz 	qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
1302dacd88d6SYuval Mintz }
1303dacd88d6SYuval Mintz 
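/* Handle a START_RXQ request from a VF - open the Rx queue via ramrod on
 * the VF's behalf and reply with the queue-zone producer offset.
 */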
1304dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1305dacd88d6SYuval Mintz 				     struct qed_ptt *p_ptt,
1306dacd88d6SYuval Mintz 				     struct qed_vf_info *vf)
1307dacd88d6SYuval Mintz {
1308dacd88d6SYuval Mintz 	struct qed_queue_start_common_params params;
1309dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1310dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1311dacd88d6SYuval Mintz 	struct vfpf_start_rxq_tlv *req;
1312dacd88d6SYuval Mintz 	int rc;
1313dacd88d6SYuval Mintz 
1314dacd88d6SYuval Mintz 	memset(&params, 0, sizeof(params));
1315dacd88d6SYuval Mintz 	req = &mbx->req_virt->start_rxq;
1316dacd88d6SYuval Mintz 	params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
1317dacd88d6SYuval Mintz 	params.vport_id = vf->vport_id;
1318dacd88d6SYuval Mintz 	params.sb = req->hw_sb;
1319dacd88d6SYuval Mintz 	params.sb_idx = req->sb_index;
1320dacd88d6SYuval Mintz 
1321dacd88d6SYuval Mintz 	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
1322dacd88d6SYuval Mintz 					 vf->vf_queues[req->rx_qid].fw_cid,
1323dacd88d6SYuval Mintz 					 &params,
1324dacd88d6SYuval Mintz 					 vf->abs_vf_id + 0x10,
1325dacd88d6SYuval Mintz 					 req->bd_max_bytes,
1326dacd88d6SYuval Mintz 					 req->rxq_addr,
1327dacd88d6SYuval Mintz 					 req->cqe_pbl_addr, req->cqe_pbl_size);
1328dacd88d6SYuval Mintz 
1329dacd88d6SYuval Mintz 	if (rc) {
1330dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1331dacd88d6SYuval Mintz 	} else {
1332dacd88d6SYuval Mintz 		vf->vf_queues[req->rx_qid].rxq_active = true;
1333dacd88d6SYuval Mintz 		vf->num_active_rxqs++;
1334dacd88d6SYuval Mintz 	}
1335dacd88d6SYuval Mintz 
1336dacd88d6SYuval Mintz 	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
1337dacd88d6SYuval Mintz }
1338dacd88d6SYuval Mintz 
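/* Handle a START_TXQ request from a VF - choose a PQ for the VF's Tx queue
 * and open the queue via ramrod on its behalf.
 */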
1339dacd88d6SYuval Mintz static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1340dacd88d6SYuval Mintz 				     struct qed_ptt *p_ptt,
1341dacd88d6SYuval Mintz 				     struct qed_vf_info *vf)
1342dacd88d6SYuval Mintz {
1343dacd88d6SYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
1344dacd88d6SYuval Mintz 	struct qed_queue_start_common_params params;
1345dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1346dacd88d6SYuval Mintz 	union qed_qm_pq_params pq_params;
1347dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1348dacd88d6SYuval Mintz 	struct vfpf_start_txq_tlv *req;
1349dacd88d6SYuval Mintz 	int rc;
1350dacd88d6SYuval Mintz 
1351dacd88d6SYuval Mintz 	/* Prepare the parameters which would choose the right PQ */
1352dacd88d6SYuval Mintz 	memset(&pq_params, 0, sizeof(pq_params));
1353dacd88d6SYuval Mintz 	pq_params.eth.is_vf = 1;
1354dacd88d6SYuval Mintz 	pq_params.eth.vf_id = vf->relative_vf_id;
1355dacd88d6SYuval Mintz 
1356dacd88d6SYuval Mintz 	memset(&params, 0, sizeof(params));
1357dacd88d6SYuval Mintz 	req = &mbx->req_virt->start_txq;
1358dacd88d6SYuval Mintz 	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
1359dacd88d6SYuval Mintz 	params.vport_id = vf->vport_id;
1360dacd88d6SYuval Mintz 	params.sb = req->hw_sb;
1361dacd88d6SYuval Mintz 	params.sb_idx = req->sb_index;
1362dacd88d6SYuval Mintz 
1363dacd88d6SYuval Mintz 	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
1364dacd88d6SYuval Mintz 					 vf->opaque_fid,
1365dacd88d6SYuval Mintz 					 vf->vf_queues[req->tx_qid].fw_cid,
1366dacd88d6SYuval Mintz 					 &params,
1367dacd88d6SYuval Mintz 					 vf->abs_vf_id + 0x10,
1368dacd88d6SYuval Mintz 					 req->pbl_addr,
1369dacd88d6SYuval Mintz 					 req->pbl_size, &pq_params);
1370dacd88d6SYuval Mintz 
1371dacd88d6SYuval Mintz 	if (rc)
1372dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1373dacd88d6SYuval Mintz 	else
1374dacd88d6SYuval Mintz 		vf->vf_queues[req->tx_qid].txq_active = true;
1375dacd88d6SYuval Mintz 
1376dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
1377dacd88d6SYuval Mintz 			     length, status);
1378dacd88d6SYuval Mintz }
1379dacd88d6SYuval Mintz 
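/* Close a range of the VF's Rx queues, skipping any that were never
 * started; stops at the first ramrod failure.
 */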
1380dacd88d6SYuval Mintz static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
1381dacd88d6SYuval Mintz 				struct qed_vf_info *vf,
1382dacd88d6SYuval Mintz 				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
1383dacd88d6SYuval Mintz {
1384dacd88d6SYuval Mintz 	int rc = 0;
1385dacd88d6SYuval Mintz 	int qid;
1386dacd88d6SYuval Mintz 
1387dacd88d6SYuval Mintz 	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
1388dacd88d6SYuval Mintz 		return -EINVAL;
1389dacd88d6SYuval Mintz 
1390dacd88d6SYuval Mintz 	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
1391dacd88d6SYuval Mintz 		if (vf->vf_queues[qid].rxq_active) {
1392dacd88d6SYuval Mintz 			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1393dacd88d6SYuval Mintz 						      vf->vf_queues[qid].
1394dacd88d6SYuval Mintz 						      fw_rx_qid, false,
1395dacd88d6SYuval Mintz 						      cqe_completion);
1396dacd88d6SYuval Mintz 
1397dacd88d6SYuval Mintz 			if (rc)
1398dacd88d6SYuval Mintz 				return rc;
1399dacd88d6SYuval Mintz 		}
1400dacd88d6SYuval Mintz 		vf->vf_queues[qid].rxq_active = false;
1401dacd88d6SYuval Mintz 		vf->num_active_rxqs--;
1402dacd88d6SYuval Mintz 	}
1403dacd88d6SYuval Mintz 
1404dacd88d6SYuval Mintz 	return rc;
1405dacd88d6SYuval Mintz }
1406dacd88d6SYuval Mintz 
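/* Close a range of the VF's Tx queues, skipping any that were never
 * started; stops at the first ramrod failure.
 */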
1407dacd88d6SYuval Mintz static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
1408dacd88d6SYuval Mintz 				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
1409dacd88d6SYuval Mintz {
1410dacd88d6SYuval Mintz 	int rc = 0;
1411dacd88d6SYuval Mintz 	int qid;
1412dacd88d6SYuval Mintz 
1413dacd88d6SYuval Mintz 	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
1414dacd88d6SYuval Mintz 		return -EINVAL;
1415dacd88d6SYuval Mintz 
1416dacd88d6SYuval Mintz 	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
1417dacd88d6SYuval Mintz 		if (vf->vf_queues[qid].txq_active) {
1418dacd88d6SYuval Mintz 			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1419dacd88d6SYuval Mintz 						      vf->vf_queues[qid].
1420dacd88d6SYuval Mintz 						      fw_tx_qid);
1421dacd88d6SYuval Mintz 
1422dacd88d6SYuval Mintz 			if (rc)
1423dacd88d6SYuval Mintz 				return rc;
1424dacd88d6SYuval Mintz 		}
1425dacd88d6SYuval Mintz 		vf->vf_queues[qid].txq_active = false;
1426dacd88d6SYuval Mintz 	}
1427dacd88d6SYuval Mintz 	return rc;
1428dacd88d6SYuval Mintz }
1429dacd88d6SYuval Mintz 
1430dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
1431dacd88d6SYuval Mintz 				     struct qed_ptt *p_ptt,
1432dacd88d6SYuval Mintz 				     struct qed_vf_info *vf)
1433dacd88d6SYuval Mintz {
1434dacd88d6SYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
1435dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1436dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1437dacd88d6SYuval Mintz 	struct vfpf_stop_rxqs_tlv *req;
1438dacd88d6SYuval Mintz 	int rc;
1439dacd88d6SYuval Mintz 
1440dacd88d6SYuval Mintz 	/* We allow starting from a qid != 0; in that case we need to make
1441dacd88d6SYuval Mintz 	 * sure that qid + num_qs doesn't exceed the actual number of queues
1442dacd88d6SYuval Mintz 	 * that exist.
1443dacd88d6SYuval Mintz 	 */
1444dacd88d6SYuval Mintz 	req = &mbx->req_virt->stop_rxqs;
1445dacd88d6SYuval Mintz 	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
1446dacd88d6SYuval Mintz 				  req->num_rxqs, req->cqe_completion);
1447dacd88d6SYuval Mintz 	if (rc)
1448dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1449dacd88d6SYuval Mintz 
1450dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
1451dacd88d6SYuval Mintz 			     length, status);
1452dacd88d6SYuval Mintz }
1453dacd88d6SYuval Mintz 
1454dacd88d6SYuval Mintz static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
1455dacd88d6SYuval Mintz 				     struct qed_ptt *p_ptt,
1456dacd88d6SYuval Mintz 				     struct qed_vf_info *vf)
1457dacd88d6SYuval Mintz {
1458dacd88d6SYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
1459dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1460dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1461dacd88d6SYuval Mintz 	struct vfpf_stop_txqs_tlv *req;
1462dacd88d6SYuval Mintz 	int rc;
1463dacd88d6SYuval Mintz 
1464dacd88d6SYuval Mintz 	/* We allow starting from a qid != 0; in that case we need to make
1465dacd88d6SYuval Mintz 	 * sure that qid + num_qs doesn't exceed the actual number of queues
1466dacd88d6SYuval Mintz 	 * that exist.
1467dacd88d6SYuval Mintz 	 */
1468dacd88d6SYuval Mintz 	req = &mbx->req_virt->stop_txqs;
1469dacd88d6SYuval Mintz 	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
1470dacd88d6SYuval Mintz 	if (rc)
1471dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1472dacd88d6SYuval Mintz 
1473dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
1474dacd88d6SYuval Mintz 			     length, status);
1475dacd88d6SYuval Mintz }
1476dacd88d6SYuval Mintz 
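/* Walk a TLV chain of the form [first tlv][ext tlv]...[CHANNEL_TLV_LIST_END]
 * and return the first TLV whose type matches req_type, or NULL if the
 * chain is malformed or no such TLV exists.
 */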
1477dacd88d6SYuval Mintz void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
1478dacd88d6SYuval Mintz 			       void *p_tlvs_list, u16 req_type)
1479dacd88d6SYuval Mintz {
1480dacd88d6SYuval Mintz 	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
1481dacd88d6SYuval Mintz 	int len = 0;
1482dacd88d6SYuval Mintz 
1483dacd88d6SYuval Mintz 	do {
1484dacd88d6SYuval Mintz 		if (!p_tlv->length) {
1485dacd88d6SYuval Mintz 			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
1486dacd88d6SYuval Mintz 			return NULL;
1487dacd88d6SYuval Mintz 		}
1488dacd88d6SYuval Mintz 
1489dacd88d6SYuval Mintz 		if (p_tlv->type == req_type) {
1490dacd88d6SYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1491dacd88d6SYuval Mintz 				   "Extended tlv type %d, length %d found\n",
1492dacd88d6SYuval Mintz 				   p_tlv->type, p_tlv->length);
1493dacd88d6SYuval Mintz 			return p_tlv;
1494dacd88d6SYuval Mintz 		}
1495dacd88d6SYuval Mintz 
1496dacd88d6SYuval Mintz 		len += p_tlv->length;
1497dacd88d6SYuval Mintz 		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
1498dacd88d6SYuval Mintz 
1499dacd88d6SYuval Mintz 		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
1500dacd88d6SYuval Mintz 			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
1501dacd88d6SYuval Mintz 			return NULL;
1502dacd88d6SYuval Mintz 		}
1503dacd88d6SYuval Mintz 	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
1504dacd88d6SYuval Mintz 
1505dacd88d6SYuval Mintz 	return NULL;
1506dacd88d6SYuval Mintz }
1507dacd88d6SYuval Mintz 
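/* Each of the following helpers looks for a specific extended TLV in the
 * VPORT_UPDATE request; when found, it copies the contents into the
 * vport-update ramrod parameters and sets the matching bit in tlvs_mask so
 * the response can reflect which features were handled.
 */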
1508dacd88d6SYuval Mintz static void
1509dacd88d6SYuval Mintz qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
1510dacd88d6SYuval Mintz 			    struct qed_sp_vport_update_params *p_data,
1511dacd88d6SYuval Mintz 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
1512dacd88d6SYuval Mintz {
1513dacd88d6SYuval Mintz 	struct vfpf_vport_update_activate_tlv *p_act_tlv;
1514dacd88d6SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1515dacd88d6SYuval Mintz 
1516dacd88d6SYuval Mintz 	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
1517dacd88d6SYuval Mintz 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
1518dacd88d6SYuval Mintz 	if (!p_act_tlv)
1519dacd88d6SYuval Mintz 		return;
1520dacd88d6SYuval Mintz 
1521dacd88d6SYuval Mintz 	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
1522dacd88d6SYuval Mintz 	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
1523dacd88d6SYuval Mintz 	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
1524dacd88d6SYuval Mintz 	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
1525dacd88d6SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
1526dacd88d6SYuval Mintz }
1527dacd88d6SYuval Mintz 
1528dacd88d6SYuval Mintz static void
1529dacd88d6SYuval Mintz qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
1530dacd88d6SYuval Mintz 				  struct qed_sp_vport_update_params *p_data,
1531dacd88d6SYuval Mintz 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
1532dacd88d6SYuval Mintz {
1533dacd88d6SYuval Mintz 	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
1534dacd88d6SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
1535dacd88d6SYuval Mintz 
1536dacd88d6SYuval Mintz 	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
1537dacd88d6SYuval Mintz 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
1538dacd88d6SYuval Mintz 	if (!p_mcast_tlv)
1539dacd88d6SYuval Mintz 		return;
1540dacd88d6SYuval Mintz 
1541dacd88d6SYuval Mintz 	p_data->update_approx_mcast_flg = 1;
1542dacd88d6SYuval Mintz 	memcpy(p_data->bins, p_mcast_tlv->bins,
1543dacd88d6SYuval Mintz 	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1544dacd88d6SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
1545dacd88d6SYuval Mintz }
1546dacd88d6SYuval Mintz 
1547dacd88d6SYuval Mintz static void
1548dacd88d6SYuval Mintz qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
1549dacd88d6SYuval Mintz 			      struct qed_sp_vport_update_params *p_data,
1550dacd88d6SYuval Mintz 			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
1551dacd88d6SYuval Mintz {
1552dacd88d6SYuval Mintz 	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
1553dacd88d6SYuval Mintz 	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
1554dacd88d6SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1555dacd88d6SYuval Mintz 
1556dacd88d6SYuval Mintz 	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
1557dacd88d6SYuval Mintz 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
1558dacd88d6SYuval Mintz 	if (!p_accept_tlv)
1559dacd88d6SYuval Mintz 		return;
1560dacd88d6SYuval Mintz 
1561dacd88d6SYuval Mintz 	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
1562dacd88d6SYuval Mintz 	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
1563dacd88d6SYuval Mintz 	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
1564dacd88d6SYuval Mintz 	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
1565dacd88d6SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
1566dacd88d6SYuval Mintz }
1567dacd88d6SYuval Mintz 
1568dacd88d6SYuval Mintz static void
1569dacd88d6SYuval Mintz qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
1570dacd88d6SYuval Mintz 			    struct qed_vf_info *vf,
1571dacd88d6SYuval Mintz 			    struct qed_sp_vport_update_params *p_data,
1572dacd88d6SYuval Mintz 			    struct qed_rss_params *p_rss,
1573dacd88d6SYuval Mintz 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
1574dacd88d6SYuval Mintz {
1575dacd88d6SYuval Mintz 	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
1576dacd88d6SYuval Mintz 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
1577dacd88d6SYuval Mintz 	u16 i, q_idx, max_q_idx;
1578dacd88d6SYuval Mintz 	u16 table_size;
1579dacd88d6SYuval Mintz 
1580dacd88d6SYuval Mintz 	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
1581dacd88d6SYuval Mintz 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
1582dacd88d6SYuval Mintz 	if (!p_rss_tlv) {
1583dacd88d6SYuval Mintz 		p_data->rss_params = NULL;
1584dacd88d6SYuval Mintz 		return;
1585dacd88d6SYuval Mintz 	}
1586dacd88d6SYuval Mintz 
1587dacd88d6SYuval Mintz 	memset(p_rss, 0, sizeof(struct qed_rss_params));
1588dacd88d6SYuval Mintz 
1589dacd88d6SYuval Mintz 	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
1590dacd88d6SYuval Mintz 				      VFPF_UPDATE_RSS_CONFIG_FLAG);
1591dacd88d6SYuval Mintz 	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
1592dacd88d6SYuval Mintz 					    VFPF_UPDATE_RSS_CAPS_FLAG);
1593dacd88d6SYuval Mintz 	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
1594dacd88d6SYuval Mintz 					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
1595dacd88d6SYuval Mintz 	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
1596dacd88d6SYuval Mintz 				   VFPF_UPDATE_RSS_KEY_FLAG);
1597dacd88d6SYuval Mintz 
1598dacd88d6SYuval Mintz 	p_rss->rss_enable = p_rss_tlv->rss_enable;
1599dacd88d6SYuval Mintz 	p_rss->rss_eng_id = vf->relative_vf_id + 1;
1600dacd88d6SYuval Mintz 	p_rss->rss_caps = p_rss_tlv->rss_caps;
1601dacd88d6SYuval Mintz 	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
1602dacd88d6SYuval Mintz 	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
1603dacd88d6SYuval Mintz 	       sizeof(p_rss->rss_ind_table));
1604dacd88d6SYuval Mintz 	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
1605dacd88d6SYuval Mintz 
1606dacd88d6SYuval Mintz 	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
1607dacd88d6SYuval Mintz 			   (1 << p_rss_tlv->rss_table_size_log));
1608dacd88d6SYuval Mintz 
1609dacd88d6SYuval Mintz 	max_q_idx = ARRAY_SIZE(vf->vf_queues);
1610dacd88d6SYuval Mintz 
1611dacd88d6SYuval Mintz 	for (i = 0; i < table_size; i++) {
1612dacd88d6SYuval Mintz 		u16 index = vf->vf_queues[0].fw_rx_qid;
1613dacd88d6SYuval Mintz 
1614dacd88d6SYuval Mintz 		q_idx = p_rss->rss_ind_table[i];
1615dacd88d6SYuval Mintz 		if (q_idx >= max_q_idx)
1616dacd88d6SYuval Mintz 			DP_NOTICE(p_hwfn,
1617dacd88d6SYuval Mintz 				  "rss_ind_table[%d] = %d, rxq is out of range\n",
1618dacd88d6SYuval Mintz 				  i, q_idx);
1619dacd88d6SYuval Mintz 		else if (!vf->vf_queues[q_idx].rxq_active)
1620dacd88d6SYuval Mintz 			DP_NOTICE(p_hwfn,
1621dacd88d6SYuval Mintz 				  "rss_ind_table[%d] = %d, rxq is not active\n",
1622dacd88d6SYuval Mintz 				  i, q_idx);
1623dacd88d6SYuval Mintz 		else
1624dacd88d6SYuval Mintz 			index = vf->vf_queues[q_idx].fw_rx_qid;
1625dacd88d6SYuval Mintz 		p_rss->rss_ind_table[i] = index;
1626dacd88d6SYuval Mintz 	}
1627dacd88d6SYuval Mintz 
1628dacd88d6SYuval Mintz 	p_data->rss_params = p_rss;
1629dacd88d6SYuval Mintz 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
1630dacd88d6SYuval Mintz }
1631dacd88d6SYuval Mintz 
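/* Handle a VPORT_UPDATE request from a VF - collect all supported extended
 * TLVs into a single vport-update ramrod and answer with a per-TLV response.
 */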
1632dacd88d6SYuval Mintz static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
1633dacd88d6SYuval Mintz 					struct qed_ptt *p_ptt,
1634dacd88d6SYuval Mintz 					struct qed_vf_info *vf)
1635dacd88d6SYuval Mintz {
1636dacd88d6SYuval Mintz 	struct qed_sp_vport_update_params params;
1637dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1638dacd88d6SYuval Mintz 	struct qed_rss_params rss_params;
1639dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1640dacd88d6SYuval Mintz 	u16 tlvs_mask = 0;
1641dacd88d6SYuval Mintz 	u16 length;
1642dacd88d6SYuval Mintz 	int rc;
1643dacd88d6SYuval Mintz 
1644dacd88d6SYuval Mintz 	memset(&params, 0, sizeof(params));
1645dacd88d6SYuval Mintz 	params.opaque_fid = vf->opaque_fid;
1646dacd88d6SYuval Mintz 	params.vport_id = vf->vport_id;
1647dacd88d6SYuval Mintz 	params.rss_params = NULL;
1648dacd88d6SYuval Mintz 
1649dacd88d6SYuval Mintz 	/* Search the extended TLV list and update the values requested by
1650dacd88d6SYuval Mintz 	 * the VF in struct qed_sp_vport_update_params.
1651dacd88d6SYuval Mintz 	 */
1652dacd88d6SYuval Mintz 	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
1653dacd88d6SYuval Mintz 	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
1654dacd88d6SYuval Mintz 	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
1655dacd88d6SYuval Mintz 	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
1656dacd88d6SYuval Mintz 				    mbx, &tlvs_mask);
1657dacd88d6SYuval Mintz 
1658dacd88d6SYuval Mintz 	/* Just log a message if there isn't a single extended TLV in the
1659dacd88d6SYuval Mintz 	 * buffer. Once every feature of the vport update ramrod is requested
1660dacd88d6SYuval Mintz 	 * by the VF as an extended TLV, an error can be returned in the
1661dacd88d6SYuval Mintz 	 * response when no extended TLV is present in the buffer.
1662dacd88d6SYuval Mintz 	 */
1663dacd88d6SYuval Mintz 	if (!tlvs_mask) {
1664dacd88d6SYuval Mintz 		DP_NOTICE(p_hwfn,
1665dacd88d6SYuval Mintz 			  "No feature tlvs found for vport update\n");
1666dacd88d6SYuval Mintz 		status = PFVF_STATUS_NOT_SUPPORTED;
1667dacd88d6SYuval Mintz 		goto out;
1668dacd88d6SYuval Mintz 	}
1669dacd88d6SYuval Mintz 
1670dacd88d6SYuval Mintz 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1671dacd88d6SYuval Mintz 
1672dacd88d6SYuval Mintz 	if (rc)
1673dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1674dacd88d6SYuval Mintz 
1675dacd88d6SYuval Mintz out:
1676dacd88d6SYuval Mintz 	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
1677dacd88d6SYuval Mintz 						  tlvs_mask, tlvs_mask);
1678dacd88d6SYuval Mintz 	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
1679dacd88d6SYuval Mintz }
1680dacd88d6SYuval Mintz 
1681dacd88d6SYuval Mintz int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
1682dacd88d6SYuval Mintz 		      int vfid, struct qed_filter_ucast *params)
1683dacd88d6SYuval Mintz {
1684dacd88d6SYuval Mintz 	struct qed_public_vf_info *vf;
1685dacd88d6SYuval Mintz 
1686dacd88d6SYuval Mintz 	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
1687dacd88d6SYuval Mintz 	if (!vf)
1688dacd88d6SYuval Mintz 		return -EINVAL;
1689dacd88d6SYuval Mintz 
1690dacd88d6SYuval Mintz 	/* No real decision to make; store the configured MAC */
1691dacd88d6SYuval Mintz 	if (params->type == QED_FILTER_MAC ||
1692dacd88d6SYuval Mintz 	    params->type == QED_FILTER_MAC_VLAN)
1693dacd88d6SYuval Mintz 		ether_addr_copy(vf->mac, params->mac);
1694dacd88d6SYuval Mintz 
1695dacd88d6SYuval Mintz 	return 0;
1696dacd88d6SYuval Mintz }
1697dacd88d6SYuval Mintz 
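/* Handle a UCAST_FILTER request from a VF - verify the VF has an active
 * vport, record the requested MAC and configure the unicast filter on the
 * VF's behalf.
 */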
1698dacd88d6SYuval Mintz static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
1699dacd88d6SYuval Mintz 					struct qed_ptt *p_ptt,
1700dacd88d6SYuval Mintz 					struct qed_vf_info *vf)
1701dacd88d6SYuval Mintz {
1702dacd88d6SYuval Mintz 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1703dacd88d6SYuval Mintz 	struct vfpf_ucast_filter_tlv *req;
1704dacd88d6SYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
1705dacd88d6SYuval Mintz 	struct qed_filter_ucast params;
1706dacd88d6SYuval Mintz 	int rc;
1707dacd88d6SYuval Mintz 
1708dacd88d6SYuval Mintz 	/* Prepare the unicast filter params */
1709dacd88d6SYuval Mintz 	memset(&params, 0, sizeof(struct qed_filter_ucast));
1710dacd88d6SYuval Mintz 	req = &mbx->req_virt->ucast_filter;
1711dacd88d6SYuval Mintz 	params.opcode = (enum qed_filter_opcode)req->opcode;
1712dacd88d6SYuval Mintz 	params.type = (enum qed_filter_ucast_type)req->type;
1713dacd88d6SYuval Mintz 
1714dacd88d6SYuval Mintz 	params.is_rx_filter = 1;
1715dacd88d6SYuval Mintz 	params.is_tx_filter = 1;
1716dacd88d6SYuval Mintz 	params.vport_to_remove_from = vf->vport_id;
1717dacd88d6SYuval Mintz 	params.vport_to_add_to = vf->vport_id;
1718dacd88d6SYuval Mintz 	memcpy(params.mac, req->mac, ETH_ALEN);
1719dacd88d6SYuval Mintz 	params.vlan = req->vlan;
1720dacd88d6SYuval Mintz 
1721dacd88d6SYuval Mintz 	DP_VERBOSE(p_hwfn,
1722dacd88d6SYuval Mintz 		   QED_MSG_IOV,
1723dacd88d6SYuval Mintz 		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
1724dacd88d6SYuval Mintz 		   vf->abs_vf_id, params.opcode, params.type,
1725dacd88d6SYuval Mintz 		   params.is_rx_filter ? "RX" : "",
1726dacd88d6SYuval Mintz 		   params.is_tx_filter ? "TX" : "",
1727dacd88d6SYuval Mintz 		   params.vport_to_add_to,
1728dacd88d6SYuval Mintz 		   params.mac[0], params.mac[1],
1729dacd88d6SYuval Mintz 		   params.mac[2], params.mac[3],
1730dacd88d6SYuval Mintz 		   params.mac[4], params.mac[5], params.vlan);
1731dacd88d6SYuval Mintz 
1732dacd88d6SYuval Mintz 	if (!vf->vport_instance) {
1733dacd88d6SYuval Mintz 		DP_VERBOSE(p_hwfn,
1734dacd88d6SYuval Mintz 			   QED_MSG_IOV,
1735dacd88d6SYuval Mintz 			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
1736dacd88d6SYuval Mintz 			   vf->abs_vf_id);
1737dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1738dacd88d6SYuval Mintz 		goto out;
1739dacd88d6SYuval Mintz 	}
1740dacd88d6SYuval Mintz 
1741dacd88d6SYuval Mintz 	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
1742dacd88d6SYuval Mintz 	if (rc) {
1743dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1744dacd88d6SYuval Mintz 		goto out;
1745dacd88d6SYuval Mintz 	}
1746dacd88d6SYuval Mintz 
1747dacd88d6SYuval Mintz 	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
1748dacd88d6SYuval Mintz 				     QED_SPQ_MODE_CB, NULL);
1749dacd88d6SYuval Mintz 	if (rc)
1750dacd88d6SYuval Mintz 		status = PFVF_STATUS_FAILURE;
1751dacd88d6SYuval Mintz 
1752dacd88d6SYuval Mintz out:
1753dacd88d6SYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
1754dacd88d6SYuval Mintz 			     sizeof(struct pfvf_def_resp_tlv), status);
1755dacd88d6SYuval Mintz }
1756dacd88d6SYuval Mintz 
17570b55e27dSYuval Mintz static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
17580b55e27dSYuval Mintz 				       struct qed_ptt *p_ptt,
17590b55e27dSYuval Mintz 				       struct qed_vf_info *vf)
17600b55e27dSYuval Mintz {
17610b55e27dSYuval Mintz 	int i;
17620b55e27dSYuval Mintz 
17630b55e27dSYuval Mintz 	/* Reset the SBs */
17640b55e27dSYuval Mintz 	for (i = 0; i < vf->num_sbs; i++)
17650b55e27dSYuval Mintz 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
17660b55e27dSYuval Mintz 						vf->igu_sbs[i],
17670b55e27dSYuval Mintz 						vf->opaque_fid, false);
17680b55e27dSYuval Mintz 
17690b55e27dSYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
17700b55e27dSYuval Mintz 			     sizeof(struct pfvf_def_resp_tlv),
17710b55e27dSYuval Mintz 			     PFVF_STATUS_SUCCESS);
17720b55e27dSYuval Mintz }
17730b55e27dSYuval Mintz 
17740b55e27dSYuval Mintz static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
17750b55e27dSYuval Mintz 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
17760b55e27dSYuval Mintz {
17770b55e27dSYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
17780b55e27dSYuval Mintz 	u8 status = PFVF_STATUS_SUCCESS;
17790b55e27dSYuval Mintz 
17800b55e27dSYuval Mintz 	/* Disable Interrupts for VF */
17810b55e27dSYuval Mintz 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
17820b55e27dSYuval Mintz 
17830b55e27dSYuval Mintz 	/* Reset Permission table */
17840b55e27dSYuval Mintz 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
17850b55e27dSYuval Mintz 
17860b55e27dSYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
17870b55e27dSYuval Mintz 			     length, status);
17880b55e27dSYuval Mintz }
17890b55e27dSYuval Mintz 
17900b55e27dSYuval Mintz static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
17910b55e27dSYuval Mintz 				   struct qed_ptt *p_ptt,
17920b55e27dSYuval Mintz 				   struct qed_vf_info *p_vf)
17930b55e27dSYuval Mintz {
17940b55e27dSYuval Mintz 	u16 length = sizeof(struct pfvf_def_resp_tlv);
17950b55e27dSYuval Mintz 
17960b55e27dSYuval Mintz 	qed_iov_vf_cleanup(p_hwfn, p_vf);
17970b55e27dSYuval Mintz 
17980b55e27dSYuval Mintz 	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
17990b55e27dSYuval Mintz 			     length, PFVF_STATUS_SUCCESS);
18000b55e27dSYuval Mintz }
18010b55e27dSYuval Mintz 
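/* Pretend to be the VF and poll the DORQ VF usage counter until it reads
 * zero; gives up with -EBUSY after roughly a second.
 */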
18020b55e27dSYuval Mintz static int
18030b55e27dSYuval Mintz qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
18040b55e27dSYuval Mintz 			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
18050b55e27dSYuval Mintz {
18060b55e27dSYuval Mintz 	int cnt;
18070b55e27dSYuval Mintz 	u32 val;
18080b55e27dSYuval Mintz 
18090b55e27dSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
18100b55e27dSYuval Mintz 
18110b55e27dSYuval Mintz 	for (cnt = 0; cnt < 50; cnt++) {
18120b55e27dSYuval Mintz 		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
18130b55e27dSYuval Mintz 		if (!val)
18140b55e27dSYuval Mintz 			break;
18150b55e27dSYuval Mintz 		msleep(20);
18160b55e27dSYuval Mintz 	}
18170b55e27dSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
18180b55e27dSYuval Mintz 
18190b55e27dSYuval Mintz 	if (cnt == 50) {
18200b55e27dSYuval Mintz 		DP_ERR(p_hwfn,
18210b55e27dSYuval Mintz 		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
18220b55e27dSYuval Mintz 		       p_vf->abs_vf_id, val);
18230b55e27dSYuval Mintz 		return -EBUSY;
18240b55e27dSYuval Mintz 	}
18250b55e27dSYuval Mintz 
18260b55e27dSYuval Mintz 	return 0;
18270b55e27dSYuval Mintz }
18280b55e27dSYuval Mintz 
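/* Sample the per-VOQ PBF producer/consumer counters, then wait until every
 * consumer has advanced by at least the initially sampled distance; gives
 * up with -EBUSY after roughly a second.
 */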
18290b55e27dSYuval Mintz static int
18300b55e27dSYuval Mintz qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
18310b55e27dSYuval Mintz 			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
18320b55e27dSYuval Mintz {
18330b55e27dSYuval Mintz 	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
18340b55e27dSYuval Mintz 	int i, cnt;
18350b55e27dSYuval Mintz 
18360b55e27dSYuval Mintz 	/* Read initial consumers & producers */
18370b55e27dSYuval Mintz 	for (i = 0; i < MAX_NUM_VOQS; i++) {
18380b55e27dSYuval Mintz 		u32 prod;
18390b55e27dSYuval Mintz 
18400b55e27dSYuval Mintz 		cons[i] = qed_rd(p_hwfn, p_ptt,
18410b55e27dSYuval Mintz 				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
18420b55e27dSYuval Mintz 				 i * 0x40);
18430b55e27dSYuval Mintz 		prod = qed_rd(p_hwfn, p_ptt,
18440b55e27dSYuval Mintz 			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
18450b55e27dSYuval Mintz 			      i * 0x40);
18460b55e27dSYuval Mintz 		distance[i] = prod - cons[i];
18470b55e27dSYuval Mintz 	}
18480b55e27dSYuval Mintz 
18490b55e27dSYuval Mintz 	/* Wait for consumers to pass the producers */
18500b55e27dSYuval Mintz 	i = 0;
18510b55e27dSYuval Mintz 	for (cnt = 0; cnt < 50; cnt++) {
18520b55e27dSYuval Mintz 		for (; i < MAX_NUM_VOQS; i++) {
18530b55e27dSYuval Mintz 			u32 tmp;
18540b55e27dSYuval Mintz 
18550b55e27dSYuval Mintz 			tmp = qed_rd(p_hwfn, p_ptt,
18560b55e27dSYuval Mintz 				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
18570b55e27dSYuval Mintz 				     i * 0x40);
18580b55e27dSYuval Mintz 			if (distance[i] > tmp - cons[i])
18590b55e27dSYuval Mintz 				break;
18600b55e27dSYuval Mintz 		}
18610b55e27dSYuval Mintz 
18620b55e27dSYuval Mintz 		if (i == MAX_NUM_VOQS)
18630b55e27dSYuval Mintz 			break;
18640b55e27dSYuval Mintz 
18650b55e27dSYuval Mintz 		msleep(20);
18660b55e27dSYuval Mintz 	}
18670b55e27dSYuval Mintz 
18680b55e27dSYuval Mintz 	if (cnt == 50) {
18690b55e27dSYuval Mintz 		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
18700b55e27dSYuval Mintz 		       p_vf->abs_vf_id, i);
18710b55e27dSYuval Mintz 		return -EBUSY;
18720b55e27dSYuval Mintz 	}
18730b55e27dSYuval Mintz 
18740b55e27dSYuval Mintz 	return 0;
18750b55e27dSYuval Mintz }
18760b55e27dSYuval Mintz 
18770b55e27dSYuval Mintz static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
18780b55e27dSYuval Mintz 			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
18790b55e27dSYuval Mintz {
18800b55e27dSYuval Mintz 	int rc;
18810b55e27dSYuval Mintz 
18820b55e27dSYuval Mintz 	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
18830b55e27dSYuval Mintz 	if (rc)
18840b55e27dSYuval Mintz 		return rc;
18850b55e27dSYuval Mintz 
18860b55e27dSYuval Mintz 	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
18870b55e27dSYuval Mintz 	if (rc)
18880b55e27dSYuval Mintz 		return rc;
18890b55e27dSYuval Mintz 
18900b55e27dSYuval Mintz 	return 0;
18910b55e27dSYuval Mintz }
18920b55e27dSYuval Mintz 
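/* Handle FLR for a single VF: clean up its SW state, poll HW until the VF
 * is quiescent, issue the final-cleanup ramrod, re-enable VF access and
 * mark the VF for ACK towards the MFW.
 */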
18930b55e27dSYuval Mintz static int
18940b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
18950b55e27dSYuval Mintz 			       struct qed_ptt *p_ptt,
18960b55e27dSYuval Mintz 			       u16 rel_vf_id, u32 *ack_vfs)
18970b55e27dSYuval Mintz {
18980b55e27dSYuval Mintz 	struct qed_vf_info *p_vf;
18990b55e27dSYuval Mintz 	int rc = 0;
19000b55e27dSYuval Mintz 
19010b55e27dSYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
19020b55e27dSYuval Mintz 	if (!p_vf)
19030b55e27dSYuval Mintz 		return 0;
19040b55e27dSYuval Mintz 
19050b55e27dSYuval Mintz 	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
19060b55e27dSYuval Mintz 	    (1ULL << (rel_vf_id % 64))) {
19070b55e27dSYuval Mintz 		u16 vfid = p_vf->abs_vf_id;
19080b55e27dSYuval Mintz 
19090b55e27dSYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
19100b55e27dSYuval Mintz 			   "VF[%d] - Handling FLR\n", vfid);
19110b55e27dSYuval Mintz 
19120b55e27dSYuval Mintz 		qed_iov_vf_cleanup(p_hwfn, p_vf);
19130b55e27dSYuval Mintz 
19140b55e27dSYuval Mintz 		/* If VF isn't active, no need for anything but SW */
19150b55e27dSYuval Mintz 		if (!p_vf->b_init)
19160b55e27dSYuval Mintz 			goto cleanup;
19170b55e27dSYuval Mintz 
19180b55e27dSYuval Mintz 		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
19190b55e27dSYuval Mintz 		if (rc)
19200b55e27dSYuval Mintz 			goto cleanup;
19210b55e27dSYuval Mintz 
19220b55e27dSYuval Mintz 		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
19230b55e27dSYuval Mintz 		if (rc) {
19240b55e27dSYuval Mintz 			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
19250b55e27dSYuval Mintz 			return rc;
19260b55e27dSYuval Mintz 		}
19270b55e27dSYuval Mintz 
19280b55e27dSYuval Mintz 		/* VF_STOPPED has to be set only after final cleanup
19290b55e27dSYuval Mintz 		 * but prior to re-enabling the VF.
19300b55e27dSYuval Mintz 		 */
19310b55e27dSYuval Mintz 		p_vf->state = VF_STOPPED;
19320b55e27dSYuval Mintz 
19330b55e27dSYuval Mintz 		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
19340b55e27dSYuval Mintz 		if (rc) {
19350b55e27dSYuval Mintz 			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
19360b55e27dSYuval Mintz 			       vfid);
19370b55e27dSYuval Mintz 			return rc;
19380b55e27dSYuval Mintz 		}
19390b55e27dSYuval Mintz cleanup:
19400b55e27dSYuval Mintz 		/* Mark VF for ack and clean pending state */
19410b55e27dSYuval Mintz 		if (p_vf->state == VF_RESET)
19420b55e27dSYuval Mintz 			p_vf->state = VF_STOPPED;
19430b55e27dSYuval Mintz 		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
19440b55e27dSYuval Mintz 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
19450b55e27dSYuval Mintz 		    ~(1ULL << (rel_vf_id % 64));
19460b55e27dSYuval Mintz 		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
19470b55e27dSYuval Mintz 		    ~(1ULL << (rel_vf_id % 64));
19480b55e27dSYuval Mintz 	}
19490b55e27dSYuval Mintz 
19500b55e27dSYuval Mintz 	return rc;
19510b55e27dSYuval Mintz }
19520b55e27dSYuval Mintz 
19530b55e27dSYuval Mintz int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
19540b55e27dSYuval Mintz {
19550b55e27dSYuval Mintz 	u32 ack_vfs[VF_MAX_STATIC / 32];
19560b55e27dSYuval Mintz 	int rc = 0;
19570b55e27dSYuval Mintz 	u16 i;
19580b55e27dSYuval Mintz 
19590b55e27dSYuval Mintz 	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
19600b55e27dSYuval Mintz 
19610b55e27dSYuval Mintz 	/* Since the BRB <-> PRS interface can't be tested as part of the FLR
19620b55e27dSYuval Mintz 	 * polling due to HW limitations, simply sleep a bit. And since
19630b55e27dSYuval Mintz 	 * there's no need to wait per-VF, do it before looping.
19640b55e27dSYuval Mintz 	 */
19650b55e27dSYuval Mintz 	msleep(100);
19660b55e27dSYuval Mintz 
19670b55e27dSYuval Mintz 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
19680b55e27dSYuval Mintz 		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
19690b55e27dSYuval Mintz 
19700b55e27dSYuval Mintz 	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
19710b55e27dSYuval Mintz 	return rc;
19720b55e27dSYuval Mintz }
19730b55e27dSYuval Mintz 
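/* Translate the MFW's disabled-VF bitmap into per-VF FLR indications -
 * mark each affected VF as VF_RESET and set its bit in pending_flr.
 * Returns non-zero if at least one VF was marked.
 */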
19740b55e27dSYuval Mintz int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
19750b55e27dSYuval Mintz {
19760b55e27dSYuval Mintz 	u16 i, found = 0;
19770b55e27dSYuval Mintz 
19780b55e27dSYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
19790b55e27dSYuval Mintz 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
19800b55e27dSYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
19810b55e27dSYuval Mintz 			   "[%08x,...,%08x]: %08x\n",
19820b55e27dSYuval Mintz 			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
19830b55e27dSYuval Mintz 
19840b55e27dSYuval Mintz 	if (!p_hwfn->cdev->p_iov_info) {
19850b55e27dSYuval Mintz 		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
19860b55e27dSYuval Mintz 		return 0;
19870b55e27dSYuval Mintz 	}
19880b55e27dSYuval Mintz 
19890b55e27dSYuval Mintz 	/* Mark VFs */
19900b55e27dSYuval Mintz 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
19910b55e27dSYuval Mintz 		struct qed_vf_info *p_vf;
19920b55e27dSYuval Mintz 		u8 vfid;
19930b55e27dSYuval Mintz 
19940b55e27dSYuval Mintz 		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
19950b55e27dSYuval Mintz 		if (!p_vf)
19960b55e27dSYuval Mintz 			continue;
19970b55e27dSYuval Mintz 
19980b55e27dSYuval Mintz 		vfid = p_vf->abs_vf_id;
19990b55e27dSYuval Mintz 		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
20000b55e27dSYuval Mintz 			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
20010b55e27dSYuval Mintz 			u16 rel_vf_id = p_vf->relative_vf_id;
20020b55e27dSYuval Mintz 
20030b55e27dSYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
20040b55e27dSYuval Mintz 				   "VF[%d] [rel %d] got FLR-ed\n",
20050b55e27dSYuval Mintz 				   vfid, rel_vf_id);
20060b55e27dSYuval Mintz 
20070b55e27dSYuval Mintz 			p_vf->state = VF_RESET;
20080b55e27dSYuval Mintz 
20090b55e27dSYuval Mintz 			/* No need to lock here, since pending_flr should
20100b55e27dSYuval Mintz 			 * only change here and before ACKing the MFW. Since
20110b55e27dSYuval Mintz 			 * the MFW will not trigger an additional attention
20120b55e27dSYuval Mintz 			 * for a VF FLR until we ACK, we're safe.
20130b55e27dSYuval Mintz 			 */
20140b55e27dSYuval Mintz 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
20150b55e27dSYuval Mintz 			found = 1;
20160b55e27dSYuval Mintz 		}
20170b55e27dSYuval Mintz 	}
20180b55e27dSYuval Mintz 
20190b55e27dSYuval Mintz 	return found;
20200b55e27dSYuval Mintz }
20210b55e27dSYuval Mintz 
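/* Dispatch a single VF->PF mailbox request according to its first TLV type;
 * unknown TLV types are logged along with the start of the mailbox buffer.
 */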
202237bff2b9SYuval Mintz static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
202337bff2b9SYuval Mintz 				    struct qed_ptt *p_ptt, int vfid)
202437bff2b9SYuval Mintz {
202537bff2b9SYuval Mintz 	struct qed_iov_vf_mbx *mbx;
202637bff2b9SYuval Mintz 	struct qed_vf_info *p_vf;
202737bff2b9SYuval Mintz 	int i;
202837bff2b9SYuval Mintz 
202937bff2b9SYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
203037bff2b9SYuval Mintz 	if (!p_vf)
203137bff2b9SYuval Mintz 		return;
203237bff2b9SYuval Mintz 
203337bff2b9SYuval Mintz 	mbx = &p_vf->vf_mbx;
203437bff2b9SYuval Mintz 
203537bff2b9SYuval Mintz 	/* qed_iov_process_mbx_request */
203637bff2b9SYuval Mintz 	DP_VERBOSE(p_hwfn,
203737bff2b9SYuval Mintz 		   QED_MSG_IOV,
203837bff2b9SYuval Mintz 		   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
203937bff2b9SYuval Mintz 
204037bff2b9SYuval Mintz 	mbx->first_tlv = mbx->req_virt->first_tlv;
204137bff2b9SYuval Mintz 
204237bff2b9SYuval Mintz 	/* check if tlv type is known */
204337bff2b9SYuval Mintz 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
20441408cc1fSYuval Mintz 		switch (mbx->first_tlv.tl.type) {
20451408cc1fSYuval Mintz 		case CHANNEL_TLV_ACQUIRE:
20461408cc1fSYuval Mintz 			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
20471408cc1fSYuval Mintz 			break;
2048dacd88d6SYuval Mintz 		case CHANNEL_TLV_VPORT_START:
2049dacd88d6SYuval Mintz 			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
2050dacd88d6SYuval Mintz 			break;
2051dacd88d6SYuval Mintz 		case CHANNEL_TLV_VPORT_TEARDOWN:
2052dacd88d6SYuval Mintz 			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
2053dacd88d6SYuval Mintz 			break;
2054dacd88d6SYuval Mintz 		case CHANNEL_TLV_START_RXQ:
2055dacd88d6SYuval Mintz 			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
2056dacd88d6SYuval Mintz 			break;
2057dacd88d6SYuval Mintz 		case CHANNEL_TLV_START_TXQ:
2058dacd88d6SYuval Mintz 			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
2059dacd88d6SYuval Mintz 			break;
2060dacd88d6SYuval Mintz 		case CHANNEL_TLV_STOP_RXQS:
2061dacd88d6SYuval Mintz 			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
2062dacd88d6SYuval Mintz 			break;
2063dacd88d6SYuval Mintz 		case CHANNEL_TLV_STOP_TXQS:
2064dacd88d6SYuval Mintz 			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
2065dacd88d6SYuval Mintz 			break;
2066dacd88d6SYuval Mintz 		case CHANNEL_TLV_VPORT_UPDATE:
2067dacd88d6SYuval Mintz 			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
2068dacd88d6SYuval Mintz 			break;
2069dacd88d6SYuval Mintz 		case CHANNEL_TLV_UCAST_FILTER:
2070dacd88d6SYuval Mintz 			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
2071dacd88d6SYuval Mintz 			break;
20720b55e27dSYuval Mintz 		case CHANNEL_TLV_CLOSE:
20730b55e27dSYuval Mintz 			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
20740b55e27dSYuval Mintz 			break;
20750b55e27dSYuval Mintz 		case CHANNEL_TLV_INT_CLEANUP:
20760b55e27dSYuval Mintz 			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
20770b55e27dSYuval Mintz 			break;
20780b55e27dSYuval Mintz 		case CHANNEL_TLV_RELEASE:
20790b55e27dSYuval Mintz 			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
20800b55e27dSYuval Mintz 			break;
20811408cc1fSYuval Mintz 		}
208237bff2b9SYuval Mintz 	} else {
208337bff2b9SYuval Mintz 		/* Unknown TLV - it may belong to a VF driver from the future,
208437bff2b9SYuval Mintz 		 * i.e., a version written after this PF driver that supports
208537bff2b9SYuval Mintz 		 * features we don't know about yet and therefore can't
208637bff2b9SYuval Mintz 		 * support. Alternatively, a broken VF driver may simply be
208737bff2b9SYuval Mintz 		 * sending garbage over the channel.
208837bff2b9SYuval Mintz 		 */
208937bff2b9SYuval Mintz 		DP_ERR(p_hwfn,
209037bff2b9SYuval Mintz 		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
209137bff2b9SYuval Mintz 		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
209237bff2b9SYuval Mintz 
209337bff2b9SYuval Mintz 		for (i = 0; i < 20; i++) {
209437bff2b9SYuval Mintz 			DP_VERBOSE(p_hwfn,
209537bff2b9SYuval Mintz 				   QED_MSG_IOV,
209637bff2b9SYuval Mintz 				   "%x ",
209737bff2b9SYuval Mintz 				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
209837bff2b9SYuval Mintz 		}
209937bff2b9SYuval Mintz 	}
210037bff2b9SYuval Mintz }
210137bff2b9SYuval Mintz 
210237bff2b9SYuval Mintz void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
210337bff2b9SYuval Mintz {
210437bff2b9SYuval Mintz 	u64 add_bit = 1ULL << (vfid % 64);
210537bff2b9SYuval Mintz 
210637bff2b9SYuval Mintz 	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
210737bff2b9SYuval Mintz }
210837bff2b9SYuval Mintz 
210937bff2b9SYuval Mintz static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
211037bff2b9SYuval Mintz 						    u64 *events)
211137bff2b9SYuval Mintz {
211237bff2b9SYuval Mintz 	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
211337bff2b9SYuval Mintz 
211437bff2b9SYuval Mintz 	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
211537bff2b9SYuval Mintz 	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
211637bff2b9SYuval Mintz }
211737bff2b9SYuval Mintz 
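/* EQ handler for a VF->PF channel message: remember where the VF placed its
 * request and schedule the IOV workqueue to process it.
 */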
211837bff2b9SYuval Mintz static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
211937bff2b9SYuval Mintz 			      u16 abs_vfid, struct regpair *vf_msg)
212037bff2b9SYuval Mintz {
212137bff2b9SYuval Mintz 	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
212237bff2b9SYuval Mintz 	struct qed_vf_info *p_vf;
212337bff2b9SYuval Mintz 
212437bff2b9SYuval Mintz 	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
212537bff2b9SYuval Mintz 		DP_VERBOSE(p_hwfn,
212637bff2b9SYuval Mintz 			   QED_MSG_IOV,
212737bff2b9SYuval Mintz 			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
212837bff2b9SYuval Mintz 			   abs_vfid);
212937bff2b9SYuval Mintz 		return 0;
213037bff2b9SYuval Mintz 	}
213137bff2b9SYuval Mintz 	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
213237bff2b9SYuval Mintz 
213337bff2b9SYuval Mintz 	/* Record the physical address of the request so that the handler
213437bff2b9SYuval Mintz 	 * can later copy the message from it.
213537bff2b9SYuval Mintz 	 */
213637bff2b9SYuval Mintz 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
213737bff2b9SYuval Mintz 
213837bff2b9SYuval Mintz 	/* Mark the event and schedule the workqueue */
213937bff2b9SYuval Mintz 	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
214037bff2b9SYuval Mintz 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
214137bff2b9SYuval Mintz 
214237bff2b9SYuval Mintz 	return 0;
214337bff2b9SYuval Mintz }
214437bff2b9SYuval Mintz 
214537bff2b9SYuval Mintz int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
214637bff2b9SYuval Mintz 			u8 opcode, __le16 echo, union event_ring_data *data)
214737bff2b9SYuval Mintz {
214837bff2b9SYuval Mintz 	switch (opcode) {
214937bff2b9SYuval Mintz 	case COMMON_EVENT_VF_PF_CHANNEL:
215037bff2b9SYuval Mintz 		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
215137bff2b9SYuval Mintz 					  &data->vf_pf_channel.msg_addr);
215237bff2b9SYuval Mintz 	default:
215337bff2b9SYuval Mintz 		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
215437bff2b9SYuval Mintz 			opcode);
215537bff2b9SYuval Mintz 		return -EINVAL;
215637bff2b9SYuval Mintz 	}
215737bff2b9SYuval Mintz }
215837bff2b9SYuval Mintz 
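/* Return the relative id of the first valid VF at or after rel_vf_id, or
 * MAX_NUM_VFS if no such VF exists.
 */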
215932a47e72SYuval Mintz u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
216032a47e72SYuval Mintz {
216132a47e72SYuval Mintz 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
216232a47e72SYuval Mintz 	u16 i;
216332a47e72SYuval Mintz 
216432a47e72SYuval Mintz 	if (!p_iov)
216532a47e72SYuval Mintz 		goto out;
216632a47e72SYuval Mintz 
216732a47e72SYuval Mintz 	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
216832a47e72SYuval Mintz 		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
216932a47e72SYuval Mintz 			return i;
217032a47e72SYuval Mintz 
217132a47e72SYuval Mintz out:
217232a47e72SYuval Mintz 	return MAX_NUM_VFS;
217332a47e72SYuval Mintz }
217437bff2b9SYuval Mintz 
217537bff2b9SYuval Mintz static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
217637bff2b9SYuval Mintz 			       int vfid)
217737bff2b9SYuval Mintz {
217837bff2b9SYuval Mintz 	struct qed_dmae_params params;
217937bff2b9SYuval Mintz 	struct qed_vf_info *vf_info;
218037bff2b9SYuval Mintz 
218137bff2b9SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
218237bff2b9SYuval Mintz 	if (!vf_info)
218337bff2b9SYuval Mintz 		return -EINVAL;
218437bff2b9SYuval Mintz 
218537bff2b9SYuval Mintz 	memset(&params, 0, sizeof(struct qed_dmae_params));
218637bff2b9SYuval Mintz 	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
218737bff2b9SYuval Mintz 	params.src_vfid = vf_info->abs_vf_id;
218837bff2b9SYuval Mintz 
218937bff2b9SYuval Mintz 	if (qed_dmae_host2host(p_hwfn, ptt,
219037bff2b9SYuval Mintz 			       vf_info->vf_mbx.pending_req,
219137bff2b9SYuval Mintz 			       vf_info->vf_mbx.req_phys,
219237bff2b9SYuval Mintz 			       sizeof(union vfpf_tlvs) / 4, &params)) {
219337bff2b9SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
219437bff2b9SYuval Mintz 			   "Failed to copy message from VF 0x%02x\n", vfid);
219537bff2b9SYuval Mintz 
219637bff2b9SYuval Mintz 		return -EIO;
219737bff2b9SYuval Mintz 	}
219837bff2b9SYuval Mintz 
219937bff2b9SYuval Mintz 	return 0;
220037bff2b9SYuval Mintz }
220137bff2b9SYuval Mintz 
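/* Report whether a VF has reached the stopped state; a VF without a valid
 * info entry is treated as stopped.
 */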
22020b55e27dSYuval Mintz bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
22030b55e27dSYuval Mintz {
22040b55e27dSYuval Mintz 	struct qed_vf_info *p_vf_info;
22050b55e27dSYuval Mintz 
22060b55e27dSYuval Mintz 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
22070b55e27dSYuval Mintz 	if (!p_vf_info)
22080b55e27dSYuval Mintz 		return true;
22090b55e27dSYuval Mintz 
22100b55e27dSYuval Mintz 	return p_vf_info->state == VF_STOPPED;
22110b55e27dSYuval Mintz }
22120b55e27dSYuval Mintz 
221337bff2b9SYuval Mintz /**
221437bff2b9SYuval Mintz  * qed_schedule_iov - set an IOV event flag and schedule the IOV task
221537bff2b9SYuval Mintz  * @hwfn: hardware function pointer
221637bff2b9SYuval Mintz  * @flag: which IOV event the task should handle
221737bff2b9SYuval Mintz  */
221837bff2b9SYuval Mintz void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
221937bff2b9SYuval Mintz {
222037bff2b9SYuval Mintz 	smp_mb__before_atomic();
222137bff2b9SYuval Mintz 	set_bit(flag, &hwfn->iov_task_flags);
222237bff2b9SYuval Mintz 	smp_mb__after_atomic();
222337bff2b9SYuval Mintz 	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
222437bff2b9SYuval Mintz 	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
222537bff2b9SYuval Mintz }
222637bff2b9SYuval Mintz 
22271408cc1fSYuval Mintz void qed_vf_start_iov_wq(struct qed_dev *cdev)
22281408cc1fSYuval Mintz {
22291408cc1fSYuval Mintz 	int i;
22301408cc1fSYuval Mintz 
22311408cc1fSYuval Mintz 	for_each_hwfn(cdev, i)
22321408cc1fSYuval Mintz 	    queue_delayed_work(cdev->hwfns[i].iov_wq,
22331408cc1fSYuval Mintz 			       &cdev->hwfns[i].iov_task, 0);
22341408cc1fSYuval Mintz }
22351408cc1fSYuval Mintz 
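/* Tear down SR-IOV: flush the per-hwfn IOV workqueues, mark all VFs as
 * disabled, optionally disable the PCI SR-IOV capability, then wait (up to
 * roughly two seconds per VF) for each VF to stop before releasing its HW
 * resources.
 */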
22360b55e27dSYuval Mintz int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
22370b55e27dSYuval Mintz {
22380b55e27dSYuval Mintz 	int i, j;
22390b55e27dSYuval Mintz 
22400b55e27dSYuval Mintz 	for_each_hwfn(cdev, i)
22410b55e27dSYuval Mintz 	    if (cdev->hwfns[i].iov_wq)
22420b55e27dSYuval Mintz 		flush_workqueue(cdev->hwfns[i].iov_wq);
22430b55e27dSYuval Mintz 
22440b55e27dSYuval Mintz 	/* Mark VFs for disablement */
22450b55e27dSYuval Mintz 	qed_iov_set_vfs_to_disable(cdev, true);
22460b55e27dSYuval Mintz 
22470b55e27dSYuval Mintz 	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
22480b55e27dSYuval Mintz 		pci_disable_sriov(cdev->pdev);
22490b55e27dSYuval Mintz 
22500b55e27dSYuval Mintz 	for_each_hwfn(cdev, i) {
22510b55e27dSYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
22520b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
22530b55e27dSYuval Mintz 
22540b55e27dSYuval Mintz 		/* Failure to acquire the ptt on a 100G device creates an odd
22550b55e27dSYuval Mintz 		 * error where the first engine has already released IOV.
22560b55e27dSYuval Mintz 		 */
22570b55e27dSYuval Mintz 		if (!ptt) {
22580b55e27dSYuval Mintz 			DP_ERR(hwfn, "Failed to acquire ptt\n");
22590b55e27dSYuval Mintz 			return -EBUSY;
22600b55e27dSYuval Mintz 		}
22610b55e27dSYuval Mintz 
22620b55e27dSYuval Mintz 		qed_for_each_vf(hwfn, j) {
22630b55e27dSYuval Mintz 			int k;
22640b55e27dSYuval Mintz 
22650b55e27dSYuval Mintz 			if (!qed_iov_is_valid_vfid(hwfn, j, true))
22660b55e27dSYuval Mintz 				continue;
22670b55e27dSYuval Mintz 
22680b55e27dSYuval Mintz 			/* Wait until VF is disabled before releasing */
22690b55e27dSYuval Mintz 			for (k = 0; k < 100; k++) {
22700b55e27dSYuval Mintz 				if (!qed_iov_is_vf_stopped(hwfn, j))
22710b55e27dSYuval Mintz 					msleep(20);
22720b55e27dSYuval Mintz 				else
22730b55e27dSYuval Mintz 					break;
22740b55e27dSYuval Mintz 			}
22750b55e27dSYuval Mintz 
22760b55e27dSYuval Mintz 			if (k < 100)
22770b55e27dSYuval Mintz 				qed_iov_release_hw_for_vf(hwfn, ptt, j);
22790b55e27dSYuval Mintz 			else
22800b55e27dSYuval Mintz 				DP_ERR(hwfn,
22810b55e27dSYuval Mintz 				       "Timeout waiting for VF's FLR to end\n");
22820b55e27dSYuval Mintz 		}
22830b55e27dSYuval Mintz 
22840b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
22850b55e27dSYuval Mintz 	}
22860b55e27dSYuval Mintz 
22870b55e27dSYuval Mintz 	qed_iov_set_vfs_to_disable(cdev, false);
22880b55e27dSYuval Mintz 
22890b55e27dSYuval Mintz 	return 0;
22900b55e27dSYuval Mintz }
22910b55e27dSYuval Mintz 
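/* Enable @num VFs: per hwfn, split up to 16 free status blocks evenly among
 * the VFs and initialize HW for every valid VF; only then is the PCI SR-IOV
 * capability enabled. Any failure rolls the partial configuration back.
 */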
22920b55e27dSYuval Mintz static int qed_sriov_enable(struct qed_dev *cdev, int num)
22930b55e27dSYuval Mintz {
22940b55e27dSYuval Mintz 	struct qed_sb_cnt_info sb_cnt_info;
22950b55e27dSYuval Mintz 	int i, j, rc;
22960b55e27dSYuval Mintz 
22970b55e27dSYuval Mintz 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
22980b55e27dSYuval Mintz 		DP_NOTICE(cdev, "Can start at most %d VFs\n",
22990b55e27dSYuval Mintz 			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
23000b55e27dSYuval Mintz 		return -EINVAL;
23010b55e27dSYuval Mintz 	}
23020b55e27dSYuval Mintz 
23030b55e27dSYuval Mintz 	/* Initialize HW for VF access */
23040b55e27dSYuval Mintz 	for_each_hwfn(cdev, j) {
23050b55e27dSYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[j];
23060b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
23070b55e27dSYuval Mintz 		int num_sbs = 0, limit = 16;
23080b55e27dSYuval Mintz 
23090b55e27dSYuval Mintz 		if (!ptt) {
23100b55e27dSYuval Mintz 			DP_ERR(hwfn, "Failed to acquire ptt\n");
23110b55e27dSYuval Mintz 			rc = -EBUSY;
23120b55e27dSYuval Mintz 			goto err;
23130b55e27dSYuval Mintz 		}
23140b55e27dSYuval Mintz 
23150b55e27dSYuval Mintz 		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
23160b55e27dSYuval Mintz 		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
23170b55e27dSYuval Mintz 		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
23180b55e27dSYuval Mintz 
23190b55e27dSYuval Mintz 		for (i = 0; i < num; i++) {
23200b55e27dSYuval Mintz 			if (!qed_iov_is_valid_vfid(hwfn, i, false))
23210b55e27dSYuval Mintz 				continue;
23220b55e27dSYuval Mintz 
23230b55e27dSYuval Mintz 			rc = qed_iov_init_hw_for_vf(hwfn,
23240b55e27dSYuval Mintz 						    ptt, i, num_sbs / num);
23250b55e27dSYuval Mintz 			if (rc) {
23260b55e27dSYuval Mintz 				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
23270b55e27dSYuval Mintz 				qed_ptt_release(hwfn, ptt);
23280b55e27dSYuval Mintz 				goto err;
23290b55e27dSYuval Mintz 			}
23300b55e27dSYuval Mintz 		}
23310b55e27dSYuval Mintz 
23320b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
23330b55e27dSYuval Mintz 	}
23340b55e27dSYuval Mintz 
23350b55e27dSYuval Mintz 	/* Enable SRIOV PCIe functions */
23360b55e27dSYuval Mintz 	rc = pci_enable_sriov(cdev->pdev, num);
23370b55e27dSYuval Mintz 	if (rc) {
23380b55e27dSYuval Mintz 		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
23390b55e27dSYuval Mintz 		goto err;
23400b55e27dSYuval Mintz 	}
23410b55e27dSYuval Mintz 
23420b55e27dSYuval Mintz 	return num;
23430b55e27dSYuval Mintz 
23440b55e27dSYuval Mintz err:
23450b55e27dSYuval Mintz 	qed_sriov_disable(cdev, false);
23460b55e27dSYuval Mintz 	return rc;
23470b55e27dSYuval Mintz }
23480b55e27dSYuval Mintz 
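/* Backend for the qed_iov_hv_ops .configure callback: a non-zero
 * @num_vfs_param enables that many VFs, zero disables SR-IOV entirely.
 */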
23490b55e27dSYuval Mintz static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
23500b55e27dSYuval Mintz {
23510b55e27dSYuval Mintz 	if (!IS_QED_SRIOV(cdev)) {
23520b55e27dSYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
23530b55e27dSYuval Mintz 		return -EOPNOTSUPP;
23540b55e27dSYuval Mintz 	}
23550b55e27dSYuval Mintz 
23560b55e27dSYuval Mintz 	if (num_vfs_param)
23570b55e27dSYuval Mintz 		return qed_sriov_enable(cdev, num_vfs_param);
23580b55e27dSYuval Mintz 	else
23590b55e27dSYuval Mintz 		return qed_sriov_disable(cdev, true);
23600b55e27dSYuval Mintz }
23610b55e27dSYuval Mintz 
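/* Collect and clear the pending-message bitmap, then copy and process each
 * flagged VF's mailbox request. If no PTT can be acquired, the work is
 * re-scheduled rather than lost.
 */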
236237bff2b9SYuval Mintz static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
236337bff2b9SYuval Mintz {
236437bff2b9SYuval Mintz 	u64 events[QED_VF_ARRAY_LENGTH];
236537bff2b9SYuval Mintz 	struct qed_ptt *ptt;
236637bff2b9SYuval Mintz 	int i;
236737bff2b9SYuval Mintz 
236837bff2b9SYuval Mintz 	ptt = qed_ptt_acquire(hwfn);
236937bff2b9SYuval Mintz 	if (!ptt) {
237037bff2b9SYuval Mintz 		DP_VERBOSE(hwfn, QED_MSG_IOV,
237137bff2b9SYuval Mintz 			   "Can't acquire PTT; re-scheduling\n");
237237bff2b9SYuval Mintz 		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
237337bff2b9SYuval Mintz 		return;
237437bff2b9SYuval Mintz 	}
237537bff2b9SYuval Mintz 
237637bff2b9SYuval Mintz 	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
237737bff2b9SYuval Mintz 
237837bff2b9SYuval Mintz 	DP_VERBOSE(hwfn, QED_MSG_IOV,
237937bff2b9SYuval Mintz 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
238037bff2b9SYuval Mintz 		   events[0], events[1], events[2]);
238137bff2b9SYuval Mintz 
238237bff2b9SYuval Mintz 	qed_for_each_vf(hwfn, i) {
238337bff2b9SYuval Mintz 		/* Skip VFs with no pending messages */
238437bff2b9SYuval Mintz 		if (!(events[i / 64] & (1ULL << (i % 64))))
238537bff2b9SYuval Mintz 			continue;
238637bff2b9SYuval Mintz 
238737bff2b9SYuval Mintz 		DP_VERBOSE(hwfn, QED_MSG_IOV,
238837bff2b9SYuval Mintz 			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
238937bff2b9SYuval Mintz 			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
239037bff2b9SYuval Mintz 
239137bff2b9SYuval Mintz 		/* Copy VF's message to PF's request buffer for that VF */
239237bff2b9SYuval Mintz 		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
239337bff2b9SYuval Mintz 			continue;
239437bff2b9SYuval Mintz 
239537bff2b9SYuval Mintz 		qed_iov_process_mbx_req(hwfn, ptt, i);
239637bff2b9SYuval Mintz 	}
239737bff2b9SYuval Mintz 
239837bff2b9SYuval Mintz 	qed_ptt_release(hwfn, ptt);
239937bff2b9SYuval Mintz }
240037bff2b9SYuval Mintz 
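/* Deferred IOV work for a PF: exit early if the workqueue is being stopped,
 * otherwise handle FLR cleanup and VF->PF messages according to the pending
 * flags, re-scheduling when a PTT cannot be acquired.
 */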
240137bff2b9SYuval Mintz void qed_iov_pf_task(struct work_struct *work)
240237bff2b9SYuval Mintz {
240337bff2b9SYuval Mintz 	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
240437bff2b9SYuval Mintz 					     iov_task.work);
24050b55e27dSYuval Mintz 	int rc;
240637bff2b9SYuval Mintz 
240737bff2b9SYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
240837bff2b9SYuval Mintz 		return;
240937bff2b9SYuval Mintz 
24100b55e27dSYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
24110b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
24120b55e27dSYuval Mintz 
24130b55e27dSYuval Mintz 		if (!ptt) {
24140b55e27dSYuval Mintz 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
24150b55e27dSYuval Mintz 			return;
24160b55e27dSYuval Mintz 		}
24170b55e27dSYuval Mintz 
24180b55e27dSYuval Mintz 		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
24190b55e27dSYuval Mintz 		if (rc)
24200b55e27dSYuval Mintz 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
24210b55e27dSYuval Mintz 
24220b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
24230b55e27dSYuval Mintz 	}
24240b55e27dSYuval Mintz 
242537bff2b9SYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
242637bff2b9SYuval Mintz 		qed_handle_vf_msg(hwfn);
242737bff2b9SYuval Mintz }
242837bff2b9SYuval Mintz 
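/* Stop and destroy the per-hwfn IOV workqueues. With @schedule_first set,
 * the STOP_WQ flag is raised and the delayed task is cancelled synchronously
 * before each queue is flushed and destroyed.
 */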
242937bff2b9SYuval Mintz void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
243037bff2b9SYuval Mintz {
243137bff2b9SYuval Mintz 	int i;
243237bff2b9SYuval Mintz 
243337bff2b9SYuval Mintz 	for_each_hwfn(cdev, i) {
243437bff2b9SYuval Mintz 		if (!cdev->hwfns[i].iov_wq)
243537bff2b9SYuval Mintz 			continue;
243637bff2b9SYuval Mintz 
243737bff2b9SYuval Mintz 		if (schedule_first) {
243837bff2b9SYuval Mintz 			qed_schedule_iov(&cdev->hwfns[i],
243937bff2b9SYuval Mintz 					 QED_IOV_WQ_STOP_WQ_FLAG);
244037bff2b9SYuval Mintz 			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
244137bff2b9SYuval Mintz 		}
244237bff2b9SYuval Mintz 
244337bff2b9SYuval Mintz 		flush_workqueue(cdev->hwfns[i].iov_wq);
244437bff2b9SYuval Mintz 		destroy_workqueue(cdev->hwfns[i].iov_wq);
244537bff2b9SYuval Mintz 	}
244637bff2b9SYuval Mintz }
244737bff2b9SYuval Mintz 
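/* For every hwfn whose PF supports SR-IOV, create a single-threaded IOV
 * workqueue named after the PCI bus, slot and absolute PF id, and initialize
 * its delayed work item.
 */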
244837bff2b9SYuval Mintz int qed_iov_wq_start(struct qed_dev *cdev)
244937bff2b9SYuval Mintz {
245037bff2b9SYuval Mintz 	char name[NAME_SIZE];
245137bff2b9SYuval Mintz 	int i;
245237bff2b9SYuval Mintz 
245337bff2b9SYuval Mintz 	for_each_hwfn(cdev, i) {
245437bff2b9SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
245537bff2b9SYuval Mintz 
245637bff2b9SYuval Mintz 		/* PFs need a dedicated workqueue only if they support IOV. */
245737bff2b9SYuval Mintz 		if (!IS_PF_SRIOV(p_hwfn))
245837bff2b9SYuval Mintz 			continue;
245937bff2b9SYuval Mintz 
246037bff2b9SYuval Mintz 		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
246137bff2b9SYuval Mintz 			 cdev->pdev->bus->number,
246237bff2b9SYuval Mintz 			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
246337bff2b9SYuval Mintz 
246437bff2b9SYuval Mintz 		p_hwfn->iov_wq = create_singlethread_workqueue(name);
246537bff2b9SYuval Mintz 		if (!p_hwfn->iov_wq) {
246637bff2b9SYuval Mintz 			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
246737bff2b9SYuval Mintz 			return -ENOMEM;
246837bff2b9SYuval Mintz 		}
246937bff2b9SYuval Mintz 
247037bff2b9SYuval Mintz 		INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
247137bff2b9SYuval Mintz 	}
247237bff2b9SYuval Mintz 
247337bff2b9SYuval Mintz 	return 0;
247437bff2b9SYuval Mintz }
24750b55e27dSYuval Mintz 
24760b55e27dSYuval Mintz const struct qed_iov_hv_ops qed_iov_ops_pass = {
24770b55e27dSYuval Mintz 	.configure = &qed_sriov_configure,
24780b55e27dSYuval Mintz };
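
/* Usage sketch, not taken from this driver's sources: the protocol driver
 * bound to the PCI function is expected to forward its sriov_configure
 * callback to the .configure op above. The names below (example_dev,
 * example_sriov_configure, iov_ops) are illustrative assumptions only.
 *
 *	static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
 *	{
 *		struct example_dev *edev = pci_get_drvdata(pdev);
 *
 *		if (!edev->iov_ops)
 *			return -EOPNOTSUPP;
 *
 *		return edev->iov_ops->configure(edev->cdev, num_vfs);
 *	}
 */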
2479