/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

/* IOV ramrods */
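/* Post a VF_START ramrod on the slowpath queue to create the VF's function
 * context in FW; blocks (EBLOCK) until the ramrod completes.
 */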
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
			   u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);

	p_ramrod->personality = PERSONALITY_ETH;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

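/* Post a VF_STOP ramrod to tear down the VF's function context in FW */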
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

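/* Returns true if the relative VF index is within this PF's VF range;
 * when b_enabled_only is set, the VF must also have been initialized.
 */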
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

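/* Read the SR-IOV extended capability from PCI config space and cache the
 * relevant fields (total/initial VFs, offset, stride, etc.) in cdev->p_iov_info.
 */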
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes the max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d - setting num_vfs to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

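/* Invalidate every IGU mapping line that belongs to a free (non-PF) status
 * block, so VFs later start from a clean IGU configuration.
 */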
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}

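/* Initialize the per-VF database: give each VF its slice of the mailbox
 * request/reply buffers and bulletin board, and derive its relative,
 * absolute, concrete and opaque IDs.
 */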
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;
	}
}

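/* Allocate the DMA-coherent buffers backing the VF database: mailbox
 * request/reply buffers and a bulletin board, each sized per-VF.
 */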
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

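/* Allocate the PF's IOV context and the VF database; a no-op for PFs
 * without an SR-IOV capability.
 */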
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

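/* Learn the device's SR-IOV configuration from the PCI capability and
 * compute the index of the first VF that belongs to this PF.
 */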
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
		return -ENOMEM;
	}
	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	return true;
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

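/* Enable internal access for a VF: clear pending PGLUE errors, configure
 * its MSI-X via the MFW, run the VF init phase and send the VF_START ramrod.
 */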
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (vf->state != VF_STOPPED) {
		DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
			  vf->abs_vf_id);
		return -EINVAL;
	}

	/* Start VF */
	rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, the queue zone permission table size is 320x9. There
 *      are 320 VF queues for a single engine device (256 for a dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

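/* Assign free IGU status blocks to the VF, program the matching IGU mapping
 * lines and CAU entries, and return the number of SBs actually allocated.
 */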
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure the igu sb in CAU which was marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}

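/* Initialize HW for a single VF: allocate IGU SBs, choose its queue and CID
 * ranges and enable internal access for it.
 */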
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
		return -EINVAL;
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16) cids);
	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf,
							      num_rx_queues);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
							   vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
			DP_NOTICE(p_hwfn,
				  "VF[%d] will require utilizing out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			return -EINVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}
	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

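/* Undo qed_iov_init_hw_for_vf(): stop the VF via ramrod if needed, disable
 * its interrupts, reset its permission table entries and free its IGU SBs.
 */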
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_vf_info *vf = NULL;
	int rc = 0;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->state != VF_STOPPED) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);

		if (rc != 0) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			return rc;
		}

		vf->state = VF_STOPPED;
	}

	/* Disabling interrupts and resetting permission table was done during
	 * vf-close; however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

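/* DMA the prepared reply into the VF's buffer, copying the first 8 bytes
 * (which carry the status) last, and then set the per-VF channel-ready flag
 * in USTORM RAM.
 */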
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
						      u16 relative_vf_id,
						      bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	memset(vf_info->mac, 0, ETH_ALEN);
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->num_mac_filters = 0;
	p_vf->num_vlan_filters = 0;

	/* If the VF previously requested fewer resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
		p_vf->vf_queues[i].rxq_active = 0;

	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

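/* Handle the VF's ACQUIRE mailbox request: validate FW compatibility, record
 * the VF's requested configuration and reply with the PF and resource
 * information the VF may use.
 */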
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
	struct pf_vf_resc *resc = &resp->resc;

	/* Validate FW compatibility */
	if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
	    req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
	    req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
	    req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
			vf->abs_vf_id,
			req->vfdev_info.fw_major,
			req->vfdev_info.fw_minor,
			req->vfdev_info.fw_revision,
			req->vfdev_info.fw_engineering,
			FW_MAJOR_VERSION,
			FW_MINOR_VERSION,
			FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	memset(resp, 0, sizeof(*resp));

	/* Fill in vf info stuff */
	vf->opaque_fid = req->vfdev_info.opaque_fid;
	vf->num_mac_filters = 1;
	vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	pfdev_info->stats_info.mstats.address =
	    PXP_VF_BAR0_START_MSDM_ZONE_B +
	    offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.mstats.len =
	    sizeof(struct eth_mstorm_per_queue_stat);

	pfdev_info->stats_info.ustats.address =
	    PXP_VF_BAR0_START_USDM_ZONE_B +
	    offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.ustats.len =
	    sizeof(struct eth_ustorm_per_queue_stat);

	pfdev_info->stats_info.pstats.address =
	    PXP_VF_BAR0_START_PSDM_ZONE_B +
	    offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.pstats.len =
	    sizeof(struct eth_pstorm_per_queue_stat);

	pfdev_info->stats_info.tstats.address = 0;
	pfdev_info->stats_info.tstats.len = 0;

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	resc->num_rxqs = vf->num_rxqs;
	resc->num_txqs = vf->num_txqs;
	resc->num_sbs = vf->num_sbs;
	for (i = 0; i < resc->num_sbs; i++) {
		resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
		resc->hw_sbs[i].sb_qid = 0;
	}

	for (i = 0; i < resc->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
				(u16 *)&resc->hw_qid[i]);
		resc->cid[i] = vf->vf_queues[i].fw_cid;
	}

	resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
				      req->resc_request.num_mac_filters);
	resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
				       req->resc_request.num_vlan_filters);

	/* This isn't really required, as the VF isn't limited, but some VFs
	 * might actually test this value, so we need to provide it.
	 */
	resc->num_mc_filters = req->resc_request.num_mc_filters;

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	int i;

	/* Reset the SBs */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, false);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}

static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			     length, status);
}

static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);

	qed_iov_vf_cleanup(p_hwfn, p_vf);

	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			     length, PFVF_STATUS_SUCCESS);
}

11220b55e27dSYuval Mintz static int
11230b55e27dSYuval Mintz qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
11240b55e27dSYuval Mintz 			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
11250b55e27dSYuval Mintz {
11260b55e27dSYuval Mintz 	int cnt;
11270b55e27dSYuval Mintz 	u32 val;
11280b55e27dSYuval Mintz 
11290b55e27dSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
11300b55e27dSYuval Mintz 
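	/* Poll the DORQ VF usage counter while pretending to be the VF; the
	 * loop below waits for it to drop to zero, allowing up to ~1 second
	 * (50 iterations x 20ms).
	 */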
11310b55e27dSYuval Mintz 	for (cnt = 0; cnt < 50; cnt++) {
11320b55e27dSYuval Mintz 		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
11330b55e27dSYuval Mintz 		if (!val)
11340b55e27dSYuval Mintz 			break;
11350b55e27dSYuval Mintz 		msleep(20);
11360b55e27dSYuval Mintz 	}
11370b55e27dSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
11380b55e27dSYuval Mintz 
11390b55e27dSYuval Mintz 	if (cnt == 50) {
11400b55e27dSYuval Mintz 		DP_ERR(p_hwfn,
11410b55e27dSYuval Mintz 		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
11420b55e27dSYuval Mintz 		       p_vf->abs_vf_id, val);
11430b55e27dSYuval Mintz 		return -EBUSY;
11440b55e27dSYuval Mintz 	}
11450b55e27dSYuval Mintz 
11460b55e27dSYuval Mintz 	return 0;
11470b55e27dSYuval Mintz }
11480b55e27dSYuval Mintz 
11490b55e27dSYuval Mintz static int
11500b55e27dSYuval Mintz qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
11510b55e27dSYuval Mintz 			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
11520b55e27dSYuval Mintz {
11530b55e27dSYuval Mintz 	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
11540b55e27dSYuval Mintz 	int i, cnt;
11550b55e27dSYuval Mintz 
11560b55e27dSYuval Mintz 	/* Read initial consumers & producers */
11570b55e27dSYuval Mintz 	for (i = 0; i < MAX_NUM_VOQS; i++) {
11580b55e27dSYuval Mintz 		u32 prod;
11590b55e27dSYuval Mintz 
11600b55e27dSYuval Mintz 		cons[i] = qed_rd(p_hwfn, p_ptt,
11610b55e27dSYuval Mintz 				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
11620b55e27dSYuval Mintz 				 i * 0x40);
11630b55e27dSYuval Mintz 		prod = qed_rd(p_hwfn, p_ptt,
11640b55e27dSYuval Mintz 			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
11650b55e27dSYuval Mintz 			      i * 0x40);
11660b55e27dSYuval Mintz 		distance[i] = prod - cons[i];
11670b55e27dSYuval Mintz 	}
11680b55e27dSYuval Mintz 
11690b55e27dSYuval Mintz 	/* Wait for consumers to pass the producers */
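	/* A VOQ is considered drained once its consumer has advanced by at
	 * least the producer-consumer distance sampled above; allow up to
	 * ~1 second (50 iterations x 20ms) for all VOQs.
	 */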
11700b55e27dSYuval Mintz 	i = 0;
11710b55e27dSYuval Mintz 	for (cnt = 0; cnt < 50; cnt++) {
11720b55e27dSYuval Mintz 		for (; i < MAX_NUM_VOQS; i++) {
11730b55e27dSYuval Mintz 			u32 tmp;
11740b55e27dSYuval Mintz 
11750b55e27dSYuval Mintz 			tmp = qed_rd(p_hwfn, p_ptt,
11760b55e27dSYuval Mintz 				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
11770b55e27dSYuval Mintz 				     i * 0x40);
11780b55e27dSYuval Mintz 			if (distance[i] > tmp - cons[i])
11790b55e27dSYuval Mintz 				break;
11800b55e27dSYuval Mintz 		}
11810b55e27dSYuval Mintz 
11820b55e27dSYuval Mintz 		if (i == MAX_NUM_VOQS)
11830b55e27dSYuval Mintz 			break;
11840b55e27dSYuval Mintz 
11850b55e27dSYuval Mintz 		msleep(20);
11860b55e27dSYuval Mintz 	}
11870b55e27dSYuval Mintz 
11880b55e27dSYuval Mintz 	if (cnt == 50) {
11890b55e27dSYuval Mintz 		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
11900b55e27dSYuval Mintz 		       p_vf->abs_vf_id, i);
11910b55e27dSYuval Mintz 		return -EBUSY;
11920b55e27dSYuval Mintz 	}
11930b55e27dSYuval Mintz 
11940b55e27dSYuval Mintz 	return 0;
11950b55e27dSYuval Mintz }
11960b55e27dSYuval Mintz 
11970b55e27dSYuval Mintz static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
11980b55e27dSYuval Mintz 			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
11990b55e27dSYuval Mintz {
12000b55e27dSYuval Mintz 	int rc;
12010b55e27dSYuval Mintz 
12020b55e27dSYuval Mintz 	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
12030b55e27dSYuval Mintz 	if (rc)
12040b55e27dSYuval Mintz 		return rc;
12050b55e27dSYuval Mintz 
12060b55e27dSYuval Mintz 	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
12070b55e27dSYuval Mintz 	if (rc)
12080b55e27dSYuval Mintz 		return rc;
12090b55e27dSYuval Mintz 
12100b55e27dSYuval Mintz 	return 0;
12110b55e27dSYuval Mintz }
12120b55e27dSYuval Mintz 
12130b55e27dSYuval Mintz static int
12140b55e27dSYuval Mintz qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
12150b55e27dSYuval Mintz 			       struct qed_ptt *p_ptt,
12160b55e27dSYuval Mintz 			       u16 rel_vf_id, u32 *ack_vfs)
12170b55e27dSYuval Mintz {
12180b55e27dSYuval Mintz 	struct qed_vf_info *p_vf;
12190b55e27dSYuval Mintz 	int rc = 0;
12200b55e27dSYuval Mintz 
12210b55e27dSYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
12220b55e27dSYuval Mintz 	if (!p_vf)
12230b55e27dSYuval Mintz 		return 0;
12240b55e27dSYuval Mintz 
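	/* pending_flr is an array of u64 words used as a single bitmap of
	 * relative VF IDs; e.g. rel_vf_id 70 maps to word 70 / 64 = 1,
	 * bit 70 % 64 = 6.
	 */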
12250b55e27dSYuval Mintz 	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
12260b55e27dSYuval Mintz 	    (1ULL << (rel_vf_id % 64))) {
12270b55e27dSYuval Mintz 		u16 vfid = p_vf->abs_vf_id;
12280b55e27dSYuval Mintz 
12290b55e27dSYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
12300b55e27dSYuval Mintz 			   "VF[%d] - Handling FLR\n", vfid);
12310b55e27dSYuval Mintz 
12320b55e27dSYuval Mintz 		qed_iov_vf_cleanup(p_hwfn, p_vf);
12330b55e27dSYuval Mintz 
12340b55e27dSYuval Mintz 		/* If VF isn't active, no need for anything but SW */
12350b55e27dSYuval Mintz 		if (!p_vf->b_init)
12360b55e27dSYuval Mintz 			goto cleanup;
12370b55e27dSYuval Mintz 
12380b55e27dSYuval Mintz 		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
12390b55e27dSYuval Mintz 		if (rc)
12400b55e27dSYuval Mintz 			goto cleanup;
12410b55e27dSYuval Mintz 
12420b55e27dSYuval Mintz 		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
12430b55e27dSYuval Mintz 		if (rc) {
12440b55e27dSYuval Mintz 			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
12450b55e27dSYuval Mintz 			return rc;
12460b55e27dSYuval Mintz 		}
12470b55e27dSYuval Mintz 
12480b55e27dSYuval Mintz 		/* VF_STOPPED has to be set only after final cleanup
12490b55e27dSYuval Mintz 		 * but prior to re-enabling the VF.
12500b55e27dSYuval Mintz 		 */
12510b55e27dSYuval Mintz 		p_vf->state = VF_STOPPED;
12520b55e27dSYuval Mintz 
12530b55e27dSYuval Mintz 		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
12540b55e27dSYuval Mintz 		if (rc) {
12550b55e27dSYuval Mintz 			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
12560b55e27dSYuval Mintz 			       vfid);
12570b55e27dSYuval Mintz 			return rc;
12580b55e27dSYuval Mintz 		}
12590b55e27dSYuval Mintz cleanup:
12600b55e27dSYuval Mintz 		/* Mark VF for ack and clean pending state */
12610b55e27dSYuval Mintz 		if (p_vf->state == VF_RESET)
12620b55e27dSYuval Mintz 			p_vf->state = VF_STOPPED;
12630b55e27dSYuval Mintz 		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
12640b55e27dSYuval Mintz 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
12650b55e27dSYuval Mintz 		    ~(1ULL << (rel_vf_id % 64));
12660b55e27dSYuval Mintz 		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
12670b55e27dSYuval Mintz 		    ~(1ULL << (rel_vf_id % 64));
12680b55e27dSYuval Mintz 	}
12690b55e27dSYuval Mintz 
12700b55e27dSYuval Mintz 	return rc;
12710b55e27dSYuval Mintz }
12720b55e27dSYuval Mintz 
12730b55e27dSYuval Mintz int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
12740b55e27dSYuval Mintz {
12750b55e27dSYuval Mintz 	u32 ack_vfs[VF_MAX_STATIC / 32];
12760b55e27dSYuval Mintz 	int rc = 0;
12770b55e27dSYuval Mintz 	u16 i;
12780b55e27dSYuval Mintz 
12790b55e27dSYuval Mintz 	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
12800b55e27dSYuval Mintz 
12810b55e27dSYuval Mintz 	/* Since the BRB <-> PRS interface can't be tested as part of the FLR
12820b55e27dSYuval Mintz 	 * polling due to HW limitations, simply sleep a bit. And since
12830b55e27dSYuval Mintz 	 * there's no need to wait per-VF, do it once before the loop.
12840b55e27dSYuval Mintz 	 */
12850b55e27dSYuval Mintz 	msleep(100);
12860b55e27dSYuval Mintz 
12870b55e27dSYuval Mintz 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
12880b55e27dSYuval Mintz 		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
12890b55e27dSYuval Mintz 
12900b55e27dSYuval Mintz 	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
12910b55e27dSYuval Mintz 	return rc;
12920b55e27dSYuval Mintz }
12930b55e27dSYuval Mintz 
12940b55e27dSYuval Mintz int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
12950b55e27dSYuval Mintz {
12960b55e27dSYuval Mintz 	u16 i, found = 0;
12970b55e27dSYuval Mintz 
12980b55e27dSYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
12990b55e27dSYuval Mintz 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
13000b55e27dSYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
13010b55e27dSYuval Mintz 			   "[%08x,...,%08x]: %08x\n",
13020b55e27dSYuval Mintz 			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
13030b55e27dSYuval Mintz 
13040b55e27dSYuval Mintz 	if (!p_hwfn->cdev->p_iov_info) {
13050b55e27dSYuval Mintz 		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
13060b55e27dSYuval Mintz 		return 0;
13070b55e27dSYuval Mintz 	}
13080b55e27dSYuval Mintz 
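	/* p_disabled_vfs is a bitmap of absolute VF IDs (32 per u32 word);
	 * each set bit is translated into the VF's relative ID and recorded
	 * in pending_flr below.
	 */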
13090b55e27dSYuval Mintz 	/* Mark VFs */
13100b55e27dSYuval Mintz 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
13110b55e27dSYuval Mintz 		struct qed_vf_info *p_vf;
13120b55e27dSYuval Mintz 		u8 vfid;
13130b55e27dSYuval Mintz 
13140b55e27dSYuval Mintz 		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
13150b55e27dSYuval Mintz 		if (!p_vf)
13160b55e27dSYuval Mintz 			continue;
13170b55e27dSYuval Mintz 
13180b55e27dSYuval Mintz 		vfid = p_vf->abs_vf_id;
13190b55e27dSYuval Mintz 		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
13200b55e27dSYuval Mintz 			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
13210b55e27dSYuval Mintz 			u16 rel_vf_id = p_vf->relative_vf_id;
13220b55e27dSYuval Mintz 
13230b55e27dSYuval Mintz 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
13240b55e27dSYuval Mintz 				   "VF[%d] [rel %d] got FLR-ed\n",
13250b55e27dSYuval Mintz 				   vfid, rel_vf_id);
13260b55e27dSYuval Mintz 
13270b55e27dSYuval Mintz 			p_vf->state = VF_RESET;
13280b55e27dSYuval Mintz 
13290b55e27dSYuval Mintz 			/* No need to lock here, since pending_flr should
13300b55e27dSYuval Mintz 			 * only change between here and the ACK to the MFW.
13310b55e27dSYuval Mintz 			 * Since the MFW will not trigger an additional FLR
13320b55e27dSYuval Mintz 			 * attention for this VF until we ACK, we're safe.
13330b55e27dSYuval Mintz 			 */
13340b55e27dSYuval Mintz 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
13350b55e27dSYuval Mintz 			found = 1;
13360b55e27dSYuval Mintz 		}
13370b55e27dSYuval Mintz 	}
13380b55e27dSYuval Mintz 
13390b55e27dSYuval Mintz 	return found;
13400b55e27dSYuval Mintz }
13410b55e27dSYuval Mintz 
134237bff2b9SYuval Mintz static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
134337bff2b9SYuval Mintz 				    struct qed_ptt *p_ptt, int vfid)
134437bff2b9SYuval Mintz {
134537bff2b9SYuval Mintz 	struct qed_iov_vf_mbx *mbx;
134637bff2b9SYuval Mintz 	struct qed_vf_info *p_vf;
134737bff2b9SYuval Mintz 	int i;
134837bff2b9SYuval Mintz 
134937bff2b9SYuval Mintz 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
135037bff2b9SYuval Mintz 	if (!p_vf)
135137bff2b9SYuval Mintz 		return;
135237bff2b9SYuval Mintz 
135337bff2b9SYuval Mintz 	mbx = &p_vf->vf_mbx;
135437bff2b9SYuval Mintz 
135537bff2b9SYuval Mintz 	/* qed_iov_process_mbx_request */
135637bff2b9SYuval Mintz 	DP_VERBOSE(p_hwfn,
135737bff2b9SYuval Mintz 		   QED_MSG_IOV,
135837bff2b9SYuval Mintz 		   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
135937bff2b9SYuval Mintz 
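	/* Cache the request's leading TLV locally; the dispatch below keys
	 * off its type and length.
	 */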
136037bff2b9SYuval Mintz 	mbx->first_tlv = mbx->req_virt->first_tlv;
136137bff2b9SYuval Mintz 
136237bff2b9SYuval Mintz 	/* check if tlv type is known */
136337bff2b9SYuval Mintz 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
13641408cc1fSYuval Mintz 		switch (mbx->first_tlv.tl.type) {
13651408cc1fSYuval Mintz 		case CHANNEL_TLV_ACQUIRE:
13661408cc1fSYuval Mintz 			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
13671408cc1fSYuval Mintz 			break;
13680b55e27dSYuval Mintz 		case CHANNEL_TLV_CLOSE:
13690b55e27dSYuval Mintz 			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
13700b55e27dSYuval Mintz 			break;
13710b55e27dSYuval Mintz 		case CHANNEL_TLV_INT_CLEANUP:
13720b55e27dSYuval Mintz 			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
13730b55e27dSYuval Mintz 			break;
13740b55e27dSYuval Mintz 		case CHANNEL_TLV_RELEASE:
13750b55e27dSYuval Mintz 			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
13760b55e27dSYuval Mintz 			break;
13771408cc1fSYuval Mintz 		}
137837bff2b9SYuval Mintz 	} else {
137937bff2b9SYuval Mintz 		/* unknown TLV - this may belong to a VF driver from the future
138037bff2b9SYuval Mintz 		 * - a version written after this PF driver was written, which
138137bff2b9SYuval Mintz 		 * supports features unknown as of yet. Too bad since we don't
138237bff2b9SYuval Mintz 		 * support them. Or this may be because someone wrote a crappy
138337bff2b9SYuval Mintz 		 * VF driver and is sending garbage over the channel.
138437bff2b9SYuval Mintz 		 */
138537bff2b9SYuval Mintz 		DP_ERR(p_hwfn,
138637bff2b9SYuval Mintz 		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
138737bff2b9SYuval Mintz 		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
138837bff2b9SYuval Mintz 
138937bff2b9SYuval Mintz 		for (i = 0; i < 20; i++) {
139037bff2b9SYuval Mintz 			DP_VERBOSE(p_hwfn,
139137bff2b9SYuval Mintz 				   QED_MSG_IOV,
139237bff2b9SYuval Mintz 				   "%x ",
139337bff2b9SYuval Mintz 				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
139437bff2b9SYuval Mintz 		}
139537bff2b9SYuval Mintz 	}
139637bff2b9SYuval Mintz }
139737bff2b9SYuval Mintz 
139837bff2b9SYuval Mintz void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
139937bff2b9SYuval Mintz {
140037bff2b9SYuval Mintz 	u64 add_bit = 1ULL << (vfid % 64);
140137bff2b9SYuval Mintz 
140237bff2b9SYuval Mintz 	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
140337bff2b9SYuval Mintz }
140437bff2b9SYuval Mintz 
140537bff2b9SYuval Mintz static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
140637bff2b9SYuval Mintz 						    u64 *events)
140737bff2b9SYuval Mintz {
140837bff2b9SYuval Mintz 	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
140937bff2b9SYuval Mintz 
141037bff2b9SYuval Mintz 	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
141137bff2b9SYuval Mintz 	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
141237bff2b9SYuval Mintz }
141337bff2b9SYuval Mintz 
141437bff2b9SYuval Mintz static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
141537bff2b9SYuval Mintz 			      u16 abs_vfid, struct regpair *vf_msg)
141637bff2b9SYuval Mintz {
141737bff2b9SYuval Mintz 	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
141837bff2b9SYuval Mintz 	struct qed_vf_info *p_vf;
141937bff2b9SYuval Mintz 
142037bff2b9SYuval Mintz 	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
142137bff2b9SYuval Mintz 		DP_VERBOSE(p_hwfn,
142237bff2b9SYuval Mintz 			   QED_MSG_IOV,
142337bff2b9SYuval Mintz 			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
142437bff2b9SYuval Mintz 			   abs_vfid);
142537bff2b9SYuval Mintz 		return 0;
142637bff2b9SYuval Mintz 	}
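	/* vfs_array is indexed by the VF ID relative to this PF, so translate
	 * the absolute ID using first_vf_in_pf.
	 */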
142737bff2b9SYuval Mintz 	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
142837bff2b9SYuval Mintz 
142937bff2b9SYuval Mintz 	/* Record the physical address of the request so that the handler
143037bff2b9SYuval Mintz 	 * can later copy the message from it.
143137bff2b9SYuval Mintz 	 */
143237bff2b9SYuval Mintz 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
143337bff2b9SYuval Mintz 
143437bff2b9SYuval Mintz 	/* Mark the event and schedule the workqueue */
143537bff2b9SYuval Mintz 	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
143637bff2b9SYuval Mintz 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
143737bff2b9SYuval Mintz 
143837bff2b9SYuval Mintz 	return 0;
143937bff2b9SYuval Mintz }
144037bff2b9SYuval Mintz 
144137bff2b9SYuval Mintz int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
144237bff2b9SYuval Mintz 			u8 opcode, __le16 echo, union event_ring_data *data)
144337bff2b9SYuval Mintz {
144437bff2b9SYuval Mintz 	switch (opcode) {
144537bff2b9SYuval Mintz 	case COMMON_EVENT_VF_PF_CHANNEL:
144637bff2b9SYuval Mintz 		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
144737bff2b9SYuval Mintz 					  &data->vf_pf_channel.msg_addr);
144837bff2b9SYuval Mintz 	default:
144937bff2b9SYuval Mintz 		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
145037bff2b9SYuval Mintz 			opcode);
145137bff2b9SYuval Mintz 		return -EINVAL;
145237bff2b9SYuval Mintz 	}
145337bff2b9SYuval Mintz }
145437bff2b9SYuval Mintz 
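/* Return the relative ID of the first VF at or above rel_vf_id that is still
 * active, or MAX_NUM_VFS if there is none.
 */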
145532a47e72SYuval Mintz u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
145632a47e72SYuval Mintz {
145732a47e72SYuval Mintz 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
145832a47e72SYuval Mintz 	u16 i;
145932a47e72SYuval Mintz 
146032a47e72SYuval Mintz 	if (!p_iov)
146132a47e72SYuval Mintz 		goto out;
146232a47e72SYuval Mintz 
146332a47e72SYuval Mintz 	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
146432a47e72SYuval Mintz 		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
146532a47e72SYuval Mintz 			return i;
146632a47e72SYuval Mintz 
146732a47e72SYuval Mintz out:
146832a47e72SYuval Mintz 	return MAX_NUM_VFS;
146932a47e72SYuval Mintz }
147037bff2b9SYuval Mintz 
147137bff2b9SYuval Mintz static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
147237bff2b9SYuval Mintz 			       int vfid)
147337bff2b9SYuval Mintz {
147437bff2b9SYuval Mintz 	struct qed_dmae_params params;
147537bff2b9SYuval Mintz 	struct qed_vf_info *vf_info;
147637bff2b9SYuval Mintz 
147737bff2b9SYuval Mintz 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
147837bff2b9SYuval Mintz 	if (!vf_info)
147937bff2b9SYuval Mintz 		return -EINVAL;
148037bff2b9SYuval Mintz 
148137bff2b9SYuval Mintz 	memset(&params, 0, sizeof(struct qed_dmae_params));
148237bff2b9SYuval Mintz 	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
148337bff2b9SYuval Mintz 	params.src_vfid = vf_info->abs_vf_id;
148437bff2b9SYuval Mintz 
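	/* DMAE the request straight from the VF's buffer (VF as the source
	 * function) into the PF's local request buffer; the length is given
	 * in dwords, hence the division by 4.
	 */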
148537bff2b9SYuval Mintz 	if (qed_dmae_host2host(p_hwfn, ptt,
148637bff2b9SYuval Mintz 			       vf_info->vf_mbx.pending_req,
148737bff2b9SYuval Mintz 			       vf_info->vf_mbx.req_phys,
148837bff2b9SYuval Mintz 			       sizeof(union vfpf_tlvs) / 4, &params)) {
148937bff2b9SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
149037bff2b9SYuval Mintz 			   "Failed to copy message from VF 0x%02x\n", vfid);
149137bff2b9SYuval Mintz 
149237bff2b9SYuval Mintz 		return -EIO;
149337bff2b9SYuval Mintz 	}
149437bff2b9SYuval Mintz 
149537bff2b9SYuval Mintz 	return 0;
149637bff2b9SYuval Mintz }
149737bff2b9SYuval Mintz 
14980b55e27dSYuval Mintz bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
14990b55e27dSYuval Mintz {
15000b55e27dSYuval Mintz 	struct qed_vf_info *p_vf_info;
15010b55e27dSYuval Mintz 
15020b55e27dSYuval Mintz 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
15030b55e27dSYuval Mintz 	if (!p_vf_info)
15040b55e27dSYuval Mintz 		return true;
15050b55e27dSYuval Mintz 
15060b55e27dSYuval Mintz 	return p_vf_info->state == VF_STOPPED;
15070b55e27dSYuval Mintz }
15080b55e27dSYuval Mintz 
150937bff2b9SYuval Mintz /**
151037bff2b9SYuval Mintz  * qed_schedule_iov - schedules IOV task for VF and PF
151137bff2b9SYuval Mintz  * @hwfn: hardware function pointer
151237bff2b9SYuval Mintz  * @flag: IOV flag for VF/PF
151337bff2b9SYuval Mintz  */
151437bff2b9SYuval Mintz void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
151537bff2b9SYuval Mintz {
151637bff2b9SYuval Mintz 	smp_mb__before_atomic();
151737bff2b9SYuval Mintz 	set_bit(flag, &hwfn->iov_task_flags);
151837bff2b9SYuval Mintz 	smp_mb__after_atomic();
151937bff2b9SYuval Mintz 	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
152037bff2b9SYuval Mintz 	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
152137bff2b9SYuval Mintz }
152237bff2b9SYuval Mintz 
15231408cc1fSYuval Mintz void qed_vf_start_iov_wq(struct qed_dev *cdev)
15241408cc1fSYuval Mintz {
15251408cc1fSYuval Mintz 	int i;
15261408cc1fSYuval Mintz 
15271408cc1fSYuval Mintz 	for_each_hwfn(cdev, i)
15281408cc1fSYuval Mintz 	    queue_delayed_work(cdev->hwfns[i].iov_wq,
15291408cc1fSYuval Mintz 			       &cdev->hwfns[i].iov_task, 0);
15301408cc1fSYuval Mintz }
15311408cc1fSYuval Mintz 
15320b55e27dSYuval Mintz int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
15330b55e27dSYuval Mintz {
15340b55e27dSYuval Mintz 	int i, j;
15350b55e27dSYuval Mintz 
15360b55e27dSYuval Mintz 	for_each_hwfn(cdev, i)
15370b55e27dSYuval Mintz 	    if (cdev->hwfns[i].iov_wq)
15380b55e27dSYuval Mintz 		flush_workqueue(cdev->hwfns[i].iov_wq);
15390b55e27dSYuval Mintz 
15400b55e27dSYuval Mintz 	/* Mark VFs for disablement */
15410b55e27dSYuval Mintz 	qed_iov_set_vfs_to_disable(cdev, true);
15420b55e27dSYuval Mintz 
15430b55e27dSYuval Mintz 	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
15440b55e27dSYuval Mintz 		pci_disable_sriov(cdev->pdev);
15450b55e27dSYuval Mintz 
15460b55e27dSYuval Mintz 	for_each_hwfn(cdev, i) {
15470b55e27dSYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
15480b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
15490b55e27dSYuval Mintz 
15500b55e27dSYuval Mintz 		/* Failure to acquire the ptt in 100g creates an odd error
15510b55e27dSYuval Mintz 		 * where the first engine has already released IOV.
15520b55e27dSYuval Mintz 		 */
15530b55e27dSYuval Mintz 		if (!ptt) {
15540b55e27dSYuval Mintz 			DP_ERR(hwfn, "Failed to acquire ptt\n");
15550b55e27dSYuval Mintz 			return -EBUSY;
15560b55e27dSYuval Mintz 		}
15570b55e27dSYuval Mintz 
15580b55e27dSYuval Mintz 		qed_for_each_vf(hwfn, j) {
15590b55e27dSYuval Mintz 			int k;
15600b55e27dSYuval Mintz 
15610b55e27dSYuval Mintz 			if (!qed_iov_is_valid_vfid(hwfn, j, true))
15620b55e27dSYuval Mintz 				continue;
15630b55e27dSYuval Mintz 
15640b55e27dSYuval Mintz 			/* Wait until VF is disabled before releasing */
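			/* Allow the FLR flow up to ~2 seconds
			 * (100 iterations x 20ms) to move the VF into
			 * VF_STOPPED.
			 */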
15650b55e27dSYuval Mintz 			for (k = 0; k < 100; k++) {
15660b55e27dSYuval Mintz 				if (!qed_iov_is_vf_stopped(hwfn, j))
15670b55e27dSYuval Mintz 					msleep(20);
15680b55e27dSYuval Mintz 				else
15690b55e27dSYuval Mintz 					break;
15700b55e27dSYuval Mintz 			}
15710b55e27dSYuval Mintz 
15720b55e27dSYuval Mintz 			if (k < 100)
15730b55e27dSYuval Mintz 				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
15740b55e27dSYuval Mintz 							  ptt, j);
15750b55e27dSYuval Mintz 			else
15760b55e27dSYuval Mintz 				DP_ERR(hwfn,
15770b55e27dSYuval Mintz 				       "Timeout waiting for VF's FLR to end\n");
15780b55e27dSYuval Mintz 		}
15790b55e27dSYuval Mintz 
15800b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
15810b55e27dSYuval Mintz 	}
15820b55e27dSYuval Mintz 
15830b55e27dSYuval Mintz 	qed_iov_set_vfs_to_disable(cdev, false);
15840b55e27dSYuval Mintz 
15850b55e27dSYuval Mintz 	return 0;
15860b55e27dSYuval Mintz }
15870b55e27dSYuval Mintz 
15880b55e27dSYuval Mintz static int qed_sriov_enable(struct qed_dev *cdev, int num)
15890b55e27dSYuval Mintz {
15900b55e27dSYuval Mintz 	struct qed_sb_cnt_info sb_cnt_info;
15910b55e27dSYuval Mintz 	int i, j, rc;
15920b55e27dSYuval Mintz 
15930b55e27dSYuval Mintz 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
15940b55e27dSYuval Mintz 		DP_NOTICE(cdev, "Can start at most %d VFs\n",
15950b55e27dSYuval Mintz 			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
15960b55e27dSYuval Mintz 		return -EINVAL;
15970b55e27dSYuval Mintz 	}
15980b55e27dSYuval Mintz 
15990b55e27dSYuval Mintz 	/* Initialize HW for VF access */
16000b55e27dSYuval Mintz 	for_each_hwfn(cdev, j) {
16010b55e27dSYuval Mintz 		struct qed_hwfn *hwfn = &cdev->hwfns[j];
16020b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
16030b55e27dSYuval Mintz 		int num_sbs = 0, limit = 16;
16040b55e27dSYuval Mintz 
16050b55e27dSYuval Mintz 		if (!ptt) {
16060b55e27dSYuval Mintz 			DP_ERR(hwfn, "Failed to acquire ptt\n");
16070b55e27dSYuval Mintz 			rc = -EBUSY;
16080b55e27dSYuval Mintz 			goto err;
16090b55e27dSYuval Mintz 		}
16100b55e27dSYuval Mintz 
16110b55e27dSYuval Mintz 		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
16120b55e27dSYuval Mintz 		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
16130b55e27dSYuval Mintz 		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
16140b55e27dSYuval Mintz 
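		/* Split the free status blocks evenly between the requested
		 * VFs; each VF below is initialized with num_sbs / num SBs.
		 */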
16150b55e27dSYuval Mintz 		for (i = 0; i < num; i++) {
16160b55e27dSYuval Mintz 			if (!qed_iov_is_valid_vfid(hwfn, i, false))
16170b55e27dSYuval Mintz 				continue;
16180b55e27dSYuval Mintz 
16190b55e27dSYuval Mintz 			rc = qed_iov_init_hw_for_vf(hwfn,
16200b55e27dSYuval Mintz 						    ptt, i, num_sbs / num);
16210b55e27dSYuval Mintz 			if (rc) {
16220b55e27dSYuval Mintz 				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
16230b55e27dSYuval Mintz 				qed_ptt_release(hwfn, ptt);
16240b55e27dSYuval Mintz 				goto err;
16250b55e27dSYuval Mintz 			}
16260b55e27dSYuval Mintz 		}
16270b55e27dSYuval Mintz 
16280b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
16290b55e27dSYuval Mintz 	}
16300b55e27dSYuval Mintz 
16310b55e27dSYuval Mintz 	/* Enable SRIOV PCIe functions */
16320b55e27dSYuval Mintz 	rc = pci_enable_sriov(cdev->pdev, num);
16330b55e27dSYuval Mintz 	if (rc) {
16340b55e27dSYuval Mintz 		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
16350b55e27dSYuval Mintz 		goto err;
16360b55e27dSYuval Mintz 	}
16370b55e27dSYuval Mintz 
16380b55e27dSYuval Mintz 	return num;
16390b55e27dSYuval Mintz 
16400b55e27dSYuval Mintz err:
16410b55e27dSYuval Mintz 	qed_sriov_disable(cdev, false);
16420b55e27dSYuval Mintz 	return rc;
16430b55e27dSYuval Mintz }
16440b55e27dSYuval Mintz 
16450b55e27dSYuval Mintz static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
16460b55e27dSYuval Mintz {
16470b55e27dSYuval Mintz 	if (!IS_QED_SRIOV(cdev)) {
16480b55e27dSYuval Mintz 		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
16490b55e27dSYuval Mintz 		return -EOPNOTSUPP;
16500b55e27dSYuval Mintz 	}
16510b55e27dSYuval Mintz 
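	/* A zero VF count requests SR-IOV teardown; otherwise enable the
	 * requested number of VFs.
	 */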
16520b55e27dSYuval Mintz 	if (num_vfs_param)
16530b55e27dSYuval Mintz 		return qed_sriov_enable(cdev, num_vfs_param);
16540b55e27dSYuval Mintz 	else
16550b55e27dSYuval Mintz 		return qed_sriov_disable(cdev, true);
16560b55e27dSYuval Mintz }
16570b55e27dSYuval Mintz 
165837bff2b9SYuval Mintz static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
165937bff2b9SYuval Mintz {
166037bff2b9SYuval Mintz 	u64 events[QED_VF_ARRAY_LENGTH];
166137bff2b9SYuval Mintz 	struct qed_ptt *ptt;
166237bff2b9SYuval Mintz 	int i;
166337bff2b9SYuval Mintz 
166437bff2b9SYuval Mintz 	ptt = qed_ptt_acquire(hwfn);
166537bff2b9SYuval Mintz 	if (!ptt) {
166637bff2b9SYuval Mintz 		DP_VERBOSE(hwfn, QED_MSG_IOV,
166737bff2b9SYuval Mintz 			   "Can't acquire PTT; re-scheduling\n");
166837bff2b9SYuval Mintz 		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
166937bff2b9SYuval Mintz 		return;
167037bff2b9SYuval Mintz 	}
167137bff2b9SYuval Mintz 
167237bff2b9SYuval Mintz 	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
167337bff2b9SYuval Mintz 
167437bff2b9SYuval Mintz 	DP_VERBOSE(hwfn, QED_MSG_IOV,
167537bff2b9SYuval Mintz 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
167637bff2b9SYuval Mintz 		   events[0], events[1], events[2]);
167737bff2b9SYuval Mintz 
167837bff2b9SYuval Mintz 	qed_for_each_vf(hwfn, i) {
167937bff2b9SYuval Mintz 		/* Skip VFs with no pending messages */
168037bff2b9SYuval Mintz 		if (!(events[i / 64] & (1ULL << (i % 64))))
168137bff2b9SYuval Mintz 			continue;
168237bff2b9SYuval Mintz 
168337bff2b9SYuval Mintz 		DP_VERBOSE(hwfn, QED_MSG_IOV,
168437bff2b9SYuval Mintz 			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
168537bff2b9SYuval Mintz 			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
168637bff2b9SYuval Mintz 
168737bff2b9SYuval Mintz 		/* Copy VF's message to PF's request buffer for that VF */
168837bff2b9SYuval Mintz 		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
168937bff2b9SYuval Mintz 			continue;
169037bff2b9SYuval Mintz 
169137bff2b9SYuval Mintz 		qed_iov_process_mbx_req(hwfn, ptt, i);
169237bff2b9SYuval Mintz 	}
169337bff2b9SYuval Mintz 
169437bff2b9SYuval Mintz 	qed_ptt_release(hwfn, ptt);
169537bff2b9SYuval Mintz }
169637bff2b9SYuval Mintz 
169737bff2b9SYuval Mintz void qed_iov_pf_task(struct work_struct *work)
169837bff2b9SYuval Mintz {
169937bff2b9SYuval Mintz 	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
170037bff2b9SYuval Mintz 					     iov_task.work);
17010b55e27dSYuval Mintz 	int rc;
170237bff2b9SYuval Mintz 
170337bff2b9SYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
170437bff2b9SYuval Mintz 		return;
170537bff2b9SYuval Mintz 
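	/* Service any pending FLR cleanup before handling new VF messages. */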
17060b55e27dSYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
17070b55e27dSYuval Mintz 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
17080b55e27dSYuval Mintz 
17090b55e27dSYuval Mintz 		if (!ptt) {
17100b55e27dSYuval Mintz 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
17110b55e27dSYuval Mintz 			return;
17120b55e27dSYuval Mintz 		}
17130b55e27dSYuval Mintz 
17140b55e27dSYuval Mintz 		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
17150b55e27dSYuval Mintz 		if (rc)
17160b55e27dSYuval Mintz 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
17170b55e27dSYuval Mintz 
17180b55e27dSYuval Mintz 		qed_ptt_release(hwfn, ptt);
17190b55e27dSYuval Mintz 	}
17200b55e27dSYuval Mintz 
172137bff2b9SYuval Mintz 	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
172237bff2b9SYuval Mintz 		qed_handle_vf_msg(hwfn);
172337bff2b9SYuval Mintz }
172437bff2b9SYuval Mintz 
172537bff2b9SYuval Mintz void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
172637bff2b9SYuval Mintz {
172737bff2b9SYuval Mintz 	int i;
172837bff2b9SYuval Mintz 
172937bff2b9SYuval Mintz 	for_each_hwfn(cdev, i) {
173037bff2b9SYuval Mintz 		if (!cdev->hwfns[i].iov_wq)
173137bff2b9SYuval Mintz 			continue;
173237bff2b9SYuval Mintz 
173337bff2b9SYuval Mintz 		if (schedule_first) {
173437bff2b9SYuval Mintz 			qed_schedule_iov(&cdev->hwfns[i],
173537bff2b9SYuval Mintz 					 QED_IOV_WQ_STOP_WQ_FLAG);
173637bff2b9SYuval Mintz 			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
173737bff2b9SYuval Mintz 		}
173837bff2b9SYuval Mintz 
173937bff2b9SYuval Mintz 		flush_workqueue(cdev->hwfns[i].iov_wq);
174037bff2b9SYuval Mintz 		destroy_workqueue(cdev->hwfns[i].iov_wq);
174137bff2b9SYuval Mintz 	}
174237bff2b9SYuval Mintz }
174337bff2b9SYuval Mintz 
174437bff2b9SYuval Mintz int qed_iov_wq_start(struct qed_dev *cdev)
174537bff2b9SYuval Mintz {
174637bff2b9SYuval Mintz 	char name[NAME_SIZE];
174737bff2b9SYuval Mintz 	int i;
174837bff2b9SYuval Mintz 
174937bff2b9SYuval Mintz 	for_each_hwfn(cdev, i) {
175037bff2b9SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
175137bff2b9SYuval Mintz 
175237bff2b9SYuval Mintz 		/* PFs need a dedicated workqueue only if they support IOV. */
175337bff2b9SYuval Mintz 		if (!IS_PF_SRIOV(p_hwfn))
175437bff2b9SYuval Mintz 			continue;
175537bff2b9SYuval Mintz 
175637bff2b9SYuval Mintz 		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
175737bff2b9SYuval Mintz 			 cdev->pdev->bus->number,
175837bff2b9SYuval Mintz 			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
175937bff2b9SYuval Mintz 
176037bff2b9SYuval Mintz 		p_hwfn->iov_wq = create_singlethread_workqueue(name);
176137bff2b9SYuval Mintz 		if (!p_hwfn->iov_wq) {
176237bff2b9SYuval Mintz 			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
176337bff2b9SYuval Mintz 			return -ENOMEM;
176437bff2b9SYuval Mintz 		}
176537bff2b9SYuval Mintz 
176637bff2b9SYuval Mintz 		INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
176737bff2b9SYuval Mintz 	}
176837bff2b9SYuval Mintz 
176937bff2b9SYuval Mintz 	return 0;
177037bff2b9SYuval Mintz }
17710b55e27dSYuval Mintz 
17720b55e27dSYuval Mintz const struct qed_iov_hv_ops qed_iov_ops_pass = {
17730b55e27dSYuval Mintz 	.configure = &qed_sriov_configure,
17740b55e27dSYuval Mintz };
1775