1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/etherdevice.h>
10 #include <linux/crc32.h>
11 #include <linux/qed/qed_iov_if.h>
12 #include "qed_cxt.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_init_ops.h"
16 #include "qed_int.h"
17 #include "qed_mcp.h"
18 #include "qed_reg_addr.h"
19 #include "qed_sp.h"
20 #include "qed_sriov.h"
21 #include "qed_vf.h"
22 
23 /* IOV ramrods */
24 static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
25 {
26 	struct vf_start_ramrod_data *p_ramrod = NULL;
27 	struct qed_spq_entry *p_ent = NULL;
28 	struct qed_sp_init_data init_data;
29 	int rc = -EINVAL;
30 	u8 fp_minor;
31 
32 	/* Get SPQ entry */
33 	memset(&init_data, 0, sizeof(init_data));
34 	init_data.cid = qed_spq_get_cid(p_hwfn);
35 	init_data.opaque_fid = p_vf->opaque_fid;
36 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
37 
38 	rc = qed_sp_init_request(p_hwfn, &p_ent,
39 				 COMMON_RAMROD_VF_START,
40 				 PROTOCOLID_COMMON, &init_data);
41 	if (rc)
42 		return rc;
43 
44 	p_ramrod = &p_ent->ramrod.vf_start;
45 
46 	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
47 	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
48 
49 	switch (p_hwfn->hw_info.personality) {
50 	case QED_PCI_ETH:
51 		p_ramrod->personality = PERSONALITY_ETH;
52 		break;
53 	case QED_PCI_ETH_ROCE:
54 		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
55 		break;
56 	default:
57 		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
58 			  p_hwfn->hw_info.personality);
59 		return -EINVAL;
60 	}
61 
62 	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
63 	if (fp_minor > ETH_HSI_VER_MINOR) {
64 		DP_VERBOSE(p_hwfn,
65 			   QED_MSG_IOV,
66 			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
67 			   p_vf->abs_vf_id,
68 			   ETH_HSI_VER_MAJOR,
69 			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
70 		fp_minor = ETH_HSI_VER_MINOR;
71 	}
72 
73 	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
74 	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
75 
76 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
77 		   "VF[%d] - Starting using HSI %02x.%02x\n",
78 		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
79 
80 	return qed_spq_post(p_hwfn, p_ent, NULL);
81 }
82 
83 static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
84 			  u32 concrete_vfid, u16 opaque_vfid)
85 {
86 	struct vf_stop_ramrod_data *p_ramrod = NULL;
87 	struct qed_spq_entry *p_ent = NULL;
88 	struct qed_sp_init_data init_data;
89 	int rc = -EINVAL;
90 
91 	/* Get SPQ entry */
92 	memset(&init_data, 0, sizeof(init_data));
93 	init_data.cid = qed_spq_get_cid(p_hwfn);
94 	init_data.opaque_fid = opaque_vfid;
95 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
96 
97 	rc = qed_sp_init_request(p_hwfn, &p_ent,
98 				 COMMON_RAMROD_VF_STOP,
99 				 PROTOCOLID_COMMON, &init_data);
100 	if (rc)
101 		return rc;
102 
103 	p_ramrod = &p_ent->ramrod.vf_stop;
104 
105 	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
106 
107 	return qed_spq_post(p_hwfn, p_ent, NULL);
108 }
109 
110 bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
111 			   int rel_vf_id, bool b_enabled_only)
112 {
113 	if (!p_hwfn->pf_iov_info) {
114 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
115 		return false;
116 	}
117 
118 	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
119 	    (rel_vf_id < 0))
120 		return false;
121 
122 	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
123 	    b_enabled_only)
124 		return false;
125 
126 	return true;
127 }
128 
129 static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
130 					       u16 relative_vf_id,
131 					       bool b_enabled_only)
132 {
133 	struct qed_vf_info *vf = NULL;
134 
135 	if (!p_hwfn->pf_iov_info) {
136 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
137 		return NULL;
138 	}
139 
140 	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
141 		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
142 	else
143 		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
144 		       relative_vf_id);
145 
146 	return vf;
147 }
148 
149 static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
150 				 struct qed_vf_info *p_vf, u16 rx_qid)
151 {
152 	if (rx_qid >= p_vf->num_rxqs)
153 		DP_VERBOSE(p_hwfn,
154 			   QED_MSG_IOV,
155 			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
156 			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
157 	return rx_qid < p_vf->num_rxqs;
158 }
159 
160 static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
161 				 struct qed_vf_info *p_vf, u16 tx_qid)
162 {
163 	if (tx_qid >= p_vf->num_txqs)
164 		DP_VERBOSE(p_hwfn,
165 			   QED_MSG_IOV,
166 			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
167 			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
168 	return tx_qid < p_vf->num_txqs;
169 }
170 
171 static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
172 				struct qed_vf_info *p_vf, u16 sb_idx)
173 {
174 	int i;
175 
176 	for (i = 0; i < p_vf->num_sbs; i++)
177 		if (p_vf->igu_sbs[i] == sb_idx)
178 			return true;
179 
180 	DP_VERBOSE(p_hwfn,
181 		   QED_MSG_IOV,
182 		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
183 		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
184 
185 	return false;
186 }
187 
188 int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
189 			     int vfid, struct qed_ptt *p_ptt)
190 {
191 	struct qed_bulletin_content *p_bulletin;
192 	int crc_size = sizeof(p_bulletin->crc);
193 	struct qed_dmae_params params;
194 	struct qed_vf_info *p_vf;
195 
196 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
197 	if (!p_vf)
198 		return -EINVAL;
199 
200 	if (!p_vf->vf_bulletin)
201 		return -EINVAL;
202 
203 	p_bulletin = p_vf->bulletin.p_virt;
204 
205 	/* Increment bulletin board version and compute crc */
206 	p_bulletin->version++;
207 	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
208 				p_vf->bulletin.size - crc_size);
209 
210 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
211 		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
212 		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
213 
214 	/* propagate bulletin board via dmae to vm memory */
215 	memset(&params, 0, sizeof(params));
216 	params.flags = QED_DMAE_FLAG_VF_DST;
217 	params.dst_vfid = p_vf->abs_vf_id;
218 	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
219 				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
220 				  &params);
221 }
222 
223 static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
224 {
225 	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
226 	int pos = iov->pos;
227 
228 	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
229 	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
230 
231 	pci_read_config_word(cdev->pdev,
232 			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
233 	pci_read_config_word(cdev->pdev,
234 			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
235 
236 	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
237 	if (iov->num_vfs) {
238 		DP_VERBOSE(cdev,
239 			   QED_MSG_IOV,
240 			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
241 		iov->num_vfs = 0;
242 	}
243 
244 	pci_read_config_word(cdev->pdev,
245 			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
246 
247 	pci_read_config_word(cdev->pdev,
248 			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
249 
250 	pci_read_config_word(cdev->pdev,
251 			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
252 
253 	pci_read_config_dword(cdev->pdev,
254 			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
255 
256 	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
257 
258 	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
259 
260 	DP_VERBOSE(cdev,
261 		   QED_MSG_IOV,
262 		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
263 		   iov->nres,
264 		   iov->cap,
265 		   iov->ctrl,
266 		   iov->total_vfs,
267 		   iov->initial_vfs,
268 		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
269 
270 	/* Some sanity checks */
271 	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
272 	    iov->total_vfs > NUM_OF_VFS(cdev)) {
273 		/* This can happen only due to a bug. In this case we set
274 		 * num_vfs to zero to avoid memory corruption in the code that
275 		 * assumes max number of vfs
276 		 */
277 		DP_NOTICE(cdev,
278 			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
279 			  iov->num_vfs);
280 
281 		iov->num_vfs = 0;
282 		iov->total_vfs = 0;
283 	}
284 
285 	return 0;
286 }
287 
288 static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
289 					struct qed_ptt *p_ptt)
290 {
291 	struct qed_igu_block *p_sb;
292 	u16 sb_id;
293 	u32 val;
294 
295 	if (!p_hwfn->hw_info.p_igu_info) {
296 		DP_ERR(p_hwfn,
297 		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
298 		return;
299 	}
300 
301 	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
302 	     sb_id++) {
303 		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
304 		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
305 		    !(p_sb->status & QED_IGU_STATUS_PF)) {
306 			val = qed_rd(p_hwfn, p_ptt,
307 				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
308 			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
309 			qed_wr(p_hwfn, p_ptt,
310 			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
311 		}
312 	}
313 }
314 
315 static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
316 {
317 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
318 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
319 	struct qed_bulletin_content *p_bulletin_virt;
320 	dma_addr_t req_p, rply_p, bulletin_p;
321 	union pfvf_tlvs *p_reply_virt_addr;
322 	union vfpf_tlvs *p_req_virt_addr;
323 	u8 idx = 0;
324 
325 	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
326 
327 	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
328 	req_p = p_iov_info->mbx_msg_phys_addr;
329 	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
330 	rply_p = p_iov_info->mbx_reply_phys_addr;
331 	p_bulletin_virt = p_iov_info->p_bulletins;
332 	bulletin_p = p_iov_info->bulletins_phys;
333 	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
334 		DP_ERR(p_hwfn,
335 		       "qed_iov_setup_vfdb called without allocating mem first\n");
336 		return;
337 	}
338 
339 	for (idx = 0; idx < p_iov->total_vfs; idx++) {
340 		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
341 		u32 concrete;
342 
343 		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
344 		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
345 		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
346 		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
347 
348 		vf->state = VF_STOPPED;
349 		vf->b_init = false;
350 
351 		vf->bulletin.phys = idx *
352 				    sizeof(struct qed_bulletin_content) +
353 				    bulletin_p;
354 		vf->bulletin.p_virt = p_bulletin_virt + idx;
355 		vf->bulletin.size = sizeof(struct qed_bulletin_content);
356 
357 		vf->relative_vf_id = idx;
358 		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
359 		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
360 		vf->concrete_fid = concrete;
361 		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
362 				 (vf->abs_vf_id << 8);
363 		vf->vport_id = idx + 1;
364 
365 		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
366 		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
367 	}
368 }
369 
370 static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
371 {
372 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
373 	void **p_v_addr;
374 	u16 num_vfs = 0;
375 
376 	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
377 
378 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
379 		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
380 
381 	/* Allocate PF Mailbox buffer (per-VF) */
382 	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
383 	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
384 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
385 				       p_iov_info->mbx_msg_size,
386 				       &p_iov_info->mbx_msg_phys_addr,
387 				       GFP_KERNEL);
388 	if (!*p_v_addr)
389 		return -ENOMEM;
390 
391 	/* Allocate PF Mailbox Reply buffer (per-VF) */
392 	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
393 	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
394 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
395 				       p_iov_info->mbx_reply_size,
396 				       &p_iov_info->mbx_reply_phys_addr,
397 				       GFP_KERNEL);
398 	if (!*p_v_addr)
399 		return -ENOMEM;
400 
401 	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
402 				     num_vfs;
403 	p_v_addr = &p_iov_info->p_bulletins;
404 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
405 				       p_iov_info->bulletins_size,
406 				       &p_iov_info->bulletins_phys,
407 				       GFP_KERNEL);
408 	if (!*p_v_addr)
409 		return -ENOMEM;
410 
411 	DP_VERBOSE(p_hwfn,
412 		   QED_MSG_IOV,
413 		   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
414 		   p_iov_info->mbx_msg_virt_addr,
415 		   (u64) p_iov_info->mbx_msg_phys_addr,
416 		   p_iov_info->mbx_reply_virt_addr,
417 		   (u64) p_iov_info->mbx_reply_phys_addr,
418 		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
419 
420 	return 0;
421 }
422 
423 static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
424 {
425 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
426 
427 	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
428 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
429 				  p_iov_info->mbx_msg_size,
430 				  p_iov_info->mbx_msg_virt_addr,
431 				  p_iov_info->mbx_msg_phys_addr);
432 
433 	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
434 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
435 				  p_iov_info->mbx_reply_size,
436 				  p_iov_info->mbx_reply_virt_addr,
437 				  p_iov_info->mbx_reply_phys_addr);
438 
439 	if (p_iov_info->p_bulletins)
440 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
441 				  p_iov_info->bulletins_size,
442 				  p_iov_info->p_bulletins,
443 				  p_iov_info->bulletins_phys);
444 }
445 
446 int qed_iov_alloc(struct qed_hwfn *p_hwfn)
447 {
448 	struct qed_pf_iov *p_sriov;
449 
450 	if (!IS_PF_SRIOV(p_hwfn)) {
451 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
452 			   "No SR-IOV - no need for IOV db\n");
453 		return 0;
454 	}
455 
456 	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
457 	if (!p_sriov) {
458 		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
459 		return -ENOMEM;
460 	}
461 
462 	p_hwfn->pf_iov_info = p_sriov;
463 
464 	return qed_iov_allocate_vfdb(p_hwfn);
465 }
466 
467 void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
468 {
469 	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
470 		return;
471 
472 	qed_iov_setup_vfdb(p_hwfn);
473 	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
474 }
475 
476 void qed_iov_free(struct qed_hwfn *p_hwfn)
477 {
478 	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
479 		qed_iov_free_vfdb(p_hwfn);
480 		kfree(p_hwfn->pf_iov_info);
481 	}
482 }
483 
484 void qed_iov_free_hw_info(struct qed_dev *cdev)
485 {
486 	kfree(cdev->p_iov_info);
487 	cdev->p_iov_info = NULL;
488 }
489 
490 int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
491 {
492 	struct qed_dev *cdev = p_hwfn->cdev;
493 	int pos;
494 	int rc;
495 
496 	if (IS_VF(p_hwfn->cdev))
497 		return 0;
498 
499 	/* Learn the PCI configuration */
500 	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
501 				      PCI_EXT_CAP_ID_SRIOV);
502 	if (!pos) {
503 		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
504 		return 0;
505 	}
506 
507 	/* Allocate a new struct for IOV information */
508 	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
509 	if (!cdev->p_iov_info) {
510 		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
511 		return -ENOMEM;
512 	}
513 	cdev->p_iov_info->pos = pos;
514 
515 	rc = qed_iov_pci_cfg_info(cdev);
516 	if (rc)
517 		return rc;
518 
519 	/* We want PF IOV to be synonemous with the existance of p_iov_info;
520 	 * In case the capability is published but there are no VFs, simply
521 	 * de-allocate the struct.
522 	 */
523 	if (!cdev->p_iov_info->total_vfs) {
524 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
525 			   "IOV capabilities, but no VFs are published\n");
526 		kfree(cdev->p_iov_info);
527 		cdev->p_iov_info = NULL;
528 		return 0;
529 	}
530 
531 	/* Calculate the first VF index - this is a bit tricky; Basically,
532 	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
533 	 * after the first engine's VFs.
534 	 */
535 	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
536 					   p_hwfn->abs_pf_id - 16;
537 	if (QED_PATH_ID(p_hwfn))
538 		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
539 
540 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
541 		   "First VF in hwfn 0x%08x\n",
542 		   cdev->p_iov_info->first_vf_in_pf);
543 
544 	return 0;
545 }
546 
547 static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
548 {
549 	/* Check PF supports sriov */
550 	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
551 	    !IS_PF_SRIOV_ALLOC(p_hwfn))
552 		return false;
553 
554 	/* Check VF validity */
555 	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
556 		return false;
557 
558 	return true;
559 }
560 
561 static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
562 				      u16 rel_vf_id, u8 to_disable)
563 {
564 	struct qed_vf_info *vf;
565 	int i;
566 
567 	for_each_hwfn(cdev, i) {
568 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
569 
570 		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
571 		if (!vf)
572 			continue;
573 
574 		vf->to_disable = to_disable;
575 	}
576 }
577 
578 void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
579 {
580 	u16 i;
581 
582 	if (!IS_QED_SRIOV(cdev))
583 		return;
584 
585 	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
586 		qed_iov_set_vf_to_disable(cdev, i, to_disable);
587 }
588 
589 static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
590 				       struct qed_ptt *p_ptt, u8 abs_vfid)
591 {
592 	qed_wr(p_hwfn, p_ptt,
593 	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
594 	       1 << (abs_vfid & 0x1f));
595 }
596 
597 static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
598 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
599 {
600 	int i;
601 
602 	/* Set VF masks and configuration - pretend */
603 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
604 
605 	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
606 
607 	/* unpretend */
608 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
609 
610 	/* iterate over all queues, clear sb consumer */
611 	for (i = 0; i < vf->num_sbs; i++)
612 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
613 						vf->igu_sbs[i],
614 						vf->opaque_fid, true);
615 }
616 
617 static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
618 				   struct qed_ptt *p_ptt,
619 				   struct qed_vf_info *vf, bool enable)
620 {
621 	u32 igu_vf_conf;
622 
623 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
624 
625 	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
626 
627 	if (enable)
628 		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
629 	else
630 		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
631 
632 	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
633 
634 	/* unpretend */
635 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
636 }
637 
638 static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
639 				    struct qed_ptt *p_ptt,
640 				    struct qed_vf_info *vf)
641 {
642 	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
643 	int rc;
644 
645 	if (vf->to_disable)
646 		return 0;
647 
648 	DP_VERBOSE(p_hwfn,
649 		   QED_MSG_IOV,
650 		   "Enable internal access for vf %x [abs %x]\n",
651 		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
652 
653 	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
654 
655 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
656 
657 	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
658 	if (rc)
659 		return rc;
660 
661 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
662 
663 	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
664 	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
665 
666 	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
667 		     p_hwfn->hw_info.hw_mode);
668 
669 	/* unpretend */
670 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
671 
672 	vf->state = VF_FREE;
673 
674 	return rc;
675 }
676 
677 /**
678  * @brief qed_iov_config_perm_table - configure the permission
679  *      zone table.
680  *      In E4, queue zone permission table size is 320x9. There
681  *      are 320 VF queues for single engine device (256 for dual
682  *      engine device), and each entry has the following format:
683  *      {Valid, VF[7:0]}
684  * @param p_hwfn
685  * @param p_ptt
686  * @param vf
687  * @param enable
688  */
689 static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
690 				      struct qed_ptt *p_ptt,
691 				      struct qed_vf_info *vf, u8 enable)
692 {
693 	u32 reg_addr, val;
694 	u16 qzone_id = 0;
695 	int qid;
696 
697 	for (qid = 0; qid < vf->num_rxqs; qid++) {
698 		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
699 				&qzone_id);
700 
701 		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
702 		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
703 		qed_wr(p_hwfn, p_ptt, reg_addr, val);
704 	}
705 }
706 
707 static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
708 				      struct qed_ptt *p_ptt,
709 				      struct qed_vf_info *vf)
710 {
711 	/* Reset vf in IGU - interrupts are still disabled */
712 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
713 
714 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
715 
716 	/* Permission Table */
717 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
718 }
719 
720 static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
721 				   struct qed_ptt *p_ptt,
722 				   struct qed_vf_info *vf, u16 num_rx_queues)
723 {
724 	struct qed_igu_block *igu_blocks;
725 	int qid = 0, igu_id = 0;
726 	u32 val = 0;
727 
728 	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
729 
730 	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
731 		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
732 	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
733 
734 	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
735 	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
736 	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
737 
738 	while ((qid < num_rx_queues) &&
739 	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
740 		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
741 			struct cau_sb_entry sb_entry;
742 
743 			vf->igu_sbs[qid] = (u16)igu_id;
744 			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
745 
746 			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
747 
748 			qed_wr(p_hwfn, p_ptt,
749 			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
750 			       val);
751 
752 			/* Configure igu sb in CAU which were marked valid */
753 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
754 					      p_hwfn->rel_pf_id,
755 					      vf->abs_vf_id, 1);
756 			qed_dmae_host2grc(p_hwfn, p_ptt,
757 					  (u64)(uintptr_t)&sb_entry,
758 					  CAU_REG_SB_VAR_MEMORY +
759 					  igu_id * sizeof(u64), 2, 0);
760 			qid++;
761 		}
762 		igu_id++;
763 	}
764 
765 	vf->num_sbs = (u8) num_rx_queues;
766 
767 	return vf->num_sbs;
768 }
769 
770 static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
771 				    struct qed_ptt *p_ptt,
772 				    struct qed_vf_info *vf)
773 {
774 	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
775 	int idx, igu_id;
776 	u32 addr, val;
777 
778 	/* Invalidate igu CAM lines and mark them as free */
779 	for (idx = 0; idx < vf->num_sbs; idx++) {
780 		igu_id = vf->igu_sbs[idx];
781 		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
782 
783 		val = qed_rd(p_hwfn, p_ptt, addr);
784 		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
785 		qed_wr(p_hwfn, p_ptt, addr, val);
786 
787 		p_info->igu_map.igu_blocks[igu_id].status |=
788 		    QED_IGU_STATUS_FREE;
789 
790 		p_hwfn->hw_info.p_igu_info->free_blks++;
791 	}
792 
793 	vf->num_sbs = 0;
794 }
795 
796 static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
797 				  struct qed_ptt *p_ptt,
798 				  u16 rel_vf_id, u16 num_rx_queues)
799 {
800 	u8 num_of_vf_avaiable_chains = 0;
801 	struct qed_vf_info *vf = NULL;
802 	int rc = 0;
803 	u32 cids;
804 	u8 i;
805 
806 	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
807 	if (!vf) {
808 		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
809 		return -EINVAL;
810 	}
811 
812 	if (vf->b_init) {
813 		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
814 		return -EINVAL;
815 	}
816 
817 	/* Limit number of queues according to number of CIDs */
818 	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
819 	DP_VERBOSE(p_hwfn,
820 		   QED_MSG_IOV,
821 		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
822 		   vf->relative_vf_id, num_rx_queues, (u16) cids);
823 	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
824 
825 	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
826 							     p_ptt,
827 							     vf,
828 							     num_rx_queues);
829 	if (!num_of_vf_avaiable_chains) {
830 		DP_ERR(p_hwfn, "no available igu sbs\n");
831 		return -ENOMEM;
832 	}
833 
834 	/* Choose queue number and index ranges */
835 	vf->num_rxqs = num_of_vf_avaiable_chains;
836 	vf->num_txqs = num_of_vf_avaiable_chains;
837 
838 	for (i = 0; i < vf->num_rxqs; i++) {
839 		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
840 							   vf->igu_sbs[i]);
841 
842 		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
843 			DP_NOTICE(p_hwfn,
844 				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
845 				  vf->relative_vf_id, queue_id);
846 			return -EINVAL;
847 		}
848 
849 		/* CIDs are per-VF, so no problem having them 0-based. */
850 		vf->vf_queues[i].fw_rx_qid = queue_id;
851 		vf->vf_queues[i].fw_tx_qid = queue_id;
852 		vf->vf_queues[i].fw_cid = i;
853 
854 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
855 			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
856 			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
857 	}
858 	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
859 	if (!rc) {
860 		vf->b_init = true;
861 
862 		if (IS_LEAD_HWFN(p_hwfn))
863 			p_hwfn->cdev->p_iov_info->num_vfs++;
864 	}
865 
866 	return rc;
867 }
868 
869 static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
870 			     u16 vfid,
871 			     struct qed_mcp_link_params *params,
872 			     struct qed_mcp_link_state *link,
873 			     struct qed_mcp_link_capabilities *p_caps)
874 {
875 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
876 						       vfid,
877 						       false);
878 	struct qed_bulletin_content *p_bulletin;
879 
880 	if (!p_vf)
881 		return;
882 
883 	p_bulletin = p_vf->bulletin.p_virt;
884 	p_bulletin->req_autoneg = params->speed.autoneg;
885 	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
886 	p_bulletin->req_forced_speed = params->speed.forced_speed;
887 	p_bulletin->req_autoneg_pause = params->pause.autoneg;
888 	p_bulletin->req_forced_rx = params->pause.forced_rx;
889 	p_bulletin->req_forced_tx = params->pause.forced_tx;
890 	p_bulletin->req_loopback = params->loopback_mode;
891 
892 	p_bulletin->link_up = link->link_up;
893 	p_bulletin->speed = link->speed;
894 	p_bulletin->full_duplex = link->full_duplex;
895 	p_bulletin->autoneg = link->an;
896 	p_bulletin->autoneg_complete = link->an_complete;
897 	p_bulletin->parallel_detection = link->parallel_detection;
898 	p_bulletin->pfc_enabled = link->pfc_enabled;
899 	p_bulletin->partner_adv_speed = link->partner_adv_speed;
900 	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
901 	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
902 	p_bulletin->partner_adv_pause = link->partner_adv_pause;
903 	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
904 
905 	p_bulletin->capability_speed = p_caps->speed_capabilities;
906 }
907 
908 static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
909 				     struct qed_ptt *p_ptt, u16 rel_vf_id)
910 {
911 	struct qed_mcp_link_capabilities caps;
912 	struct qed_mcp_link_params params;
913 	struct qed_mcp_link_state link;
914 	struct qed_vf_info *vf = NULL;
915 
916 	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
917 	if (!vf) {
918 		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
919 		return -EINVAL;
920 	}
921 
922 	if (vf->bulletin.p_virt)
923 		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
924 
925 	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
926 
927 	/* Get the link configuration back in bulletin so
928 	 * that when VFs are re-enabled they get the actual
929 	 * link configuration.
930 	 */
931 	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
932 	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
933 	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
934 	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
935 
936 	/* Forget the VF's acquisition message */
937 	memset(&vf->acquire, 0, sizeof(vf->acquire));
938 
939 	/* disablng interrupts and resetting permission table was done during
940 	 * vf-close, however, we could get here without going through vf_close
941 	 */
942 	/* Disable Interrupts for VF */
943 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
944 
945 	/* Reset Permission table */
946 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
947 
948 	vf->num_rxqs = 0;
949 	vf->num_txqs = 0;
950 	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
951 
952 	if (vf->b_init) {
953 		vf->b_init = false;
954 
955 		if (IS_LEAD_HWFN(p_hwfn))
956 			p_hwfn->cdev->p_iov_info->num_vfs--;
957 	}
958 
959 	return 0;
960 }
961 
962 static bool qed_iov_tlv_supported(u16 tlvtype)
963 {
964 	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
965 }
966 
967 /* place a given tlv on the tlv buffer, continuing current tlv list */
968 void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
969 {
970 	struct channel_tlv *tl = (struct channel_tlv *)*offset;
971 
972 	tl->type = type;
973 	tl->length = length;
974 
975 	/* Offset should keep pointing to next TLV (the end of the last) */
976 	*offset += length;
977 
978 	/* Return a pointer to the start of the added tlv */
979 	return *offset - length;
980 }
981 
982 /* list the types and lengths of the tlvs on the buffer */
983 void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
984 {
985 	u16 i = 1, total_length = 0;
986 	struct channel_tlv *tlv;
987 
988 	do {
989 		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
990 
991 		/* output tlv */
992 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
993 			   "TLV number %d: type %d, length %d\n",
994 			   i, tlv->type, tlv->length);
995 
996 		if (tlv->type == CHANNEL_TLV_LIST_END)
997 			return;
998 
999 		/* Validate entry - protect against malicious VFs */
1000 		if (!tlv->length) {
1001 			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
1002 			return;
1003 		}
1004 
1005 		total_length += tlv->length;
1006 
1007 		if (total_length >= sizeof(struct tlv_buffer_size)) {
1008 			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
1009 			return;
1010 		}
1011 
1012 		i++;
1013 	} while (1);
1014 }
1015 
1016 static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
1017 				  struct qed_ptt *p_ptt,
1018 				  struct qed_vf_info *p_vf,
1019 				  u16 length, u8 status)
1020 {
1021 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1022 	struct qed_dmae_params params;
1023 	u8 eng_vf_id;
1024 
1025 	mbx->reply_virt->default_resp.hdr.status = status;
1026 
1027 	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
1028 
1029 	eng_vf_id = p_vf->abs_vf_id;
1030 
1031 	memset(&params, 0, sizeof(struct qed_dmae_params));
1032 	params.flags = QED_DMAE_FLAG_VF_DST;
1033 	params.dst_vfid = eng_vf_id;
1034 
1035 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1036 			   mbx->req_virt->first_tlv.reply_address +
1037 			   sizeof(u64),
1038 			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1039 			   &params);
1040 
1041 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1042 			   mbx->req_virt->first_tlv.reply_address,
1043 			   sizeof(u64) / 4, &params);
1044 
1045 	REG_WR(p_hwfn,
1046 	       GTT_BAR0_MAP_REG_USDM_RAM +
1047 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1048 }
1049 
1050 static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
1051 				enum qed_iov_vport_update_flag flag)
1052 {
1053 	switch (flag) {
1054 	case QED_IOV_VP_UPDATE_ACTIVATE:
1055 		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1056 	case QED_IOV_VP_UPDATE_VLAN_STRIP:
1057 		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1058 	case QED_IOV_VP_UPDATE_TX_SWITCH:
1059 		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1060 	case QED_IOV_VP_UPDATE_MCAST:
1061 		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1062 	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
1063 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1064 	case QED_IOV_VP_UPDATE_RSS:
1065 		return CHANNEL_TLV_VPORT_UPDATE_RSS;
1066 	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1067 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1068 	case QED_IOV_VP_UPDATE_SGE_TPA:
1069 		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1070 	default:
1071 		return 0;
1072 	}
1073 }
1074 
1075 static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
1076 					    struct qed_vf_info *p_vf,
1077 					    struct qed_iov_vf_mbx *p_mbx,
1078 					    u8 status,
1079 					    u16 tlvs_mask, u16 tlvs_accepted)
1080 {
1081 	struct pfvf_def_resp_tlv *resp;
1082 	u16 size, total_len, i;
1083 
1084 	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1085 	p_mbx->offset = (u8 *)p_mbx->reply_virt;
1086 	size = sizeof(struct pfvf_def_resp_tlv);
1087 	total_len = size;
1088 
1089 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1090 
1091 	/* Prepare response for all extended tlvs if they are found by PF */
1092 	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
1093 		if (!(tlvs_mask & (1 << i)))
1094 			continue;
1095 
1096 		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
1097 				   qed_iov_vport_to_tlv(p_hwfn, i), size);
1098 
1099 		if (tlvs_accepted & (1 << i))
1100 			resp->hdr.status = status;
1101 		else
1102 			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1103 
1104 		DP_VERBOSE(p_hwfn,
1105 			   QED_MSG_IOV,
1106 			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
1107 			   p_vf->relative_vf_id,
1108 			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1109 
1110 		total_len += size;
1111 	}
1112 
1113 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1114 		    sizeof(struct channel_list_end_tlv));
1115 
1116 	return total_len;
1117 }
1118 
1119 static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
1120 				 struct qed_ptt *p_ptt,
1121 				 struct qed_vf_info *vf_info,
1122 				 u16 type, u16 length, u8 status)
1123 {
1124 	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1125 
1126 	mbx->offset = (u8 *)mbx->reply_virt;
1127 
1128 	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
1129 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1130 		    sizeof(struct channel_list_end_tlv));
1131 
1132 	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1133 }
1134 
1135 struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
1136 						      u16 relative_vf_id,
1137 						      bool b_enabled_only)
1138 {
1139 	struct qed_vf_info *vf = NULL;
1140 
1141 	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1142 	if (!vf)
1143 		return NULL;
1144 
1145 	return &vf->p_vf_info;
1146 }
1147 
1148 void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
1149 {
1150 	struct qed_public_vf_info *vf_info;
1151 
1152 	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
1153 
1154 	if (!vf_info)
1155 		return;
1156 
1157 	/* Clear the VF mac */
1158 	memset(vf_info->mac, 0, ETH_ALEN);
1159 }
1160 
1161 static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
1162 			       struct qed_vf_info *p_vf)
1163 {
1164 	u32 i;
1165 
1166 	p_vf->vf_bulletin = 0;
1167 	p_vf->vport_instance = 0;
1168 	p_vf->configured_features = 0;
1169 
1170 	/* If VF previously requested less resources, go back to default */
1171 	p_vf->num_rxqs = p_vf->num_sbs;
1172 	p_vf->num_txqs = p_vf->num_sbs;
1173 
1174 	p_vf->num_active_rxqs = 0;
1175 
1176 	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
1177 		p_vf->vf_queues[i].rxq_active = 0;
1178 
1179 	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1180 	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1181 	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
1182 }
1183 
1184 static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
1185 				      struct qed_ptt *p_ptt,
1186 				      struct qed_vf_info *p_vf,
1187 				      struct vf_pf_resc_request *p_req,
1188 				      struct pf_vf_resc *p_resp)
1189 {
1190 	int i;
1191 
1192 	/* Queue related information */
1193 	p_resp->num_rxqs = p_vf->num_rxqs;
1194 	p_resp->num_txqs = p_vf->num_txqs;
1195 	p_resp->num_sbs = p_vf->num_sbs;
1196 
1197 	for (i = 0; i < p_resp->num_sbs; i++) {
1198 		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1199 		p_resp->hw_sbs[i].sb_qid = 0;
1200 	}
1201 
1202 	/* These fields are filled for backward compatibility.
1203 	 * Unused by modern vfs.
1204 	 */
1205 	for (i = 0; i < p_resp->num_rxqs; i++) {
1206 		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1207 				(u16 *)&p_resp->hw_qid[i]);
1208 		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
1209 	}
1210 
1211 	/* Filter related information */
1212 	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
1213 					p_req->num_mac_filters);
1214 	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
1215 					 p_req->num_vlan_filters);
1216 
1217 	/* This isn't really needed/enforced, but some legacy VFs might depend
1218 	 * on the correct filling of this field.
1219 	 */
1220 	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
1221 
1222 	/* Validate sufficient resources for VF */
1223 	if (p_resp->num_rxqs < p_req->num_rxqs ||
1224 	    p_resp->num_txqs < p_req->num_txqs ||
1225 	    p_resp->num_sbs < p_req->num_sbs ||
1226 	    p_resp->num_mac_filters < p_req->num_mac_filters ||
1227 	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1228 	    p_resp->num_mc_filters < p_req->num_mc_filters) {
1229 		DP_VERBOSE(p_hwfn,
1230 			   QED_MSG_IOV,
1231 			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
1232 			   p_vf->abs_vf_id,
1233 			   p_req->num_rxqs,
1234 			   p_resp->num_rxqs,
1235 			   p_req->num_rxqs,
1236 			   p_resp->num_txqs,
1237 			   p_req->num_sbs,
1238 			   p_resp->num_sbs,
1239 			   p_req->num_mac_filters,
1240 			   p_resp->num_mac_filters,
1241 			   p_req->num_vlan_filters,
1242 			   p_resp->num_vlan_filters,
1243 			   p_req->num_mc_filters, p_resp->num_mc_filters);
1244 		return PFVF_STATUS_NO_RESOURCE;
1245 	}
1246 
1247 	return PFVF_STATUS_SUCCESS;
1248 }
1249 
1250 static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
1251 					 struct pfvf_stats_info *p_stats)
1252 {
1253 	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1254 				  offsetof(struct mstorm_vf_zone,
1255 					   non_trigger.eth_queue_stat);
1256 	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1257 	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1258 				  offsetof(struct ustorm_vf_zone,
1259 					   non_trigger.eth_queue_stat);
1260 	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1261 	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1262 				  offsetof(struct pstorm_vf_zone,
1263 					   non_trigger.eth_queue_stat);
1264 	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1265 	p_stats->tstats.address = 0;
1266 	p_stats->tstats.len = 0;
1267 }
1268 
1269 static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1270 				   struct qed_ptt *p_ptt,
1271 				   struct qed_vf_info *vf)
1272 {
1273 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1274 	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1275 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1276 	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1277 	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1278 	struct pf_vf_resc *resc = &resp->resc;
1279 	int rc;
1280 
1281 	memset(resp, 0, sizeof(*resp));
1282 
1283 	/* Validate FW compatibility */
1284 	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1285 		DP_INFO(p_hwfn,
1286 			"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
1287 			vf->abs_vf_id,
1288 			req->vfdev_info.eth_fp_hsi_major,
1289 			req->vfdev_info.eth_fp_hsi_minor,
1290 			ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1291 
1292 		/* Write the PF version so that VF would know which version
1293 		 * is supported.
1294 		 */
1295 		pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1296 		pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1297 
1298 		goto out;
1299 	}
1300 
1301 	/* On 100g PFs, prevent old VFs from loading */
1302 	if ((p_hwfn->cdev->num_hwfns > 1) &&
1303 	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1304 		DP_INFO(p_hwfn,
1305 			"VF[%d] is running an old driver that doesn't support 100g\n",
1306 			vf->abs_vf_id);
1307 		goto out;
1308 	}
1309 
1310 	/* Store the acquire message */
1311 	memcpy(&vf->acquire, req, sizeof(vf->acquire));
1312 
1313 	vf->opaque_fid = req->vfdev_info.opaque_fid;
1314 
1315 	vf->vf_bulletin = req->bulletin_addr;
1316 	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1317 			    vf->bulletin.size : req->bulletin_size;
1318 
1319 	/* fill in pfdev info */
1320 	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1321 	pfdev_info->db_size = 0;
1322 	pfdev_info->indices_per_sb = PIS_PER_SB;
1323 
1324 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1325 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1326 	if (p_hwfn->cdev->num_hwfns > 1)
1327 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1328 
1329 	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1330 
1331 	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1332 
1333 	pfdev_info->fw_major = FW_MAJOR_VERSION;
1334 	pfdev_info->fw_minor = FW_MINOR_VERSION;
1335 	pfdev_info->fw_rev = FW_REVISION_VERSION;
1336 	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1337 	pfdev_info->minor_fp_hsi = min_t(u8,
1338 					 ETH_HSI_VER_MINOR,
1339 					 req->vfdev_info.eth_fp_hsi_minor);
1340 	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1341 	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1342 
1343 	pfdev_info->dev_type = p_hwfn->cdev->type;
1344 	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1345 
1346 	/* Fill resources available to VF; Make sure there are enough to
1347 	 * satisfy the VF's request.
1348 	 */
1349 	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1350 						  &req->resc_request, resc);
1351 	if (vfpf_status != PFVF_STATUS_SUCCESS)
1352 		goto out;
1353 
1354 	/* Start the VF in FW */
1355 	rc = qed_sp_vf_start(p_hwfn, vf);
1356 	if (rc) {
1357 		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1358 		vfpf_status = PFVF_STATUS_FAILURE;
1359 		goto out;
1360 	}
1361 
1362 	/* Fill agreed size of bulletin board in response */
1363 	resp->bulletin_size = vf->bulletin.size;
1364 	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1365 
1366 	DP_VERBOSE(p_hwfn,
1367 		   QED_MSG_IOV,
1368 		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1369 		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1370 		   vf->abs_vf_id,
1371 		   resp->pfdev_info.chip_num,
1372 		   resp->pfdev_info.db_size,
1373 		   resp->pfdev_info.indices_per_sb,
1374 		   resp->pfdev_info.capabilities,
1375 		   resc->num_rxqs,
1376 		   resc->num_txqs,
1377 		   resc->num_sbs,
1378 		   resc->num_mac_filters,
1379 		   resc->num_vlan_filters);
1380 	vf->state = VF_ACQUIRED;
1381 
1382 	/* Prepare Response */
1383 out:
1384 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1385 			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1386 }
1387 
1388 static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
1389 				  struct qed_vf_info *p_vf, bool val)
1390 {
1391 	struct qed_sp_vport_update_params params;
1392 	int rc;
1393 
1394 	if (val == p_vf->spoof_chk) {
1395 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1396 			   "Spoofchk value[%d] is already configured\n", val);
1397 		return 0;
1398 	}
1399 
1400 	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
1401 	params.opaque_fid = p_vf->opaque_fid;
1402 	params.vport_id = p_vf->vport_id;
1403 	params.update_anti_spoofing_en_flg = 1;
1404 	params.anti_spoofing_en = val;
1405 
1406 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1407 	if (!rc) {
1408 		p_vf->spoof_chk = val;
1409 		p_vf->req_spoofchk_val = p_vf->spoof_chk;
1410 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1411 			   "Spoofchk val[%d] configured\n", val);
1412 	} else {
1413 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1414 			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1415 			   val, p_vf->relative_vf_id);
1416 	}
1417 
1418 	return rc;
1419 }
1420 
1421 static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
1422 					    struct qed_vf_info *p_vf)
1423 {
1424 	struct qed_filter_ucast filter;
1425 	int rc = 0;
1426 	int i;
1427 
1428 	memset(&filter, 0, sizeof(filter));
1429 	filter.is_rx_filter = 1;
1430 	filter.is_tx_filter = 1;
1431 	filter.vport_to_add_to = p_vf->vport_id;
1432 	filter.opcode = QED_FILTER_ADD;
1433 
1434 	/* Reconfigure vlans */
1435 	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1436 		if (!p_vf->shadow_config.vlans[i].used)
1437 			continue;
1438 
1439 		filter.type = QED_FILTER_VLAN;
1440 		filter.vlan = p_vf->shadow_config.vlans[i].vid;
1441 		DP_VERBOSE(p_hwfn,
1442 			   QED_MSG_IOV,
1443 			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1444 			   filter.vlan, p_vf->relative_vf_id);
1445 		rc = qed_sp_eth_filter_ucast(p_hwfn,
1446 					     p_vf->opaque_fid,
1447 					     &filter,
1448 					     QED_SPQ_MODE_CB, NULL);
1449 		if (rc) {
1450 			DP_NOTICE(p_hwfn,
1451 				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
1452 				  filter.vlan, p_vf->relative_vf_id);
1453 			break;
1454 		}
1455 	}
1456 
1457 	return rc;
1458 }
1459 
1460 static int
1461 qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
1462 				   struct qed_vf_info *p_vf, u64 events)
1463 {
1464 	int rc = 0;
1465 
1466 	if ((events & (1 << VLAN_ADDR_FORCED)) &&
1467 	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1468 		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1469 
1470 	return rc;
1471 }
1472 
1473 static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
1474 					  struct qed_vf_info *p_vf, u64 events)
1475 {
1476 	int rc = 0;
1477 	struct qed_filter_ucast filter;
1478 
1479 	if (!p_vf->vport_instance)
1480 		return -EINVAL;
1481 
1482 	if (events & (1 << MAC_ADDR_FORCED)) {
1483 		/* Since there's no way [currently] of removing the MAC,
1484 		 * we can always assume this means we need to force it.
1485 		 */
1486 		memset(&filter, 0, sizeof(filter));
1487 		filter.type = QED_FILTER_MAC;
1488 		filter.opcode = QED_FILTER_REPLACE;
1489 		filter.is_rx_filter = 1;
1490 		filter.is_tx_filter = 1;
1491 		filter.vport_to_add_to = p_vf->vport_id;
1492 		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1493 
1494 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1495 					     &filter, QED_SPQ_MODE_CB, NULL);
1496 		if (rc) {
1497 			DP_NOTICE(p_hwfn,
1498 				  "PF failed to configure MAC for VF\n");
1499 			return rc;
1500 		}
1501 
1502 		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1503 	}
1504 
1505 	if (events & (1 << VLAN_ADDR_FORCED)) {
1506 		struct qed_sp_vport_update_params vport_update;
1507 		u8 removal;
1508 		int i;
1509 
1510 		memset(&filter, 0, sizeof(filter));
1511 		filter.type = QED_FILTER_VLAN;
1512 		filter.is_rx_filter = 1;
1513 		filter.is_tx_filter = 1;
1514 		filter.vport_to_add_to = p_vf->vport_id;
1515 		filter.vlan = p_vf->bulletin.p_virt->pvid;
1516 		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
1517 					      QED_FILTER_FLUSH;
1518 
1519 		/* Send the ramrod */
1520 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1521 					     &filter, QED_SPQ_MODE_CB, NULL);
1522 		if (rc) {
1523 			DP_NOTICE(p_hwfn,
1524 				  "PF failed to configure VLAN for VF\n");
1525 			return rc;
1526 		}
1527 
1528 		/* Update the default-vlan & silent vlan stripping */
1529 		memset(&vport_update, 0, sizeof(vport_update));
1530 		vport_update.opaque_fid = p_vf->opaque_fid;
1531 		vport_update.vport_id = p_vf->vport_id;
1532 		vport_update.update_default_vlan_enable_flg = 1;
1533 		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1534 		vport_update.update_default_vlan_flg = 1;
1535 		vport_update.default_vlan = filter.vlan;
1536 
1537 		vport_update.update_inner_vlan_removal_flg = 1;
1538 		removal = filter.vlan ? 1
1539 				      : p_vf->shadow_config.inner_vlan_removal;
1540 		vport_update.inner_vlan_removal_flg = removal;
1541 		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1542 		rc = qed_sp_vport_update(p_hwfn,
1543 					 &vport_update,
1544 					 QED_SPQ_MODE_EBLOCK, NULL);
1545 		if (rc) {
1546 			DP_NOTICE(p_hwfn,
1547 				  "PF failed to configure VF vport for vlan\n");
1548 			return rc;
1549 		}
1550 
1551 		/* Update all the Rx queues */
1552 		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1553 			u16 qid;
1554 
1555 			if (!p_vf->vf_queues[i].rxq_active)
1556 				continue;
1557 
1558 			qid = p_vf->vf_queues[i].fw_rx_qid;
1559 
1560 			rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
1561 							 1, 0, 1,
1562 							 QED_SPQ_MODE_EBLOCK,
1563 							 NULL);
1564 			if (rc) {
1565 				DP_NOTICE(p_hwfn,
1566 					  "Failed to send Rx update fo queue[0x%04x]\n",
1567 					  qid);
1568 				return rc;
1569 			}
1570 		}
1571 
1572 		if (filter.vlan)
1573 			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1574 		else
1575 			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
1576 	}
1577 
1578 	/* If forced features are terminated, we need to configure the shadow
1579 	 * configuration back again.
1580 	 */
1581 	if (events)
1582 		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1583 
1584 	return rc;
1585 }
1586 
1587 static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1588 				       struct qed_ptt *p_ptt,
1589 				       struct qed_vf_info *vf)
1590 {
1591 	struct qed_sp_vport_start_params params = { 0 };
1592 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1593 	struct vfpf_vport_start_tlv *start;
1594 	u8 status = PFVF_STATUS_SUCCESS;
1595 	struct qed_vf_info *vf_info;
1596 	u64 *p_bitmap;
1597 	int sb_id;
1598 	int rc;
1599 
1600 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1601 	if (!vf_info) {
1602 		DP_NOTICE(p_hwfn->cdev,
1603 			  "Failed to get VF info, invalid vfid [%d]\n",
1604 			  vf->relative_vf_id);
1605 		return;
1606 	}
1607 
1608 	vf->state = VF_ENABLED;
1609 	start = &mbx->req_virt->start_vport;
1610 
1611 	/* Initialize Status block in CAU */
1612 	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1613 		if (!start->sb_addr[sb_id]) {
1614 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1615 				   "VF[%d] did not fill the address of SB %d\n",
1616 				   vf->relative_vf_id, sb_id);
1617 			break;
1618 		}
1619 
1620 		qed_int_cau_conf_sb(p_hwfn, p_ptt,
1621 				    start->sb_addr[sb_id],
1622 				    vf->igu_sbs[sb_id],
1623 				    vf->abs_vf_id, 1);
1624 	}
1625 	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1626 
1627 	vf->mtu = start->mtu;
1628 	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1629 
1630 	/* Take into consideration configuration forced by hypervisor;
1631 	 * If none is configured, use the supplied VF values [for old
1632 	 * vfs that would still be fine, since they passed '0' as padding].
1633 	 */
1634 	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1635 	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1636 		u8 vf_req = start->only_untagged;
1637 
1638 		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1639 		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1640 	}
1641 
1642 	params.tpa_mode = start->tpa_mode;
1643 	params.remove_inner_vlan = start->inner_vlan_removal;
1644 	params.tx_switching = true;
1645 
1646 	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1647 	params.drop_ttl0 = false;
1648 	params.concrete_fid = vf->concrete_fid;
1649 	params.opaque_fid = vf->opaque_fid;
1650 	params.vport_id = vf->vport_id;
1651 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1652 	params.mtu = vf->mtu;
1653 
1654 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
1655 	if (rc != 0) {
1656 		DP_ERR(p_hwfn,
1657 		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1658 		status = PFVF_STATUS_FAILURE;
1659 	} else {
1660 		vf->vport_instance++;
1661 
1662 		/* Force configuration if needed on the newly opened vport */
1663 		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1664 
1665 		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1666 	}
1667 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1668 			     sizeof(struct pfvf_def_resp_tlv), status);
1669 }
1670 
1671 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1672 				      struct qed_ptt *p_ptt,
1673 				      struct qed_vf_info *vf)
1674 {
1675 	u8 status = PFVF_STATUS_SUCCESS;
1676 	int rc;
1677 
1678 	vf->vport_instance--;
1679 	vf->spoof_chk = false;
1680 
1681 	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
1682 	if (rc != 0) {
1683 		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1684 		       rc);
1685 		status = PFVF_STATUS_FAILURE;
1686 	}
1687 
1688 	/* Forget the configuration on the vport */
1689 	vf->configured_features = 0;
1690 	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
1691 
1692 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1693 			     sizeof(struct pfvf_def_resp_tlv), status);
1694 }
1695 
1696 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1697 					  struct qed_ptt *p_ptt,
1698 					  struct qed_vf_info *vf, u8 status)
1699 {
1700 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1701 	struct pfvf_start_queue_resp_tlv *p_tlv;
1702 	struct vfpf_start_rxq_tlv *req;
1703 
1704 	mbx->offset = (u8 *)mbx->reply_virt;
1705 
1706 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
1707 			    sizeof(*p_tlv));
1708 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1709 		    sizeof(struct channel_list_end_tlv));
1710 
1711 	/* Update the TLV with the response */
1712 	if (status == PFVF_STATUS_SUCCESS) {
1713 		req = &mbx->req_virt->start_rxq;
1714 		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
1715 				offsetof(struct mstorm_vf_zone,
1716 					 non_trigger.eth_rx_queue_producers) +
1717 				sizeof(struct eth_rx_prod_data) * req->rx_qid;
1718 	}
1719 
1720 	qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
1721 }
1722 
1723 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1724 				     struct qed_ptt *p_ptt,
1725 				     struct qed_vf_info *vf)
1726 {
1727 	struct qed_queue_start_common_params params;
1728 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1729 	u8 status = PFVF_STATUS_NO_RESOURCE;
1730 	struct vfpf_start_rxq_tlv *req;
1731 	int rc;
1732 
1733 	memset(&params, 0, sizeof(params));
1734 	req = &mbx->req_virt->start_rxq;
1735 
1736 	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
1737 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1738 		goto out;
1739 
1740 	params.queue_id =  vf->vf_queues[req->rx_qid].fw_rx_qid;
1741 	params.vf_qid = req->rx_qid;
1742 	params.vport_id = vf->vport_id;
1743 	params.sb = req->hw_sb;
1744 	params.sb_idx = req->sb_index;
1745 
1746 	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
1747 					 vf->vf_queues[req->rx_qid].fw_cid,
1748 					 &params,
1749 					 vf->abs_vf_id + 0x10,
1750 					 req->bd_max_bytes,
1751 					 req->rxq_addr,
1752 					 req->cqe_pbl_addr, req->cqe_pbl_size);
1753 
1754 	if (rc) {
1755 		status = PFVF_STATUS_FAILURE;
1756 	} else {
1757 		status = PFVF_STATUS_SUCCESS;
1758 		vf->vf_queues[req->rx_qid].rxq_active = true;
1759 		vf->num_active_rxqs++;
1760 	}
1761 
1762 out:
1763 	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
1764 }
1765 
1766 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
1767 					  struct qed_ptt *p_ptt,
1768 					  struct qed_vf_info *p_vf, u8 status)
1769 {
1770 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1771 	struct pfvf_start_queue_resp_tlv *p_tlv;
1772 
1773 	mbx->offset = (u8 *)mbx->reply_virt;
1774 
1775 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
1776 			    sizeof(*p_tlv));
1777 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1778 		    sizeof(struct channel_list_end_tlv));
1779 
1780 	/* Update the TLV with the response */
1781 	if (status == PFVF_STATUS_SUCCESS) {
1782 		u16 qid = mbx->req_virt->start_txq.tx_qid;
1783 
1784 		p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
1785 					    DQ_DEMS_LEGACY);
1786 	}
1787 
1788 	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
1789 }
1790 
1791 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1792 				     struct qed_ptt *p_ptt,
1793 				     struct qed_vf_info *vf)
1794 {
1795 	struct qed_queue_start_common_params params;
1796 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1797 	u8 status = PFVF_STATUS_NO_RESOURCE;
1798 	union qed_qm_pq_params pq_params;
1799 	struct vfpf_start_txq_tlv *req;
1800 	int rc;
1801 
1802 	/* Prepare the parameters which would choose the right PQ */
1803 	memset(&pq_params, 0, sizeof(pq_params));
1804 	pq_params.eth.is_vf = 1;
1805 	pq_params.eth.vf_id = vf->relative_vf_id;
1806 
1807 	memset(&params, 0, sizeof(params));
1808 	req = &mbx->req_virt->start_txq;
1809 
1810 	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
1811 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1812 		goto out;
1813 
1814 	params.queue_id =  vf->vf_queues[req->tx_qid].fw_tx_qid;
1815 	params.vport_id = vf->vport_id;
1816 	params.sb = req->hw_sb;
1817 	params.sb_idx = req->sb_index;
1818 
1819 	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
1820 					 vf->opaque_fid,
1821 					 vf->vf_queues[req->tx_qid].fw_cid,
1822 					 &params,
1823 					 vf->abs_vf_id + 0x10,
1824 					 req->pbl_addr,
1825 					 req->pbl_size, &pq_params);
1826 
1827 	if (rc) {
1828 		status = PFVF_STATUS_FAILURE;
1829 	} else {
1830 		status = PFVF_STATUS_SUCCESS;
1831 		vf->vf_queues[req->tx_qid].txq_active = true;
1832 	}
1833 
1834 out:
1835 	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
1836 }
1837 
1838 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
1839 				struct qed_vf_info *vf,
1840 				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
1841 {
1842 	int rc = 0;
1843 	int qid;
1844 
1845 	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
1846 		return -EINVAL;
1847 
1848 	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
1849 		if (vf->vf_queues[qid].rxq_active) {
1850 			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1851 						      vf->vf_queues[qid].
1852 						      fw_rx_qid, false,
1853 						      cqe_completion);
1854 
1855 			if (rc)
1856 				return rc;
1857 		}
1858 		vf->vf_queues[qid].rxq_active = false;
1859 		vf->num_active_rxqs--;
1860 	}
1861 
1862 	return rc;
1863 }
1864 
1865 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
1866 				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
1867 {
1868 	int rc = 0;
1869 	int qid;
1870 
1871 	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
1872 		return -EINVAL;
1873 
1874 	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
1875 		if (vf->vf_queues[qid].txq_active) {
1876 			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1877 						      vf->vf_queues[qid].
1878 						      fw_tx_qid);
1879 
1880 			if (rc)
1881 				return rc;
1882 		}
1883 		vf->vf_queues[qid].txq_active = false;
1884 	}
1885 	return rc;
1886 }
1887 
1888 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
1889 				     struct qed_ptt *p_ptt,
1890 				     struct qed_vf_info *vf)
1891 {
1892 	u16 length = sizeof(struct pfvf_def_resp_tlv);
1893 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1894 	u8 status = PFVF_STATUS_SUCCESS;
1895 	struct vfpf_stop_rxqs_tlv *req;
1896 	int rc;
1897 
1898 	/* We give the option of starting from qid != 0, in this case we
1899 	 * need to make sure that qid + num_qs doesn't exceed the actual
1900 	 * amount of queues that exist.
1901 	 */
1902 	req = &mbx->req_virt->stop_rxqs;
1903 	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
1904 				  req->num_rxqs, req->cqe_completion);
1905 	if (rc)
1906 		status = PFVF_STATUS_FAILURE;
1907 
1908 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
1909 			     length, status);
1910 }
1911 
1912 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
1913 				     struct qed_ptt *p_ptt,
1914 				     struct qed_vf_info *vf)
1915 {
1916 	u16 length = sizeof(struct pfvf_def_resp_tlv);
1917 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1918 	u8 status = PFVF_STATUS_SUCCESS;
1919 	struct vfpf_stop_txqs_tlv *req;
1920 	int rc;
1921 
1922 	/* We give the option of starting from qid != 0, in this case we
1923 	 * need to make sure that qid + num_qs doesn't exceed the actual
1924 	 * amount of queues that exist.
1925 	 */
1926 	req = &mbx->req_virt->stop_txqs;
1927 	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
1928 	if (rc)
1929 		status = PFVF_STATUS_FAILURE;
1930 
1931 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
1932 			     length, status);
1933 }
1934 
1935 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
1936 				       struct qed_ptt *p_ptt,
1937 				       struct qed_vf_info *vf)
1938 {
1939 	u16 length = sizeof(struct pfvf_def_resp_tlv);
1940 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1941 	struct vfpf_update_rxq_tlv *req;
1942 	u8 status = PFVF_STATUS_SUCCESS;
1943 	u8 complete_event_flg;
1944 	u8 complete_cqe_flg;
1945 	u16 qid;
1946 	int rc;
1947 	u8 i;
1948 
1949 	req = &mbx->req_virt->update_rxq;
1950 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
1951 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
1952 
1953 	for (i = 0; i < req->num_rxqs; i++) {
1954 		qid = req->rx_qid + i;
1955 
1956 		if (!vf->vf_queues[qid].rxq_active) {
1957 			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
1958 				  qid);
1959 			status = PFVF_STATUS_FAILURE;
1960 			break;
1961 		}
1962 
1963 		rc = qed_sp_eth_rx_queues_update(p_hwfn,
1964 						 vf->vf_queues[qid].fw_rx_qid,
1965 						 1,
1966 						 complete_cqe_flg,
1967 						 complete_event_flg,
1968 						 QED_SPQ_MODE_EBLOCK, NULL);
1969 
1970 		if (rc) {
1971 			status = PFVF_STATUS_FAILURE;
1972 			break;
1973 		}
1974 	}
1975 
1976 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
1977 			     length, status);
1978 }
1979 
1980 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
1981 			       void *p_tlvs_list, u16 req_type)
1982 {
1983 	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
1984 	int len = 0;
1985 
1986 	do {
1987 		if (!p_tlv->length) {
1988 			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
1989 			return NULL;
1990 		}
1991 
1992 		if (p_tlv->type == req_type) {
1993 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1994 				   "Extended tlv type %d, length %d found\n",
1995 				   p_tlv->type, p_tlv->length);
1996 			return p_tlv;
1997 		}
1998 
1999 		len += p_tlv->length;
2000 		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2001 
2002 		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2003 			DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
2004 			return NULL;
2005 		}
2006 	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
2007 
2008 	return NULL;
2009 }
2010 
2011 static void
2012 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2013 			    struct qed_sp_vport_update_params *p_data,
2014 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2015 {
2016 	struct vfpf_vport_update_activate_tlv *p_act_tlv;
2017 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2018 
2019 	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2020 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2021 	if (!p_act_tlv)
2022 		return;
2023 
2024 	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2025 	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2026 	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2027 	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2028 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2029 }
2030 
2031 static void
2032 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2033 			     struct qed_sp_vport_update_params *p_data,
2034 			     struct qed_vf_info *p_vf,
2035 			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2036 {
2037 	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2038 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2039 
2040 	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2041 		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2042 	if (!p_vlan_tlv)
2043 		return;
2044 
2045 	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2046 
2047 	/* Ignore the VF request if we're forcing a vlan */
2048 	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2049 		p_data->update_inner_vlan_removal_flg = 1;
2050 		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2051 	}
2052 
2053 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2054 }
2055 
2056 static void
2057 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2058 			    struct qed_sp_vport_update_params *p_data,
2059 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2060 {
2061 	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2062 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2063 
2064 	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2065 			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2066 						   tlv);
2067 	if (!p_tx_switch_tlv)
2068 		return;
2069 
2070 	p_data->update_tx_switching_flg = 1;
2071 	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2072 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2073 }
2074 
2075 static void
2076 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2077 				  struct qed_sp_vport_update_params *p_data,
2078 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2079 {
2080 	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2081 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2082 
2083 	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2084 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2085 	if (!p_mcast_tlv)
2086 		return;
2087 
2088 	p_data->update_approx_mcast_flg = 1;
2089 	memcpy(p_data->bins, p_mcast_tlv->bins,
2090 	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2091 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2092 }
2093 
2094 static void
2095 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2096 			      struct qed_sp_vport_update_params *p_data,
2097 			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2098 {
2099 	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2100 	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2101 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2102 
2103 	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2104 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2105 	if (!p_accept_tlv)
2106 		return;
2107 
2108 	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2109 	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2110 	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2111 	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2112 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2113 }
2114 
2115 static void
2116 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2117 				  struct qed_sp_vport_update_params *p_data,
2118 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2119 {
2120 	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2121 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2122 
2123 	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2124 			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2125 						     tlv);
2126 	if (!p_accept_any_vlan)
2127 		return;
2128 
2129 	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2130 	p_data->update_accept_any_vlan_flg =
2131 		    p_accept_any_vlan->update_accept_any_vlan_flg;
2132 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2133 }
2134 
2135 static void
2136 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2137 			    struct qed_vf_info *vf,
2138 			    struct qed_sp_vport_update_params *p_data,
2139 			    struct qed_rss_params *p_rss,
2140 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2141 {
2142 	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2143 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2144 	u16 i, q_idx, max_q_idx;
2145 	u16 table_size;
2146 
2147 	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2148 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2149 	if (!p_rss_tlv) {
2150 		p_data->rss_params = NULL;
2151 		return;
2152 	}
2153 
2154 	memset(p_rss, 0, sizeof(struct qed_rss_params));
2155 
2156 	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2157 				      VFPF_UPDATE_RSS_CONFIG_FLAG);
2158 	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2159 					    VFPF_UPDATE_RSS_CAPS_FLAG);
2160 	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2161 					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2162 	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2163 				   VFPF_UPDATE_RSS_KEY_FLAG);
2164 
2165 	p_rss->rss_enable = p_rss_tlv->rss_enable;
2166 	p_rss->rss_eng_id = vf->relative_vf_id + 1;
2167 	p_rss->rss_caps = p_rss_tlv->rss_caps;
2168 	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2169 	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
2170 	       sizeof(p_rss->rss_ind_table));
2171 	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2172 
2173 	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2174 			   (1 << p_rss_tlv->rss_table_size_log));
2175 
2176 	max_q_idx = ARRAY_SIZE(vf->vf_queues);
2177 
2178 	for (i = 0; i < table_size; i++) {
2179 		u16 index = vf->vf_queues[0].fw_rx_qid;
2180 
2181 		q_idx = p_rss->rss_ind_table[i];
2182 		if (q_idx >= max_q_idx)
2183 			DP_NOTICE(p_hwfn,
2184 				  "rss_ind_table[%d] = %d, rxq is out of range\n",
2185 				  i, q_idx);
2186 		else if (!vf->vf_queues[q_idx].rxq_active)
2187 			DP_NOTICE(p_hwfn,
2188 				  "rss_ind_table[%d] = %d, rxq is not active\n",
2189 				  i, q_idx);
2190 		else
2191 			index = vf->vf_queues[q_idx].fw_rx_qid;
2192 		p_rss->rss_ind_table[i] = index;
2193 	}
2194 
2195 	p_data->rss_params = p_rss;
2196 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2197 }
2198 
2199 static void
2200 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2201 				struct qed_vf_info *vf,
2202 				struct qed_sp_vport_update_params *p_data,
2203 				struct qed_sge_tpa_params *p_sge_tpa,
2204 				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2205 {
2206 	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2207 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2208 
2209 	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2210 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2211 
2212 	if (!p_sge_tpa_tlv) {
2213 		p_data->sge_tpa_params = NULL;
2214 		return;
2215 	}
2216 
2217 	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2218 
2219 	p_sge_tpa->update_tpa_en_flg =
2220 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2221 	p_sge_tpa->update_tpa_param_flg =
2222 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2223 		VFPF_UPDATE_TPA_PARAM_FLAG);
2224 
2225 	p_sge_tpa->tpa_ipv4_en_flg =
2226 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2227 	p_sge_tpa->tpa_ipv6_en_flg =
2228 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2229 	p_sge_tpa->tpa_pkt_split_flg =
2230 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2231 	p_sge_tpa->tpa_hdr_data_split_flg =
2232 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2233 	p_sge_tpa->tpa_gro_consistent_flg =
2234 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2235 
2236 	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2237 	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2238 	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2239 	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2240 	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2241 
2242 	p_data->sge_tpa_params = p_sge_tpa;
2243 
2244 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2245 }
2246 
2247 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
2248 					struct qed_ptt *p_ptt,
2249 					struct qed_vf_info *vf)
2250 {
2251 	struct qed_sp_vport_update_params params;
2252 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2253 	struct qed_sge_tpa_params sge_tpa_params;
2254 	struct qed_rss_params rss_params;
2255 	u8 status = PFVF_STATUS_SUCCESS;
2256 	u16 tlvs_mask = 0;
2257 	u16 length;
2258 	int rc;
2259 
2260 	/* Valiate PF can send such a request */
2261 	if (!vf->vport_instance) {
2262 		DP_VERBOSE(p_hwfn,
2263 			   QED_MSG_IOV,
2264 			   "No VPORT instance available for VF[%d], failing vport update\n",
2265 			   vf->abs_vf_id);
2266 		status = PFVF_STATUS_FAILURE;
2267 		goto out;
2268 	}
2269 
2270 	memset(&params, 0, sizeof(params));
2271 	params.opaque_fid = vf->opaque_fid;
2272 	params.vport_id = vf->vport_id;
2273 	params.rss_params = NULL;
2274 
2275 	/* Search for extended tlvs list and update values
2276 	 * from VF in struct qed_sp_vport_update_params.
2277 	 */
2278 	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
2279 	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2280 	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
2281 	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2282 	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2283 	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
2284 				    mbx, &tlvs_mask);
2285 	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2286 	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2287 					&sge_tpa_params, mbx, &tlvs_mask);
2288 
2289 	/* Just log a message if there is no single extended tlv in buffer.
2290 	 * When all features of vport update ramrod would be requested by VF
2291 	 * as extended TLVs in buffer then an error can be returned in response
2292 	 * if there is no extended TLV present in buffer.
2293 	 */
2294 	if (!tlvs_mask) {
2295 		DP_NOTICE(p_hwfn,
2296 			  "No feature tlvs found for vport update\n");
2297 		status = PFVF_STATUS_NOT_SUPPORTED;
2298 		goto out;
2299 	}
2300 
2301 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
2302 
2303 	if (rc)
2304 		status = PFVF_STATUS_FAILURE;
2305 
2306 out:
2307 	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2308 						  tlvs_mask, tlvs_mask);
2309 	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2310 }
2311 
2312 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
2313 					 struct qed_vf_info *p_vf,
2314 					 struct qed_filter_ucast *p_params)
2315 {
2316 	int i;
2317 
2318 	/* First remove entries and then add new ones */
2319 	if (p_params->opcode == QED_FILTER_REMOVE) {
2320 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2321 			if (p_vf->shadow_config.vlans[i].used &&
2322 			    p_vf->shadow_config.vlans[i].vid ==
2323 			    p_params->vlan) {
2324 				p_vf->shadow_config.vlans[i].used = false;
2325 				break;
2326 			}
2327 		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2328 			DP_VERBOSE(p_hwfn,
2329 				   QED_MSG_IOV,
2330 				   "VF [%d] - Tries to remove a non-existing vlan\n",
2331 				   p_vf->relative_vf_id);
2332 			return -EINVAL;
2333 		}
2334 	} else if (p_params->opcode == QED_FILTER_REPLACE ||
2335 		   p_params->opcode == QED_FILTER_FLUSH) {
2336 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2337 			p_vf->shadow_config.vlans[i].used = false;
2338 	}
2339 
2340 	/* In forced mode, we're willing to remove entries - but we don't add
2341 	 * new ones.
2342 	 */
2343 	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
2344 		return 0;
2345 
2346 	if (p_params->opcode == QED_FILTER_ADD ||
2347 	    p_params->opcode == QED_FILTER_REPLACE) {
2348 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2349 			if (p_vf->shadow_config.vlans[i].used)
2350 				continue;
2351 
2352 			p_vf->shadow_config.vlans[i].used = true;
2353 			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2354 			break;
2355 		}
2356 
2357 		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2358 			DP_VERBOSE(p_hwfn,
2359 				   QED_MSG_IOV,
2360 				   "VF [%d] - Tries to configure more than %d vlan filters\n",
2361 				   p_vf->relative_vf_id,
2362 				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
2363 			return -EINVAL;
2364 		}
2365 	}
2366 
2367 	return 0;
2368 }
2369 
2370 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
2371 					struct qed_vf_info *p_vf,
2372 					struct qed_filter_ucast *p_params)
2373 {
2374 	int i;
2375 
2376 	/* If we're in forced-mode, we don't allow any change */
2377 	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
2378 		return 0;
2379 
2380 	/* First remove entries and then add new ones */
2381 	if (p_params->opcode == QED_FILTER_REMOVE) {
2382 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2383 			if (ether_addr_equal(p_vf->shadow_config.macs[i],
2384 					     p_params->mac)) {
2385 				memset(p_vf->shadow_config.macs[i], 0,
2386 				       ETH_ALEN);
2387 				break;
2388 			}
2389 		}
2390 
2391 		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2392 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2393 				   "MAC isn't configured\n");
2394 			return -EINVAL;
2395 		}
2396 	} else if (p_params->opcode == QED_FILTER_REPLACE ||
2397 		   p_params->opcode == QED_FILTER_FLUSH) {
2398 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
2399 			memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
2400 	}
2401 
2402 	/* List the new MAC address */
2403 	if (p_params->opcode != QED_FILTER_ADD &&
2404 	    p_params->opcode != QED_FILTER_REPLACE)
2405 		return 0;
2406 
2407 	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2408 		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
2409 			ether_addr_copy(p_vf->shadow_config.macs[i],
2410 					p_params->mac);
2411 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2412 				   "Added MAC at %d entry in shadow\n", i);
2413 			break;
2414 		}
2415 	}
2416 
2417 	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2418 		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
2419 		return -EINVAL;
2420 	}
2421 
2422 	return 0;
2423 }
2424 
2425 static int
2426 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
2427 				 struct qed_vf_info *p_vf,
2428 				 struct qed_filter_ucast *p_params)
2429 {
2430 	int rc = 0;
2431 
2432 	if (p_params->type == QED_FILTER_MAC) {
2433 		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
2434 		if (rc)
2435 			return rc;
2436 	}
2437 
2438 	if (p_params->type == QED_FILTER_VLAN)
2439 		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
2440 
2441 	return rc;
2442 }
2443 
2444 int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
2445 		      int vfid, struct qed_filter_ucast *params)
2446 {
2447 	struct qed_public_vf_info *vf;
2448 
2449 	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
2450 	if (!vf)
2451 		return -EINVAL;
2452 
2453 	/* No real decision to make; Store the configured MAC */
2454 	if (params->type == QED_FILTER_MAC ||
2455 	    params->type == QED_FILTER_MAC_VLAN)
2456 		ether_addr_copy(vf->mac, params->mac);
2457 
2458 	return 0;
2459 }
2460 
2461 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
2462 					struct qed_ptt *p_ptt,
2463 					struct qed_vf_info *vf)
2464 {
2465 	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
2466 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2467 	struct vfpf_ucast_filter_tlv *req;
2468 	u8 status = PFVF_STATUS_SUCCESS;
2469 	struct qed_filter_ucast params;
2470 	int rc;
2471 
2472 	/* Prepare the unicast filter params */
2473 	memset(&params, 0, sizeof(struct qed_filter_ucast));
2474 	req = &mbx->req_virt->ucast_filter;
2475 	params.opcode = (enum qed_filter_opcode)req->opcode;
2476 	params.type = (enum qed_filter_ucast_type)req->type;
2477 
2478 	params.is_rx_filter = 1;
2479 	params.is_tx_filter = 1;
2480 	params.vport_to_remove_from = vf->vport_id;
2481 	params.vport_to_add_to = vf->vport_id;
2482 	memcpy(params.mac, req->mac, ETH_ALEN);
2483 	params.vlan = req->vlan;
2484 
2485 	DP_VERBOSE(p_hwfn,
2486 		   QED_MSG_IOV,
2487 		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
2488 		   vf->abs_vf_id, params.opcode, params.type,
2489 		   params.is_rx_filter ? "RX" : "",
2490 		   params.is_tx_filter ? "TX" : "",
2491 		   params.vport_to_add_to,
2492 		   params.mac[0], params.mac[1],
2493 		   params.mac[2], params.mac[3],
2494 		   params.mac[4], params.mac[5], params.vlan);
2495 
2496 	if (!vf->vport_instance) {
2497 		DP_VERBOSE(p_hwfn,
2498 			   QED_MSG_IOV,
2499 			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
2500 			   vf->abs_vf_id);
2501 		status = PFVF_STATUS_FAILURE;
2502 		goto out;
2503 	}
2504 
2505 	/* Update shadow copy of the VF configuration */
2506 	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
2507 		status = PFVF_STATUS_FAILURE;
2508 		goto out;
2509 	}
2510 
2511 	/* Determine if the unicast filtering is acceptible by PF */
2512 	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
2513 	    (params.type == QED_FILTER_VLAN ||
2514 	     params.type == QED_FILTER_MAC_VLAN)) {
2515 		/* Once VLAN is forced or PVID is set, do not allow
2516 		 * to add/replace any further VLANs.
2517 		 */
2518 		if (params.opcode == QED_FILTER_ADD ||
2519 		    params.opcode == QED_FILTER_REPLACE)
2520 			status = PFVF_STATUS_FORCED;
2521 		goto out;
2522 	}
2523 
2524 	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
2525 	    (params.type == QED_FILTER_MAC ||
2526 	     params.type == QED_FILTER_MAC_VLAN)) {
2527 		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
2528 		    (params.opcode != QED_FILTER_ADD &&
2529 		     params.opcode != QED_FILTER_REPLACE))
2530 			status = PFVF_STATUS_FORCED;
2531 		goto out;
2532 	}
2533 
2534 	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
2535 	if (rc) {
2536 		status = PFVF_STATUS_FAILURE;
2537 		goto out;
2538 	}
2539 
2540 	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
2541 				     QED_SPQ_MODE_CB, NULL);
2542 	if (rc)
2543 		status = PFVF_STATUS_FAILURE;
2544 
2545 out:
2546 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
2547 			     sizeof(struct pfvf_def_resp_tlv), status);
2548 }
2549 
2550 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
2551 				       struct qed_ptt *p_ptt,
2552 				       struct qed_vf_info *vf)
2553 {
2554 	int i;
2555 
2556 	/* Reset the SBs */
2557 	for (i = 0; i < vf->num_sbs; i++)
2558 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2559 						vf->igu_sbs[i],
2560 						vf->opaque_fid, false);
2561 
2562 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
2563 			     sizeof(struct pfvf_def_resp_tlv),
2564 			     PFVF_STATUS_SUCCESS);
2565 }
2566 
2567 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
2568 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
2569 {
2570 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2571 	u8 status = PFVF_STATUS_SUCCESS;
2572 
2573 	/* Disable Interrupts for VF */
2574 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
2575 
2576 	/* Reset Permission table */
2577 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
2578 
2579 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
2580 			     length, status);
2581 }
2582 
2583 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
2584 				   struct qed_ptt *p_ptt,
2585 				   struct qed_vf_info *p_vf)
2586 {
2587 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2588 	u8 status = PFVF_STATUS_SUCCESS;
2589 	int rc = 0;
2590 
2591 	qed_iov_vf_cleanup(p_hwfn, p_vf);
2592 
2593 	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
2594 		/* Stopping the VF */
2595 		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
2596 				    p_vf->opaque_fid);
2597 
2598 		if (rc) {
2599 			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
2600 			       rc);
2601 			status = PFVF_STATUS_FAILURE;
2602 		}
2603 
2604 		p_vf->state = VF_STOPPED;
2605 	}
2606 
2607 	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
2608 			     length, status);
2609 }
2610 
2611 static int
2612 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
2613 			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2614 {
2615 	int cnt;
2616 	u32 val;
2617 
2618 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
2619 
2620 	for (cnt = 0; cnt < 50; cnt++) {
2621 		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
2622 		if (!val)
2623 			break;
2624 		msleep(20);
2625 	}
2626 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
2627 
2628 	if (cnt == 50) {
2629 		DP_ERR(p_hwfn,
2630 		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
2631 		       p_vf->abs_vf_id, val);
2632 		return -EBUSY;
2633 	}
2634 
2635 	return 0;
2636 }
2637 
2638 static int
2639 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
2640 			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2641 {
2642 	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
2643 	int i, cnt;
2644 
2645 	/* Read initial consumers & producers */
2646 	for (i = 0; i < MAX_NUM_VOQS; i++) {
2647 		u32 prod;
2648 
2649 		cons[i] = qed_rd(p_hwfn, p_ptt,
2650 				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2651 				 i * 0x40);
2652 		prod = qed_rd(p_hwfn, p_ptt,
2653 			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
2654 			      i * 0x40);
2655 		distance[i] = prod - cons[i];
2656 	}
2657 
2658 	/* Wait for consumers to pass the producers */
2659 	i = 0;
2660 	for (cnt = 0; cnt < 50; cnt++) {
2661 		for (; i < MAX_NUM_VOQS; i++) {
2662 			u32 tmp;
2663 
2664 			tmp = qed_rd(p_hwfn, p_ptt,
2665 				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2666 				     i * 0x40);
2667 			if (distance[i] > tmp - cons[i])
2668 				break;
2669 		}
2670 
2671 		if (i == MAX_NUM_VOQS)
2672 			break;
2673 
2674 		msleep(20);
2675 	}
2676 
2677 	if (cnt == 50) {
2678 		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
2679 		       p_vf->abs_vf_id, i);
2680 		return -EBUSY;
2681 	}
2682 
2683 	return 0;
2684 }
2685 
2686 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
2687 			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2688 {
2689 	int rc;
2690 
2691 	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
2692 	if (rc)
2693 		return rc;
2694 
2695 	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
2696 	if (rc)
2697 		return rc;
2698 
2699 	return 0;
2700 }
2701 
2702 static int
2703 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
2704 			       struct qed_ptt *p_ptt,
2705 			       u16 rel_vf_id, u32 *ack_vfs)
2706 {
2707 	struct qed_vf_info *p_vf;
2708 	int rc = 0;
2709 
2710 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
2711 	if (!p_vf)
2712 		return 0;
2713 
2714 	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
2715 	    (1ULL << (rel_vf_id % 64))) {
2716 		u16 vfid = p_vf->abs_vf_id;
2717 
2718 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2719 			   "VF[%d] - Handling FLR\n", vfid);
2720 
2721 		qed_iov_vf_cleanup(p_hwfn, p_vf);
2722 
2723 		/* If VF isn't active, no need for anything but SW */
2724 		if (!p_vf->b_init)
2725 			goto cleanup;
2726 
2727 		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
2728 		if (rc)
2729 			goto cleanup;
2730 
2731 		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
2732 		if (rc) {
2733 			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
2734 			return rc;
2735 		}
2736 
2737 		/* VF_STOPPED has to be set only after final cleanup
2738 		 * but prior to re-enabling the VF.
2739 		 */
2740 		p_vf->state = VF_STOPPED;
2741 
2742 		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
2743 		if (rc) {
2744 			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
2745 			       vfid);
2746 			return rc;
2747 		}
2748 cleanup:
2749 		/* Mark VF for ack and clean pending state */
2750 		if (p_vf->state == VF_RESET)
2751 			p_vf->state = VF_STOPPED;
2752 		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
2753 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
2754 		    ~(1ULL << (rel_vf_id % 64));
2755 		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
2756 		    ~(1ULL << (rel_vf_id % 64));
2757 	}
2758 
2759 	return rc;
2760 }
2761 
2762 int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2763 {
2764 	u32 ack_vfs[VF_MAX_STATIC / 32];
2765 	int rc = 0;
2766 	u16 i;
2767 
2768 	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
2769 
2770 	/* Since BRB <-> PRS interface can't be tested as part of the flr
2771 	 * polling due to HW limitations, simply sleep a bit. And since
2772 	 * there's no need to wait per-vf, do it before looping.
2773 	 */
2774 	msleep(100);
2775 
2776 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
2777 		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
2778 
2779 	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
2780 	return rc;
2781 }
2782 
2783 int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
2784 {
2785 	u16 i, found = 0;
2786 
2787 	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
2788 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
2789 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2790 			   "[%08x,...,%08x]: %08x\n",
2791 			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
2792 
2793 	if (!p_hwfn->cdev->p_iov_info) {
2794 		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
2795 		return 0;
2796 	}
2797 
2798 	/* Mark VFs */
2799 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
2800 		struct qed_vf_info *p_vf;
2801 		u8 vfid;
2802 
2803 		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
2804 		if (!p_vf)
2805 			continue;
2806 
2807 		vfid = p_vf->abs_vf_id;
2808 		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
2809 			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
2810 			u16 rel_vf_id = p_vf->relative_vf_id;
2811 
2812 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2813 				   "VF[%d] [rel %d] got FLR-ed\n",
2814 				   vfid, rel_vf_id);
2815 
2816 			p_vf->state = VF_RESET;
2817 
2818 			/* No need to lock here, since pending_flr should
2819 			 * only change here and before ACKing MFw. Since
2820 			 * MFW will not trigger an additional attention for
2821 			 * VF flr until ACKs, we're safe.
2822 			 */
2823 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
2824 			found = 1;
2825 		}
2826 	}
2827 
2828 	return found;
2829 }
2830 
2831 static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
2832 			     u16 vfid,
2833 			     struct qed_mcp_link_params *p_params,
2834 			     struct qed_mcp_link_state *p_link,
2835 			     struct qed_mcp_link_capabilities *p_caps)
2836 {
2837 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
2838 						       vfid,
2839 						       false);
2840 	struct qed_bulletin_content *p_bulletin;
2841 
2842 	if (!p_vf)
2843 		return;
2844 
2845 	p_bulletin = p_vf->bulletin.p_virt;
2846 
2847 	if (p_params)
2848 		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
2849 	if (p_link)
2850 		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
2851 	if (p_caps)
2852 		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
2853 }
2854 
2855 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
2856 				    struct qed_ptt *p_ptt, int vfid)
2857 {
2858 	struct qed_iov_vf_mbx *mbx;
2859 	struct qed_vf_info *p_vf;
2860 
2861 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
2862 	if (!p_vf)
2863 		return;
2864 
2865 	mbx = &p_vf->vf_mbx;
2866 
2867 	/* qed_iov_process_mbx_request */
2868 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2869 		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
2870 
2871 	mbx->first_tlv = mbx->req_virt->first_tlv;
2872 
2873 	/* check if tlv type is known */
2874 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
2875 		switch (mbx->first_tlv.tl.type) {
2876 		case CHANNEL_TLV_ACQUIRE:
2877 			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
2878 			break;
2879 		case CHANNEL_TLV_VPORT_START:
2880 			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
2881 			break;
2882 		case CHANNEL_TLV_VPORT_TEARDOWN:
2883 			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
2884 			break;
2885 		case CHANNEL_TLV_START_RXQ:
2886 			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
2887 			break;
2888 		case CHANNEL_TLV_START_TXQ:
2889 			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
2890 			break;
2891 		case CHANNEL_TLV_STOP_RXQS:
2892 			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
2893 			break;
2894 		case CHANNEL_TLV_STOP_TXQS:
2895 			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
2896 			break;
2897 		case CHANNEL_TLV_UPDATE_RXQ:
2898 			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
2899 			break;
2900 		case CHANNEL_TLV_VPORT_UPDATE:
2901 			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
2902 			break;
2903 		case CHANNEL_TLV_UCAST_FILTER:
2904 			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
2905 			break;
2906 		case CHANNEL_TLV_CLOSE:
2907 			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
2908 			break;
2909 		case CHANNEL_TLV_INT_CLEANUP:
2910 			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
2911 			break;
2912 		case CHANNEL_TLV_RELEASE:
2913 			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
2914 			break;
2915 		}
2916 	} else {
2917 		/* unknown TLV - this may belong to a VF driver from the future
2918 		 * - a version written after this PF driver was written, which
2919 		 * supports features unknown as of yet. Too bad since we don't
2920 		 * support them. Or this may be because someone wrote a crappy
2921 		 * VF driver and is sending garbage over the channel.
2922 		 */
2923 		DP_NOTICE(p_hwfn,
2924 			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
2925 			  p_vf->abs_vf_id,
2926 			  mbx->first_tlv.tl.type,
2927 			  mbx->first_tlv.tl.length,
2928 			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);
2929 
2930 		/* Try replying in case reply address matches the acquisition's
2931 		 * posted address.
2932 		 */
2933 		if (p_vf->acquire.first_tlv.reply_address &&
2934 		    (mbx->first_tlv.reply_address ==
2935 		     p_vf->acquire.first_tlv.reply_address)) {
2936 			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
2937 					     mbx->first_tlv.tl.type,
2938 					     sizeof(struct pfvf_def_resp_tlv),
2939 					     PFVF_STATUS_NOT_SUPPORTED);
2940 		} else {
2941 			DP_VERBOSE(p_hwfn,
2942 				   QED_MSG_IOV,
2943 				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
2944 				   p_vf->abs_vf_id);
2945 		}
2946 	}
2947 }
2948 
2949 void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
2950 {
2951 	u64 add_bit = 1ULL << (vfid % 64);
2952 
2953 	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
2954 }
2955 
2956 static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
2957 						    u64 *events)
2958 {
2959 	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
2960 
2961 	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
2962 	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
2963 }
2964 
2965 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
2966 			      u16 abs_vfid, struct regpair *vf_msg)
2967 {
2968 	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
2969 	struct qed_vf_info *p_vf;
2970 
2971 	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
2972 		DP_VERBOSE(p_hwfn,
2973 			   QED_MSG_IOV,
2974 			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
2975 			   abs_vfid);
2976 		return 0;
2977 	}
2978 	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
2979 
2980 	/* List the physical address of the request so that handler
2981 	 * could later on copy the message from it.
2982 	 */
2983 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
2984 
2985 	/* Mark the event and schedule the workqueue */
2986 	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
2987 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
2988 
2989 	return 0;
2990 }
2991 
2992 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
2993 			u8 opcode, __le16 echo, union event_ring_data *data)
2994 {
2995 	switch (opcode) {
2996 	case COMMON_EVENT_VF_PF_CHANNEL:
2997 		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
2998 					  &data->vf_pf_channel.msg_addr);
2999 	default:
3000 		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
3001 			opcode);
3002 		return -EINVAL;
3003 	}
3004 }
3005 
3006 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3007 {
3008 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
3009 	u16 i;
3010 
3011 	if (!p_iov)
3012 		goto out;
3013 
3014 	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
3015 		if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
3016 			return i;
3017 
3018 out:
3019 	return MAX_NUM_VFS;
3020 }
3021 
3022 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
3023 			       int vfid)
3024 {
3025 	struct qed_dmae_params params;
3026 	struct qed_vf_info *vf_info;
3027 
3028 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3029 	if (!vf_info)
3030 		return -EINVAL;
3031 
3032 	memset(&params, 0, sizeof(struct qed_dmae_params));
3033 	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
3034 	params.src_vfid = vf_info->abs_vf_id;
3035 
3036 	if (qed_dmae_host2host(p_hwfn, ptt,
3037 			       vf_info->vf_mbx.pending_req,
3038 			       vf_info->vf_mbx.req_phys,
3039 			       sizeof(union vfpf_tlvs) / 4, &params)) {
3040 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3041 			   "Failed to copy message from VF 0x%02x\n", vfid);
3042 
3043 		return -EIO;
3044 	}
3045 
3046 	return 0;
3047 }
3048 
3049 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
3050 					    u8 *mac, int vfid)
3051 {
3052 	struct qed_vf_info *vf_info;
3053 	u64 feature;
3054 
3055 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3056 	if (!vf_info) {
3057 		DP_NOTICE(p_hwfn->cdev,
3058 			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3059 		return;
3060 	}
3061 
3062 	feature = 1 << MAC_ADDR_FORCED;
3063 	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3064 
3065 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
3066 	/* Forced MAC will disable MAC_ADDR */
3067 	vf_info->bulletin.p_virt->valid_bitmap &=
3068 				~(1 << VFPF_BULLETIN_MAC_ADDR);
3069 
3070 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3071 }
3072 
3073 void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
3074 				      u16 pvid, int vfid)
3075 {
3076 	struct qed_vf_info *vf_info;
3077 	u64 feature;
3078 
3079 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3080 	if (!vf_info) {
3081 		DP_NOTICE(p_hwfn->cdev,
3082 			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3083 		return;
3084 	}
3085 
3086 	feature = 1 << VLAN_ADDR_FORCED;
3087 	vf_info->bulletin.p_virt->pvid = pvid;
3088 	if (pvid)
3089 		vf_info->bulletin.p_virt->valid_bitmap |= feature;
3090 	else
3091 		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3092 
3093 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3094 }
3095 
3096 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
3097 {
3098 	struct qed_vf_info *p_vf_info;
3099 
3100 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3101 	if (!p_vf_info)
3102 		return false;
3103 
3104 	return !!p_vf_info->vport_instance;
3105 }
3106 
3107 bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
3108 {
3109 	struct qed_vf_info *p_vf_info;
3110 
3111 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3112 	if (!p_vf_info)
3113 		return true;
3114 
3115 	return p_vf_info->state == VF_STOPPED;
3116 }
3117 
3118 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
3119 {
3120 	struct qed_vf_info *vf_info;
3121 
3122 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3123 	if (!vf_info)
3124 		return false;
3125 
3126 	return vf_info->spoof_chk;
3127 }
3128 
3129 int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
3130 {
3131 	struct qed_vf_info *vf;
3132 	int rc = -EINVAL;
3133 
3134 	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3135 		DP_NOTICE(p_hwfn,
3136 			  "SR-IOV sanity check failed, can't set spoofchk\n");
3137 		goto out;
3138 	}
3139 
3140 	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3141 	if (!vf)
3142 		goto out;
3143 
3144 	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
3145 		/* After VF VPORT start PF will configure spoof check */
3146 		vf->req_spoofchk_val = val;
3147 		rc = 0;
3148 		goto out;
3149 	}
3150 
3151 	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
3152 
3153 out:
3154 	return rc;
3155 }
3156 
3157 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
3158 					   u16 rel_vf_id)
3159 {
3160 	struct qed_vf_info *p_vf;
3161 
3162 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3163 	if (!p_vf || !p_vf->bulletin.p_virt)
3164 		return NULL;
3165 
3166 	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
3167 		return NULL;
3168 
3169 	return p_vf->bulletin.p_virt->mac;
3170 }
3171 
3172 u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3173 {
3174 	struct qed_vf_info *p_vf;
3175 
3176 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3177 	if (!p_vf || !p_vf->bulletin.p_virt)
3178 		return 0;
3179 
3180 	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
3181 		return 0;
3182 
3183 	return p_vf->bulletin.p_virt->pvid;
3184 }
3185 
3186 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
3187 				     struct qed_ptt *p_ptt, int vfid, int val)
3188 {
3189 	struct qed_vf_info *vf;
3190 	u8 abs_vp_id = 0;
3191 	int rc;
3192 
3193 	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3194 	if (!vf)
3195 		return -EINVAL;
3196 
3197 	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
3198 	if (rc)
3199 		return rc;
3200 
3201 	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
3202 }
3203 
3204 int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
3205 {
3206 	struct qed_vf_info *vf;
3207 	u8 vport_id;
3208 	int i;
3209 
3210 	for_each_hwfn(cdev, i) {
3211 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3212 
3213 		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3214 			DP_NOTICE(p_hwfn,
3215 				  "SR-IOV sanity check failed, can't set min rate\n");
3216 			return -EINVAL;
3217 		}
3218 	}
3219 
3220 	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
3221 	vport_id = vf->vport_id;
3222 
3223 	return qed_configure_vport_wfq(cdev, vport_id, rate);
3224 }
3225 
3226 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
3227 {
3228 	struct qed_wfq_data *vf_vp_wfq;
3229 	struct qed_vf_info *vf_info;
3230 
3231 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3232 	if (!vf_info)
3233 		return 0;
3234 
3235 	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
3236 
3237 	if (vf_vp_wfq->configured)
3238 		return vf_vp_wfq->min_speed;
3239 	else
3240 		return 0;
3241 }
3242 
3243 /**
3244  * qed_schedule_iov - schedules IOV task for VF and PF
3245  * @hwfn: hardware function pointer
3246  * @flag: IOV flag for VF/PF
3247  */
3248 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
3249 {
3250 	smp_mb__before_atomic();
3251 	set_bit(flag, &hwfn->iov_task_flags);
3252 	smp_mb__after_atomic();
3253 	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3254 	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
3255 }
3256 
3257 void qed_vf_start_iov_wq(struct qed_dev *cdev)
3258 {
3259 	int i;
3260 
3261 	for_each_hwfn(cdev, i)
3262 	    queue_delayed_work(cdev->hwfns[i].iov_wq,
3263 			       &cdev->hwfns[i].iov_task, 0);
3264 }
3265 
3266 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
3267 {
3268 	int i, j;
3269 
3270 	for_each_hwfn(cdev, i)
3271 	    if (cdev->hwfns[i].iov_wq)
3272 		flush_workqueue(cdev->hwfns[i].iov_wq);
3273 
3274 	/* Mark VFs for disablement */
3275 	qed_iov_set_vfs_to_disable(cdev, true);
3276 
3277 	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
3278 		pci_disable_sriov(cdev->pdev);
3279 
3280 	for_each_hwfn(cdev, i) {
3281 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3282 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3283 
3284 		/* Failure to acquire the ptt in 100g creates an odd error
3285 		 * where the first engine has already relased IOV.
3286 		 */
3287 		if (!ptt) {
3288 			DP_ERR(hwfn, "Failed to acquire ptt\n");
3289 			return -EBUSY;
3290 		}
3291 
3292 		/* Clean WFQ db and configure equal weight for all vports */
3293 		qed_clean_wfq_db(hwfn, ptt);
3294 
3295 		qed_for_each_vf(hwfn, j) {
3296 			int k;
3297 
3298 			if (!qed_iov_is_valid_vfid(hwfn, j, true))
3299 				continue;
3300 
3301 			/* Wait until VF is disabled before releasing */
3302 			for (k = 0; k < 100; k++) {
3303 				if (!qed_iov_is_vf_stopped(hwfn, j))
3304 					msleep(20);
3305 				else
3306 					break;
3307 			}
3308 
3309 			if (k < 100)
3310 				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
3311 							  ptt, j);
3312 			else
3313 				DP_ERR(hwfn,
3314 				       "Timeout waiting for VF's FLR to end\n");
3315 		}
3316 
3317 		qed_ptt_release(hwfn, ptt);
3318 	}
3319 
3320 	qed_iov_set_vfs_to_disable(cdev, false);
3321 
3322 	return 0;
3323 }
3324 
3325 static int qed_sriov_enable(struct qed_dev *cdev, int num)
3326 {
3327 	struct qed_sb_cnt_info sb_cnt_info;
3328 	int i, j, rc;
3329 
3330 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
3331 		DP_NOTICE(cdev, "Can start at most %d VFs\n",
3332 			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
3333 		return -EINVAL;
3334 	}
3335 
3336 	/* Initialize HW for VF access */
3337 	for_each_hwfn(cdev, j) {
3338 		struct qed_hwfn *hwfn = &cdev->hwfns[j];
3339 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3340 		int num_sbs = 0, limit = 16;
3341 
3342 		if (!ptt) {
3343 			DP_ERR(hwfn, "Failed to acquire ptt\n");
3344 			rc = -EBUSY;
3345 			goto err;
3346 		}
3347 
3348 		if (IS_MF_DEFAULT(hwfn))
3349 			limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
3350 
3351 		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
3352 		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
3353 		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
3354 
3355 		for (i = 0; i < num; i++) {
3356 			if (!qed_iov_is_valid_vfid(hwfn, i, false))
3357 				continue;
3358 
3359 			rc = qed_iov_init_hw_for_vf(hwfn,
3360 						    ptt, i, num_sbs / num);
3361 			if (rc) {
3362 				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
3363 				qed_ptt_release(hwfn, ptt);
3364 				goto err;
3365 			}
3366 		}
3367 
3368 		qed_ptt_release(hwfn, ptt);
3369 	}
3370 
3371 	/* Enable SRIOV PCIe functions */
3372 	rc = pci_enable_sriov(cdev->pdev, num);
3373 	if (rc) {
3374 		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
3375 		goto err;
3376 	}
3377 
3378 	return num;
3379 
3380 err:
3381 	qed_sriov_disable(cdev, false);
3382 	return rc;
3383 }
3384 
3385 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
3386 {
3387 	if (!IS_QED_SRIOV(cdev)) {
3388 		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
3389 		return -EOPNOTSUPP;
3390 	}
3391 
3392 	if (num_vfs_param)
3393 		return qed_sriov_enable(cdev, num_vfs_param);
3394 	else
3395 		return qed_sriov_disable(cdev, true);
3396 }
3397 
3398 static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
3399 {
3400 	int i;
3401 
3402 	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3403 		DP_VERBOSE(cdev, QED_MSG_IOV,
3404 			   "Cannot set a VF MAC; Sriov is not enabled\n");
3405 		return -EINVAL;
3406 	}
3407 
3408 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
3409 		DP_VERBOSE(cdev, QED_MSG_IOV,
3410 			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
3411 		return -EINVAL;
3412 	}
3413 
3414 	for_each_hwfn(cdev, i) {
3415 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3416 		struct qed_public_vf_info *vf_info;
3417 
3418 		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3419 		if (!vf_info)
3420 			continue;
3421 
3422 		/* Set the forced MAC, and schedule the IOV task */
3423 		ether_addr_copy(vf_info->forced_mac, mac);
3424 		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3425 	}
3426 
3427 	return 0;
3428 }
3429 
3430 static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
3431 {
3432 	int i;
3433 
3434 	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3435 		DP_VERBOSE(cdev, QED_MSG_IOV,
3436 			   "Cannot set a VF MAC; Sriov is not enabled\n");
3437 		return -EINVAL;
3438 	}
3439 
3440 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
3441 		DP_VERBOSE(cdev, QED_MSG_IOV,
3442 			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
3443 		return -EINVAL;
3444 	}
3445 
3446 	for_each_hwfn(cdev, i) {
3447 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3448 		struct qed_public_vf_info *vf_info;
3449 
3450 		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3451 		if (!vf_info)
3452 			continue;
3453 
3454 		/* Set the forced vlan, and schedule the IOV task */
3455 		vf_info->forced_vlan = vid;
3456 		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3457 	}
3458 
3459 	return 0;
3460 }
3461 
3462 static int qed_get_vf_config(struct qed_dev *cdev,
3463 			     int vf_id, struct ifla_vf_info *ivi)
3464 {
3465 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
3466 	struct qed_public_vf_info *vf_info;
3467 	struct qed_mcp_link_state link;
3468 	u32 tx_rate;
3469 
3470 	/* Sanitize request */
3471 	if (IS_VF(cdev))
3472 		return -EINVAL;
3473 
3474 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
3475 		DP_VERBOSE(cdev, QED_MSG_IOV,
3476 			   "VF index [%d] isn't active\n", vf_id);
3477 		return -EINVAL;
3478 	}
3479 
3480 	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3481 
3482 	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
3483 
3484 	/* Fill information about VF */
3485 	ivi->vf = vf_id;
3486 
3487 	if (is_valid_ether_addr(vf_info->forced_mac))
3488 		ether_addr_copy(ivi->mac, vf_info->forced_mac);
3489 	else
3490 		ether_addr_copy(ivi->mac, vf_info->mac);
3491 
3492 	ivi->vlan = vf_info->forced_vlan;
3493 	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
3494 	ivi->linkstate = vf_info->link_state;
3495 	tx_rate = vf_info->tx_rate;
3496 	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
3497 	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
3498 
3499 	return 0;
3500 }
3501 
3502 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
3503 {
3504 	struct qed_mcp_link_capabilities caps;
3505 	struct qed_mcp_link_params params;
3506 	struct qed_mcp_link_state link;
3507 	int i;
3508 
3509 	if (!hwfn->pf_iov_info)
3510 		return;
3511 
3512 	/* Update bulletin of all future possible VFs with link configuration */
3513 	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
3514 		struct qed_public_vf_info *vf_info;
3515 
3516 		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
3517 		if (!vf_info)
3518 			continue;
3519 
3520 		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
3521 		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
3522 		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
3523 		       sizeof(caps));
3524 
3525 		/* Modify link according to the VF's configured link state */
3526 		switch (vf_info->link_state) {
3527 		case IFLA_VF_LINK_STATE_DISABLE:
3528 			link.link_up = false;
3529 			break;
3530 		case IFLA_VF_LINK_STATE_ENABLE:
3531 			link.link_up = true;
3532 			/* Set speed according to maximum supported by HW.
3533 			 * that is 40G for regular devices and 100G for CMT
3534 			 * mode devices.
3535 			 */
3536 			link.speed = (hwfn->cdev->num_hwfns > 1) ?
3537 				     100000 : 40000;
3538 		default:
3539 			/* In auto mode pass PF link image to VF */
3540 			break;
3541 		}
3542 
3543 		if (link.link_up && vf_info->tx_rate) {
3544 			struct qed_ptt *ptt;
3545 			int rate;
3546 
3547 			rate = min_t(int, vf_info->tx_rate, link.speed);
3548 
3549 			ptt = qed_ptt_acquire(hwfn);
3550 			if (!ptt) {
3551 				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
3552 				return;
3553 			}
3554 
3555 			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
3556 				vf_info->tx_rate = rate;
3557 				link.speed = rate;
3558 			}
3559 
3560 			qed_ptt_release(hwfn, ptt);
3561 		}
3562 
3563 		qed_iov_set_link(hwfn, i, &params, &link, &caps);
3564 	}
3565 
3566 	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3567 }
3568 
3569 static int qed_set_vf_link_state(struct qed_dev *cdev,
3570 				 int vf_id, int link_state)
3571 {
3572 	int i;
3573 
3574 	/* Sanitize request */
3575 	if (IS_VF(cdev))
3576 		return -EINVAL;
3577 
3578 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
3579 		DP_VERBOSE(cdev, QED_MSG_IOV,
3580 			   "VF index [%d] isn't active\n", vf_id);
3581 		return -EINVAL;
3582 	}
3583 
3584 	/* Handle configuration of link state */
3585 	for_each_hwfn(cdev, i) {
3586 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3587 		struct qed_public_vf_info *vf;
3588 
3589 		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3590 		if (!vf)
3591 			continue;
3592 
3593 		if (vf->link_state == link_state)
3594 			continue;
3595 
3596 		vf->link_state = link_state;
3597 		qed_inform_vf_link_state(&cdev->hwfns[i]);
3598 	}
3599 
3600 	return 0;
3601 }
3602 
3603 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
3604 {
3605 	int i, rc = -EINVAL;
3606 
3607 	for_each_hwfn(cdev, i) {
3608 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3609 
3610 		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
3611 		if (rc)
3612 			break;
3613 	}
3614 
3615 	return rc;
3616 }
3617 
3618 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
3619 {
3620 	int i;
3621 
3622 	for_each_hwfn(cdev, i) {
3623 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3624 		struct qed_public_vf_info *vf;
3625 
3626 		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3627 			DP_NOTICE(p_hwfn,
3628 				  "SR-IOV sanity check failed, can't set tx rate\n");
3629 			return -EINVAL;
3630 		}
3631 
3632 		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
3633 
3634 		vf->tx_rate = rate;
3635 
3636 		qed_inform_vf_link_state(p_hwfn);
3637 	}
3638 
3639 	return 0;
3640 }
3641 
3642 static int qed_set_vf_rate(struct qed_dev *cdev,
3643 			   int vfid, u32 min_rate, u32 max_rate)
3644 {
3645 	int rc_min = 0, rc_max = 0;
3646 
3647 	if (max_rate)
3648 		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
3649 
3650 	if (min_rate)
3651 		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
3652 
3653 	if (rc_max | rc_min)
3654 		return -EINVAL;
3655 
3656 	return 0;
3657 }
3658 
3659 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
3660 {
3661 	u64 events[QED_VF_ARRAY_LENGTH];
3662 	struct qed_ptt *ptt;
3663 	int i;
3664 
3665 	ptt = qed_ptt_acquire(hwfn);
3666 	if (!ptt) {
3667 		DP_VERBOSE(hwfn, QED_MSG_IOV,
3668 			   "Can't acquire PTT; re-scheduling\n");
3669 		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
3670 		return;
3671 	}
3672 
3673 	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
3674 
3675 	DP_VERBOSE(hwfn, QED_MSG_IOV,
3676 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
3677 		   events[0], events[1], events[2]);
3678 
3679 	qed_for_each_vf(hwfn, i) {
3680 		/* Skip VFs with no pending messages */
3681 		if (!(events[i / 64] & (1ULL << (i % 64))))
3682 			continue;
3683 
3684 		DP_VERBOSE(hwfn, QED_MSG_IOV,
3685 			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
3686 			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3687 
3688 		/* Copy VF's message to PF's request buffer for that VF */
3689 		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
3690 			continue;
3691 
3692 		qed_iov_process_mbx_req(hwfn, ptt, i);
3693 	}
3694 
3695 	qed_ptt_release(hwfn, ptt);
3696 }
3697 
3698 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
3699 {
3700 	int i;
3701 
3702 	qed_for_each_vf(hwfn, i) {
3703 		struct qed_public_vf_info *info;
3704 		bool update = false;
3705 		u8 *mac;
3706 
3707 		info = qed_iov_get_public_vf_info(hwfn, i, true);
3708 		if (!info)
3709 			continue;
3710 
3711 		/* Update data on bulletin board */
3712 		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
3713 		if (is_valid_ether_addr(info->forced_mac) &&
3714 		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
3715 			DP_VERBOSE(hwfn,
3716 				   QED_MSG_IOV,
3717 				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
3718 				   i,
3719 				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3720 
3721 			/* Update bulletin board with forced MAC */
3722 			qed_iov_bulletin_set_forced_mac(hwfn,
3723 							info->forced_mac, i);
3724 			update = true;
3725 		}
3726 
3727 		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
3728 		    info->forced_vlan) {
3729 			DP_VERBOSE(hwfn,
3730 				   QED_MSG_IOV,
3731 				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
3732 				   info->forced_vlan,
3733 				   i,
3734 				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
3735 			qed_iov_bulletin_set_forced_vlan(hwfn,
3736 							 info->forced_vlan, i);
3737 			update = true;
3738 		}
3739 
3740 		if (update)
3741 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3742 	}
3743 }
3744 
3745 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
3746 {
3747 	struct qed_ptt *ptt;
3748 	int i;
3749 
3750 	ptt = qed_ptt_acquire(hwfn);
3751 	if (!ptt) {
3752 		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
3753 		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3754 		return;
3755 	}
3756 
3757 	qed_for_each_vf(hwfn, i)
3758 	    qed_iov_post_vf_bulletin(hwfn, i, ptt);
3759 
3760 	qed_ptt_release(hwfn, ptt);
3761 }
3762 
3763 void qed_iov_pf_task(struct work_struct *work)
3764 {
3765 	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
3766 					     iov_task.work);
3767 	int rc;
3768 
3769 	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
3770 		return;
3771 
3772 	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
3773 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3774 
3775 		if (!ptt) {
3776 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
3777 			return;
3778 		}
3779 
3780 		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
3781 		if (rc)
3782 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
3783 
3784 		qed_ptt_release(hwfn, ptt);
3785 	}
3786 
3787 	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
3788 		qed_handle_vf_msg(hwfn);
3789 
3790 	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
3791 			       &hwfn->iov_task_flags))
3792 		qed_handle_pf_set_vf_unicast(hwfn);
3793 
3794 	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
3795 			       &hwfn->iov_task_flags))
3796 		qed_handle_bulletin_post(hwfn);
3797 }
3798 
3799 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
3800 {
3801 	int i;
3802 
3803 	for_each_hwfn(cdev, i) {
3804 		if (!cdev->hwfns[i].iov_wq)
3805 			continue;
3806 
3807 		if (schedule_first) {
3808 			qed_schedule_iov(&cdev->hwfns[i],
3809 					 QED_IOV_WQ_STOP_WQ_FLAG);
3810 			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
3811 		}
3812 
3813 		flush_workqueue(cdev->hwfns[i].iov_wq);
3814 		destroy_workqueue(cdev->hwfns[i].iov_wq);
3815 	}
3816 }
3817 
3818 int qed_iov_wq_start(struct qed_dev *cdev)
3819 {
3820 	char name[NAME_SIZE];
3821 	int i;
3822 
3823 	for_each_hwfn(cdev, i) {
3824 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3825 
3826 		/* PFs needs a dedicated workqueue only if they support IOV.
3827 		 * VFs always require one.
3828 		 */
3829 		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
3830 			continue;
3831 
3832 		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
3833 			 cdev->pdev->bus->number,
3834 			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
3835 
3836 		p_hwfn->iov_wq = create_singlethread_workqueue(name);
3837 		if (!p_hwfn->iov_wq) {
3838 			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
3839 			return -ENOMEM;
3840 		}
3841 
3842 		if (IS_PF(cdev))
3843 			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
3844 		else
3845 			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
3846 	}
3847 
3848 	return 0;
3849 }
3850 
3851 const struct qed_iov_hv_ops qed_iov_ops_pass = {
3852 	.configure = &qed_sriov_configure,
3853 	.set_mac = &qed_sriov_pf_set_mac,
3854 	.set_vlan = &qed_sriov_pf_set_vlan,
3855 	.get_config = &qed_get_vf_config,
3856 	.set_link_state = &qed_set_vf_link_state,
3857 	.set_spoof = &qed_spoof_configure,
3858 	.set_rate = &qed_set_vf_rate,
3859 };
3860