1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/etherdevice.h>
34 #include <linux/crc32.h>
35 #include <linux/vmalloc.h>
36 #include <linux/crash_dump.h>
37 #include <linux/qed/qed_iov_if.h>
38 #include "qed_cxt.h"
39 #include "qed_hsi.h"
40 #include "qed_hw.h"
41 #include "qed_init_ops.h"
42 #include "qed_int.h"
43 #include "qed_mcp.h"
44 #include "qed_reg_addr.h"
45 #include "qed_sp.h"
46 #include "qed_sriov.h"
47 #include "qed_vf.h"
48 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
49 			       u8 opcode,
50 			       __le16 echo,
51 			       union event_ring_data *data, u8 fw_return_code);
52 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
53 
54 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
55 {
56 	u8 legacy = 0;
57 
58 	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
59 	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
60 		legacy |= QED_QCID_LEGACY_VF_RX_PROD;
61 
62 	if (!(p_vf->acquire.vfdev_info.capabilities &
63 	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
64 		legacy |= QED_QCID_LEGACY_VF_CID;
65 
66 	return legacy;
67 }
68 
69 /* IOV ramrods */
70 static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
71 {
72 	struct vf_start_ramrod_data *p_ramrod = NULL;
73 	struct qed_spq_entry *p_ent = NULL;
74 	struct qed_sp_init_data init_data;
75 	int rc = -EINVAL;
76 	u8 fp_minor;
77 
78 	/* Get SPQ entry */
79 	memset(&init_data, 0, sizeof(init_data));
80 	init_data.cid = qed_spq_get_cid(p_hwfn);
81 	init_data.opaque_fid = p_vf->opaque_fid;
82 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
83 
84 	rc = qed_sp_init_request(p_hwfn, &p_ent,
85 				 COMMON_RAMROD_VF_START,
86 				 PROTOCOLID_COMMON, &init_data);
87 	if (rc)
88 		return rc;
89 
90 	p_ramrod = &p_ent->ramrod.vf_start;
91 
92 	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
93 	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
94 
95 	switch (p_hwfn->hw_info.personality) {
96 	case QED_PCI_ETH:
97 		p_ramrod->personality = PERSONALITY_ETH;
98 		break;
99 	case QED_PCI_ETH_ROCE:
100 		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
101 		break;
102 	default:
103 		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
104 			  p_hwfn->hw_info.personality);
105 		qed_sp_destroy_request(p_hwfn, p_ent);
106 		return -EINVAL;
107 	}
108 
109 	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
110 	if (fp_minor > ETH_HSI_VER_MINOR &&
111 	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
112 		DP_VERBOSE(p_hwfn,
113 			   QED_MSG_IOV,
114 			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
115 			   p_vf->abs_vf_id,
116 			   ETH_HSI_VER_MAJOR,
117 			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
118 		fp_minor = ETH_HSI_VER_MINOR;
119 	}
120 
121 	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
122 	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
123 
124 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
125 		   "VF[%d] - Starting using HSI %02x.%02x\n",
126 		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
127 
128 	return qed_spq_post(p_hwfn, p_ent, NULL);
129 }
130 
131 static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
132 			  u32 concrete_vfid, u16 opaque_vfid)
133 {
134 	struct vf_stop_ramrod_data *p_ramrod = NULL;
135 	struct qed_spq_entry *p_ent = NULL;
136 	struct qed_sp_init_data init_data;
137 	int rc = -EINVAL;
138 
139 	/* Get SPQ entry */
140 	memset(&init_data, 0, sizeof(init_data));
141 	init_data.cid = qed_spq_get_cid(p_hwfn);
142 	init_data.opaque_fid = opaque_vfid;
143 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
144 
145 	rc = qed_sp_init_request(p_hwfn, &p_ent,
146 				 COMMON_RAMROD_VF_STOP,
147 				 PROTOCOLID_COMMON, &init_data);
148 	if (rc)
149 		return rc;
150 
151 	p_ramrod = &p_ent->ramrod.vf_stop;
152 
153 	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
154 
155 	return qed_spq_post(p_hwfn, p_ent, NULL);
156 }
157 
158 bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
159 			   int rel_vf_id,
160 			   bool b_enabled_only, bool b_non_malicious)
161 {
162 	if (!p_hwfn->pf_iov_info) {
163 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
164 		return false;
165 	}
166 
167 	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
168 	    (rel_vf_id < 0))
169 		return false;
170 
171 	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
172 	    b_enabled_only)
173 		return false;
174 
175 	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
176 	    b_non_malicious)
177 		return false;
178 
179 	return true;
180 }
181 
182 static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
183 					       u16 relative_vf_id,
184 					       bool b_enabled_only)
185 {
186 	struct qed_vf_info *vf = NULL;
187 
188 	if (!p_hwfn->pf_iov_info) {
189 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
190 		return NULL;
191 	}
192 
193 	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
194 				  b_enabled_only, false))
195 		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
196 	else
197 		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
198 		       relative_vf_id);
199 
200 	return vf;
201 }
202 
203 static struct qed_queue_cid *
204 qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
205 {
206 	int i;
207 
208 	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
209 		if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
210 			return p_queue->cids[i].p_cid;
211 	}
212 
213 	return NULL;
214 }
215 
216 enum qed_iov_validate_q_mode {
217 	QED_IOV_VALIDATE_Q_NA,
218 	QED_IOV_VALIDATE_Q_ENABLE,
219 	QED_IOV_VALIDATE_Q_DISABLE,
220 };
221 
222 static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
223 					struct qed_vf_info *p_vf,
224 					u16 qid,
225 					enum qed_iov_validate_q_mode mode,
226 					bool b_is_tx)
227 {
228 	int i;
229 
230 	if (mode == QED_IOV_VALIDATE_Q_NA)
231 		return true;
232 
233 	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
234 		struct qed_vf_queue_cid *p_qcid;
235 
236 		p_qcid = &p_vf->vf_queues[qid].cids[i];
237 
238 		if (!p_qcid->p_cid)
239 			continue;
240 
241 		if (p_qcid->b_is_tx != b_is_tx)
242 			continue;
243 
244 		return mode == QED_IOV_VALIDATE_Q_ENABLE;
245 	}
246 
247 	/* In case we haven't found any valid cid, then its disabled */
248 	return mode == QED_IOV_VALIDATE_Q_DISABLE;
249 }
250 
251 static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
252 				 struct qed_vf_info *p_vf,
253 				 u16 rx_qid,
254 				 enum qed_iov_validate_q_mode mode)
255 {
256 	if (rx_qid >= p_vf->num_rxqs) {
257 		DP_VERBOSE(p_hwfn,
258 			   QED_MSG_IOV,
259 			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
260 			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
261 		return false;
262 	}
263 
264 	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
265 }
266 
267 static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
268 				 struct qed_vf_info *p_vf,
269 				 u16 tx_qid,
270 				 enum qed_iov_validate_q_mode mode)
271 {
272 	if (tx_qid >= p_vf->num_txqs) {
273 		DP_VERBOSE(p_hwfn,
274 			   QED_MSG_IOV,
275 			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
276 			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
277 		return false;
278 	}
279 
280 	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
281 }
282 
283 static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
284 				struct qed_vf_info *p_vf, u16 sb_idx)
285 {
286 	int i;
287 
288 	for (i = 0; i < p_vf->num_sbs; i++)
289 		if (p_vf->igu_sbs[i] == sb_idx)
290 			return true;
291 
292 	DP_VERBOSE(p_hwfn,
293 		   QED_MSG_IOV,
294 		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
295 		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
296 
297 	return false;
298 }
299 
300 static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
301 					struct qed_vf_info *p_vf)
302 {
303 	u8 i;
304 
305 	for (i = 0; i < p_vf->num_rxqs; i++)
306 		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
307 						QED_IOV_VALIDATE_Q_ENABLE,
308 						false))
309 			return true;
310 
311 	return false;
312 }
313 
314 static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
315 					struct qed_vf_info *p_vf)
316 {
317 	u8 i;
318 
319 	for (i = 0; i < p_vf->num_txqs; i++)
320 		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
321 						QED_IOV_VALIDATE_Q_ENABLE,
322 						true))
323 			return true;
324 
325 	return false;
326 }
327 
328 static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
329 				    int vfid, struct qed_ptt *p_ptt)
330 {
331 	struct qed_bulletin_content *p_bulletin;
332 	int crc_size = sizeof(p_bulletin->crc);
333 	struct qed_dmae_params params;
334 	struct qed_vf_info *p_vf;
335 
336 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
337 	if (!p_vf)
338 		return -EINVAL;
339 
340 	if (!p_vf->vf_bulletin)
341 		return -EINVAL;
342 
343 	p_bulletin = p_vf->bulletin.p_virt;
344 
345 	/* Increment bulletin board version and compute crc */
346 	p_bulletin->version++;
347 	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
348 				p_vf->bulletin.size - crc_size);
349 
350 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
351 		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
352 		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
353 
354 	/* propagate bulletin board via dmae to vm memory */
355 	memset(&params, 0, sizeof(params));
356 	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
357 	params.dst_vfid = p_vf->abs_vf_id;
358 	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
359 				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
360 				  &params);
361 }
362 
363 static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
364 {
365 	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
366 	int pos = iov->pos;
367 
368 	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
369 	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
370 
371 	pci_read_config_word(cdev->pdev,
372 			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
373 	pci_read_config_word(cdev->pdev,
374 			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
375 
376 	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
377 	if (iov->num_vfs) {
378 		DP_VERBOSE(cdev,
379 			   QED_MSG_IOV,
380 			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
381 		iov->num_vfs = 0;
382 	}
383 
384 	pci_read_config_word(cdev->pdev,
385 			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
386 
387 	pci_read_config_word(cdev->pdev,
388 			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
389 
390 	pci_read_config_word(cdev->pdev,
391 			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
392 
393 	pci_read_config_dword(cdev->pdev,
394 			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
395 
396 	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
397 
398 	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
399 
400 	DP_VERBOSE(cdev,
401 		   QED_MSG_IOV,
402 		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
403 		   iov->nres,
404 		   iov->cap,
405 		   iov->ctrl,
406 		   iov->total_vfs,
407 		   iov->initial_vfs,
408 		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
409 
410 	/* Some sanity checks */
411 	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
412 	    iov->total_vfs > NUM_OF_VFS(cdev)) {
413 		/* This can happen only due to a bug. In this case we set
414 		 * num_vfs to zero to avoid memory corruption in the code that
415 		 * assumes max number of vfs
416 		 */
417 		DP_NOTICE(cdev,
418 			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
419 			  iov->num_vfs);
420 
421 		iov->num_vfs = 0;
422 		iov->total_vfs = 0;
423 	}
424 
425 	return 0;
426 }
427 
428 static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
429 {
430 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
431 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
432 	struct qed_bulletin_content *p_bulletin_virt;
433 	dma_addr_t req_p, rply_p, bulletin_p;
434 	union pfvf_tlvs *p_reply_virt_addr;
435 	union vfpf_tlvs *p_req_virt_addr;
436 	u8 idx = 0;
437 
438 	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
439 
440 	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
441 	req_p = p_iov_info->mbx_msg_phys_addr;
442 	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
443 	rply_p = p_iov_info->mbx_reply_phys_addr;
444 	p_bulletin_virt = p_iov_info->p_bulletins;
445 	bulletin_p = p_iov_info->bulletins_phys;
446 	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
447 		DP_ERR(p_hwfn,
448 		       "qed_iov_setup_vfdb called without allocating mem first\n");
449 		return;
450 	}
451 
452 	for (idx = 0; idx < p_iov->total_vfs; idx++) {
453 		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
454 		u32 concrete;
455 
456 		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
457 		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
458 		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
459 		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
460 
461 		vf->state = VF_STOPPED;
462 		vf->b_init = false;
463 
464 		vf->bulletin.phys = idx *
465 				    sizeof(struct qed_bulletin_content) +
466 				    bulletin_p;
467 		vf->bulletin.p_virt = p_bulletin_virt + idx;
468 		vf->bulletin.size = sizeof(struct qed_bulletin_content);
469 
470 		vf->relative_vf_id = idx;
471 		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
472 		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
473 		vf->concrete_fid = concrete;
474 		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
475 				 (vf->abs_vf_id << 8);
476 		vf->vport_id = idx + 1;
477 
478 		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
479 		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
480 	}
481 }
482 
483 static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
484 {
485 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
486 	void **p_v_addr;
487 	u16 num_vfs = 0;
488 
489 	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
490 
491 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
492 		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
493 
494 	/* Allocate PF Mailbox buffer (per-VF) */
495 	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
496 	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
497 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
498 				       p_iov_info->mbx_msg_size,
499 				       &p_iov_info->mbx_msg_phys_addr,
500 				       GFP_KERNEL);
501 	if (!*p_v_addr)
502 		return -ENOMEM;
503 
504 	/* Allocate PF Mailbox Reply buffer (per-VF) */
505 	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
506 	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
507 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
508 				       p_iov_info->mbx_reply_size,
509 				       &p_iov_info->mbx_reply_phys_addr,
510 				       GFP_KERNEL);
511 	if (!*p_v_addr)
512 		return -ENOMEM;
513 
514 	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
515 				     num_vfs;
516 	p_v_addr = &p_iov_info->p_bulletins;
517 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
518 				       p_iov_info->bulletins_size,
519 				       &p_iov_info->bulletins_phys,
520 				       GFP_KERNEL);
521 	if (!*p_v_addr)
522 		return -ENOMEM;
523 
524 	DP_VERBOSE(p_hwfn,
525 		   QED_MSG_IOV,
526 		   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
527 		   p_iov_info->mbx_msg_virt_addr,
528 		   (u64) p_iov_info->mbx_msg_phys_addr,
529 		   p_iov_info->mbx_reply_virt_addr,
530 		   (u64) p_iov_info->mbx_reply_phys_addr,
531 		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
532 
533 	return 0;
534 }
535 
536 static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
537 {
538 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
539 
540 	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
541 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
542 				  p_iov_info->mbx_msg_size,
543 				  p_iov_info->mbx_msg_virt_addr,
544 				  p_iov_info->mbx_msg_phys_addr);
545 
546 	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
547 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
548 				  p_iov_info->mbx_reply_size,
549 				  p_iov_info->mbx_reply_virt_addr,
550 				  p_iov_info->mbx_reply_phys_addr);
551 
552 	if (p_iov_info->p_bulletins)
553 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
554 				  p_iov_info->bulletins_size,
555 				  p_iov_info->p_bulletins,
556 				  p_iov_info->bulletins_phys);
557 }
558 
559 int qed_iov_alloc(struct qed_hwfn *p_hwfn)
560 {
561 	struct qed_pf_iov *p_sriov;
562 
563 	if (!IS_PF_SRIOV(p_hwfn)) {
564 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
565 			   "No SR-IOV - no need for IOV db\n");
566 		return 0;
567 	}
568 
569 	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
570 	if (!p_sriov)
571 		return -ENOMEM;
572 
573 	p_hwfn->pf_iov_info = p_sriov;
574 
575 	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
576 				  qed_sriov_eqe_event);
577 
578 	return qed_iov_allocate_vfdb(p_hwfn);
579 }
580 
581 void qed_iov_setup(struct qed_hwfn *p_hwfn)
582 {
583 	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
584 		return;
585 
586 	qed_iov_setup_vfdb(p_hwfn);
587 }
588 
589 void qed_iov_free(struct qed_hwfn *p_hwfn)
590 {
591 	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
592 
593 	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
594 		qed_iov_free_vfdb(p_hwfn);
595 		kfree(p_hwfn->pf_iov_info);
596 	}
597 }
598 
599 void qed_iov_free_hw_info(struct qed_dev *cdev)
600 {
601 	kfree(cdev->p_iov_info);
602 	cdev->p_iov_info = NULL;
603 }
604 
605 int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
606 {
607 	struct qed_dev *cdev = p_hwfn->cdev;
608 	int pos;
609 	int rc;
610 
611 	if (is_kdump_kernel())
612 		return 0;
613 
614 	if (IS_VF(p_hwfn->cdev))
615 		return 0;
616 
617 	/* Learn the PCI configuration */
618 	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
619 				      PCI_EXT_CAP_ID_SRIOV);
620 	if (!pos) {
621 		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
622 		return 0;
623 	}
624 
625 	/* Allocate a new struct for IOV information */
626 	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
627 	if (!cdev->p_iov_info)
628 		return -ENOMEM;
629 
630 	cdev->p_iov_info->pos = pos;
631 
632 	rc = qed_iov_pci_cfg_info(cdev);
633 	if (rc)
634 		return rc;
635 
636 	/* We want PF IOV to be synonemous with the existance of p_iov_info;
637 	 * In case the capability is published but there are no VFs, simply
638 	 * de-allocate the struct.
639 	 */
640 	if (!cdev->p_iov_info->total_vfs) {
641 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
642 			   "IOV capabilities, but no VFs are published\n");
643 		kfree(cdev->p_iov_info);
644 		cdev->p_iov_info = NULL;
645 		return 0;
646 	}
647 
648 	/* First VF index based on offset is tricky:
649 	 *  - If ARI is supported [likely], offset - (16 - pf_id) would
650 	 *    provide the number for eng0. 2nd engine Vfs would begin
651 	 *    after the first engine's VFs.
652 	 *  - If !ARI, VFs would start on next device.
653 	 *    so offset - (256 - pf_id) would provide the number.
654 	 * Utilize the fact that (256 - pf_id) is achieved only by later
655 	 * to differentiate between the two.
656 	 */
657 
658 	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
659 		u32 first = p_hwfn->cdev->p_iov_info->offset +
660 			    p_hwfn->abs_pf_id - 16;
661 
662 		cdev->p_iov_info->first_vf_in_pf = first;
663 
664 		if (QED_PATH_ID(p_hwfn))
665 			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
666 	} else {
667 		u32 first = p_hwfn->cdev->p_iov_info->offset +
668 			    p_hwfn->abs_pf_id - 256;
669 
670 		cdev->p_iov_info->first_vf_in_pf = first;
671 	}
672 
673 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
674 		   "First VF in hwfn 0x%08x\n",
675 		   cdev->p_iov_info->first_vf_in_pf);
676 
677 	return 0;
678 }
679 
680 static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
681 				     int vfid, bool b_fail_malicious)
682 {
683 	/* Check PF supports sriov */
684 	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
685 	    !IS_PF_SRIOV_ALLOC(p_hwfn))
686 		return false;
687 
688 	/* Check VF validity */
689 	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
690 		return false;
691 
692 	return true;
693 }
694 
695 static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
696 {
697 	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
698 }
699 
700 static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
701 				      u16 rel_vf_id, u8 to_disable)
702 {
703 	struct qed_vf_info *vf;
704 	int i;
705 
706 	for_each_hwfn(cdev, i) {
707 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
708 
709 		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
710 		if (!vf)
711 			continue;
712 
713 		vf->to_disable = to_disable;
714 	}
715 }
716 
717 static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
718 {
719 	u16 i;
720 
721 	if (!IS_QED_SRIOV(cdev))
722 		return;
723 
724 	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
725 		qed_iov_set_vf_to_disable(cdev, i, to_disable);
726 }
727 
728 static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
729 				       struct qed_ptt *p_ptt, u8 abs_vfid)
730 {
731 	qed_wr(p_hwfn, p_ptt,
732 	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
733 	       1 << (abs_vfid & 0x1f));
734 }
735 
736 static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
737 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
738 {
739 	int i;
740 
741 	/* Set VF masks and configuration - pretend */
742 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
743 
744 	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
745 
746 	/* unpretend */
747 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
748 
749 	/* iterate over all queues, clear sb consumer */
750 	for (i = 0; i < vf->num_sbs; i++)
751 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
752 						vf->igu_sbs[i],
753 						vf->opaque_fid, true);
754 }
755 
756 static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
757 				   struct qed_ptt *p_ptt,
758 				   struct qed_vf_info *vf, bool enable)
759 {
760 	u32 igu_vf_conf;
761 
762 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
763 
764 	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
765 
766 	if (enable)
767 		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
768 	else
769 		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
770 
771 	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
772 
773 	/* unpretend */
774 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
775 }
776 
777 static int
778 qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
779 			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
780 {
781 	u8 current_max = 0;
782 	int i;
783 
784 	/* For AH onward, configuration is per-PF. Find maximum of all
785 	 * the currently enabled child VFs, and set the number to be that.
786 	 */
787 	if (!QED_IS_BB(p_hwfn->cdev)) {
788 		qed_for_each_vf(p_hwfn, i) {
789 			struct qed_vf_info *p_vf;
790 
791 			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
792 			if (!p_vf)
793 				continue;
794 
795 			current_max = max_t(u8, current_max, p_vf->num_sbs);
796 		}
797 	}
798 
799 	if (num_sbs > current_max)
800 		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
801 					      abs_vf_id, num_sbs);
802 
803 	return 0;
804 }
805 
806 static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
807 				    struct qed_ptt *p_ptt,
808 				    struct qed_vf_info *vf)
809 {
810 	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
811 	int rc;
812 
813 	/* It's possible VF was previously considered malicious -
814 	 * clear the indication even if we're only going to disable VF.
815 	 */
816 	vf->b_malicious = false;
817 
818 	if (vf->to_disable)
819 		return 0;
820 
821 	DP_VERBOSE(p_hwfn,
822 		   QED_MSG_IOV,
823 		   "Enable internal access for vf %x [abs %x]\n",
824 		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
825 
826 	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
827 
828 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
829 
830 	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
831 					   vf->abs_vf_id, vf->num_sbs);
832 	if (rc)
833 		return rc;
834 
835 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
836 
837 	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
838 	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
839 
840 	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
841 		     p_hwfn->hw_info.hw_mode);
842 
843 	/* unpretend */
844 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
845 
846 	vf->state = VF_FREE;
847 
848 	return rc;
849 }
850 
851 /**
852  * @brief qed_iov_config_perm_table - configure the permission
853  *      zone table.
854  *      In E4, queue zone permission table size is 320x9. There
855  *      are 320 VF queues for single engine device (256 for dual
856  *      engine device), and each entry has the following format:
857  *      {Valid, VF[7:0]}
858  * @param p_hwfn
859  * @param p_ptt
860  * @param vf
861  * @param enable
862  */
863 static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
864 				      struct qed_ptt *p_ptt,
865 				      struct qed_vf_info *vf, u8 enable)
866 {
867 	u32 reg_addr, val;
868 	u16 qzone_id = 0;
869 	int qid;
870 
871 	for (qid = 0; qid < vf->num_rxqs; qid++) {
872 		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
873 				&qzone_id);
874 
875 		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
876 		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
877 		qed_wr(p_hwfn, p_ptt, reg_addr, val);
878 	}
879 }
880 
881 static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
882 				      struct qed_ptt *p_ptt,
883 				      struct qed_vf_info *vf)
884 {
885 	/* Reset vf in IGU - interrupts are still disabled */
886 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
887 
888 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
889 
890 	/* Permission Table */
891 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
892 }
893 
894 static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
895 				   struct qed_ptt *p_ptt,
896 				   struct qed_vf_info *vf, u16 num_rx_queues)
897 {
898 	struct qed_igu_block *p_block;
899 	struct cau_sb_entry sb_entry;
900 	int qid = 0;
901 	u32 val = 0;
902 
903 	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
904 		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
905 	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
906 
907 	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
908 	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
909 	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
910 
911 	for (qid = 0; qid < num_rx_queues; qid++) {
912 		p_block = qed_get_igu_free_sb(p_hwfn, false);
913 		vf->igu_sbs[qid] = p_block->igu_sb_id;
914 		p_block->status &= ~QED_IGU_STATUS_FREE;
915 		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
916 
917 		qed_wr(p_hwfn, p_ptt,
918 		       IGU_REG_MAPPING_MEMORY +
919 		       sizeof(u32) * p_block->igu_sb_id, val);
920 
921 		/* Configure igu sb in CAU which were marked valid */
922 		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
923 				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
924 
925 		qed_dmae_host2grc(p_hwfn, p_ptt,
926 				  (u64)(uintptr_t)&sb_entry,
927 				  CAU_REG_SB_VAR_MEMORY +
928 				  p_block->igu_sb_id * sizeof(u64), 2, NULL);
929 	}
930 
931 	vf->num_sbs = (u8) num_rx_queues;
932 
933 	return vf->num_sbs;
934 }
935 
936 static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
937 				    struct qed_ptt *p_ptt,
938 				    struct qed_vf_info *vf)
939 {
940 	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
941 	int idx, igu_id;
942 	u32 addr, val;
943 
944 	/* Invalidate igu CAM lines and mark them as free */
945 	for (idx = 0; idx < vf->num_sbs; idx++) {
946 		igu_id = vf->igu_sbs[idx];
947 		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
948 
949 		val = qed_rd(p_hwfn, p_ptt, addr);
950 		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
951 		qed_wr(p_hwfn, p_ptt, addr, val);
952 
953 		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
954 		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
955 	}
956 
957 	vf->num_sbs = 0;
958 }
959 
960 static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
961 			     u16 vfid,
962 			     struct qed_mcp_link_params *params,
963 			     struct qed_mcp_link_state *link,
964 			     struct qed_mcp_link_capabilities *p_caps)
965 {
966 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
967 						       vfid,
968 						       false);
969 	struct qed_bulletin_content *p_bulletin;
970 
971 	if (!p_vf)
972 		return;
973 
974 	p_bulletin = p_vf->bulletin.p_virt;
975 	p_bulletin->req_autoneg = params->speed.autoneg;
976 	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
977 	p_bulletin->req_forced_speed = params->speed.forced_speed;
978 	p_bulletin->req_autoneg_pause = params->pause.autoneg;
979 	p_bulletin->req_forced_rx = params->pause.forced_rx;
980 	p_bulletin->req_forced_tx = params->pause.forced_tx;
981 	p_bulletin->req_loopback = params->loopback_mode;
982 
983 	p_bulletin->link_up = link->link_up;
984 	p_bulletin->speed = link->speed;
985 	p_bulletin->full_duplex = link->full_duplex;
986 	p_bulletin->autoneg = link->an;
987 	p_bulletin->autoneg_complete = link->an_complete;
988 	p_bulletin->parallel_detection = link->parallel_detection;
989 	p_bulletin->pfc_enabled = link->pfc_enabled;
990 	p_bulletin->partner_adv_speed = link->partner_adv_speed;
991 	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
992 	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
993 	p_bulletin->partner_adv_pause = link->partner_adv_pause;
994 	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
995 
996 	p_bulletin->capability_speed = p_caps->speed_capabilities;
997 }
998 
999 static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
1000 				  struct qed_ptt *p_ptt,
1001 				  struct qed_iov_vf_init_params *p_params)
1002 {
1003 	struct qed_mcp_link_capabilities link_caps;
1004 	struct qed_mcp_link_params link_params;
1005 	struct qed_mcp_link_state link_state;
1006 	u8 num_of_vf_avaiable_chains = 0;
1007 	struct qed_vf_info *vf = NULL;
1008 	u16 qid, num_irqs;
1009 	int rc = 0;
1010 	u32 cids;
1011 	u8 i;
1012 
1013 	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1014 	if (!vf) {
1015 		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	if (vf->b_init) {
1020 		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
1021 			  p_params->rel_vf_id);
1022 		return -EINVAL;
1023 	}
1024 
1025 	/* Perform sanity checking on the requested queue_id */
1026 	for (i = 0; i < p_params->num_queues; i++) {
1027 		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
1028 		u16 max_vf_qzone = min_vf_qzone +
1029 		    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;
1030 
1031 		qid = p_params->req_rx_queue[i];
1032 		if (qid < min_vf_qzone || qid > max_vf_qzone) {
1033 			DP_NOTICE(p_hwfn,
1034 				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
1035 				  qid,
1036 				  p_params->rel_vf_id,
1037 				  min_vf_qzone, max_vf_qzone);
1038 			return -EINVAL;
1039 		}
1040 
1041 		qid = p_params->req_tx_queue[i];
1042 		if (qid > max_vf_qzone) {
1043 			DP_NOTICE(p_hwfn,
1044 				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
1045 				  qid, p_params->rel_vf_id, max_vf_qzone);
1046 			return -EINVAL;
1047 		}
1048 
1049 		/* If client *really* wants, Tx qid can be shared with PF */
1050 		if (qid < min_vf_qzone)
1051 			DP_VERBOSE(p_hwfn,
1052 				   QED_MSG_IOV,
1053 				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
1054 				   p_params->rel_vf_id, qid, i);
1055 	}
1056 
1057 	/* Limit number of queues according to number of CIDs */
1058 	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1059 	DP_VERBOSE(p_hwfn,
1060 		   QED_MSG_IOV,
1061 		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
1062 		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
1063 	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
1064 
1065 	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
1066 							     p_ptt,
1067 							     vf, num_irqs);
1068 	if (!num_of_vf_avaiable_chains) {
1069 		DP_ERR(p_hwfn, "no available igu sbs\n");
1070 		return -ENOMEM;
1071 	}
1072 
1073 	/* Choose queue number and index ranges */
1074 	vf->num_rxqs = num_of_vf_avaiable_chains;
1075 	vf->num_txqs = num_of_vf_avaiable_chains;
1076 
1077 	for (i = 0; i < vf->num_rxqs; i++) {
1078 		struct qed_vf_queue *p_queue = &vf->vf_queues[i];
1079 
1080 		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1081 		p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1082 
1083 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1084 			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
1085 			   vf->relative_vf_id, i, vf->igu_sbs[i],
1086 			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
1087 	}
1088 
1089 	/* Update the link configuration in bulletin */
1090 	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
1091 	       sizeof(link_params));
1092 	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
1093 	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
1094 	       sizeof(link_caps));
1095 	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
1096 			 &link_params, &link_state, &link_caps);
1097 
1098 	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1099 	if (!rc) {
1100 		vf->b_init = true;
1101 
1102 		if (IS_LEAD_HWFN(p_hwfn))
1103 			p_hwfn->cdev->p_iov_info->num_vfs++;
1104 	}
1105 
1106 	return rc;
1107 }
1108 
1109 static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
1110 				     struct qed_ptt *p_ptt, u16 rel_vf_id)
1111 {
1112 	struct qed_mcp_link_capabilities caps;
1113 	struct qed_mcp_link_params params;
1114 	struct qed_mcp_link_state link;
1115 	struct qed_vf_info *vf = NULL;
1116 
1117 	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1118 	if (!vf) {
1119 		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
1120 		return -EINVAL;
1121 	}
1122 
1123 	if (vf->bulletin.p_virt)
1124 		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
1125 
1126 	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1127 
1128 	/* Get the link configuration back in bulletin so
1129 	 * that when VFs are re-enabled they get the actual
1130 	 * link configuration.
1131 	 */
1132 	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
1133 	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
1134 	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
1135 	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1136 
1137 	/* Forget the VF's acquisition message */
1138 	memset(&vf->acquire, 0, sizeof(vf->acquire));
1139 
1140 	/* disablng interrupts and resetting permission table was done during
1141 	 * vf-close, however, we could get here without going through vf_close
1142 	 */
1143 	/* Disable Interrupts for VF */
1144 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1145 
1146 	/* Reset Permission table */
1147 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1148 
1149 	vf->num_rxqs = 0;
1150 	vf->num_txqs = 0;
1151 	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1152 
1153 	if (vf->b_init) {
1154 		vf->b_init = false;
1155 
1156 		if (IS_LEAD_HWFN(p_hwfn))
1157 			p_hwfn->cdev->p_iov_info->num_vfs--;
1158 	}
1159 
1160 	return 0;
1161 }
1162 
1163 static bool qed_iov_tlv_supported(u16 tlvtype)
1164 {
1165 	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
1166 }
1167 
1168 /* place a given tlv on the tlv buffer, continuing current tlv list */
1169 void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
1170 {
1171 	struct channel_tlv *tl = (struct channel_tlv *)*offset;
1172 
1173 	tl->type = type;
1174 	tl->length = length;
1175 
1176 	/* Offset should keep pointing to next TLV (the end of the last) */
1177 	*offset += length;
1178 
1179 	/* Return a pointer to the start of the added tlv */
1180 	return *offset - length;
1181 }
1182 
1183 /* list the types and lengths of the tlvs on the buffer */
1184 void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
1185 {
1186 	u16 i = 1, total_length = 0;
1187 	struct channel_tlv *tlv;
1188 
1189 	do {
1190 		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1191 
1192 		/* output tlv */
1193 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1194 			   "TLV number %d: type %d, length %d\n",
1195 			   i, tlv->type, tlv->length);
1196 
1197 		if (tlv->type == CHANNEL_TLV_LIST_END)
1198 			return;
1199 
1200 		/* Validate entry - protect against malicious VFs */
1201 		if (!tlv->length) {
1202 			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
1203 			return;
1204 		}
1205 
1206 		total_length += tlv->length;
1207 
1208 		if (total_length >= sizeof(struct tlv_buffer_size)) {
1209 			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
1210 			return;
1211 		}
1212 
1213 		i++;
1214 	} while (1);
1215 }
1216 
1217 static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
1218 				  struct qed_ptt *p_ptt,
1219 				  struct qed_vf_info *p_vf,
1220 				  u16 length, u8 status)
1221 {
1222 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1223 	struct qed_dmae_params params;
1224 	u8 eng_vf_id;
1225 
1226 	mbx->reply_virt->default_resp.hdr.status = status;
1227 
1228 	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
1229 
1230 	eng_vf_id = p_vf->abs_vf_id;
1231 
1232 	memset(&params, 0, sizeof(params));
1233 	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
1234 	params.dst_vfid = eng_vf_id;
1235 
1236 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1237 			   mbx->req_virt->first_tlv.reply_address +
1238 			   sizeof(u64),
1239 			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1240 			   &params);
1241 
1242 	/* Once PF copies the rc to the VF, the latter can continue
1243 	 * and send an additional message. So we have to make sure the
1244 	 * channel would be re-set to ready prior to that.
1245 	 */
1246 	REG_WR(p_hwfn,
1247 	       GTT_BAR0_MAP_REG_USDM_RAM +
1248 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1249 
1250 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1251 			   mbx->req_virt->first_tlv.reply_address,
1252 			   sizeof(u64) / 4, &params);
1253 }
1254 
1255 static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
1256 				enum qed_iov_vport_update_flag flag)
1257 {
1258 	switch (flag) {
1259 	case QED_IOV_VP_UPDATE_ACTIVATE:
1260 		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1261 	case QED_IOV_VP_UPDATE_VLAN_STRIP:
1262 		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1263 	case QED_IOV_VP_UPDATE_TX_SWITCH:
1264 		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1265 	case QED_IOV_VP_UPDATE_MCAST:
1266 		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1267 	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
1268 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1269 	case QED_IOV_VP_UPDATE_RSS:
1270 		return CHANNEL_TLV_VPORT_UPDATE_RSS;
1271 	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1272 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1273 	case QED_IOV_VP_UPDATE_SGE_TPA:
1274 		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1275 	default:
1276 		return 0;
1277 	}
1278 }
1279 
1280 static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
1281 					    struct qed_vf_info *p_vf,
1282 					    struct qed_iov_vf_mbx *p_mbx,
1283 					    u8 status,
1284 					    u16 tlvs_mask, u16 tlvs_accepted)
1285 {
1286 	struct pfvf_def_resp_tlv *resp;
1287 	u16 size, total_len, i;
1288 
1289 	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1290 	p_mbx->offset = (u8 *)p_mbx->reply_virt;
1291 	size = sizeof(struct pfvf_def_resp_tlv);
1292 	total_len = size;
1293 
1294 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1295 
1296 	/* Prepare response for all extended tlvs if they are found by PF */
1297 	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
1298 		if (!(tlvs_mask & BIT(i)))
1299 			continue;
1300 
1301 		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
1302 				   qed_iov_vport_to_tlv(p_hwfn, i), size);
1303 
1304 		if (tlvs_accepted & BIT(i))
1305 			resp->hdr.status = status;
1306 		else
1307 			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1308 
1309 		DP_VERBOSE(p_hwfn,
1310 			   QED_MSG_IOV,
1311 			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
1312 			   p_vf->relative_vf_id,
1313 			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1314 
1315 		total_len += size;
1316 	}
1317 
1318 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1319 		    sizeof(struct channel_list_end_tlv));
1320 
1321 	return total_len;
1322 }
1323 
1324 static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
1325 				 struct qed_ptt *p_ptt,
1326 				 struct qed_vf_info *vf_info,
1327 				 u16 type, u16 length, u8 status)
1328 {
1329 	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1330 
1331 	mbx->offset = (u8 *)mbx->reply_virt;
1332 
1333 	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
1334 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1335 		    sizeof(struct channel_list_end_tlv));
1336 
1337 	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1338 }
1339 
1340 static struct
1341 qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
1342 					       u16 relative_vf_id,
1343 					       bool b_enabled_only)
1344 {
1345 	struct qed_vf_info *vf = NULL;
1346 
1347 	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1348 	if (!vf)
1349 		return NULL;
1350 
1351 	return &vf->p_vf_info;
1352 }
1353 
1354 static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
1355 {
1356 	struct qed_public_vf_info *vf_info;
1357 
1358 	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
1359 
1360 	if (!vf_info)
1361 		return;
1362 
1363 	/* Clear the VF mac */
1364 	eth_zero_addr(vf_info->mac);
1365 
1366 	vf_info->rx_accept_mode = 0;
1367 	vf_info->tx_accept_mode = 0;
1368 }
1369 
1370 static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
1371 			       struct qed_vf_info *p_vf)
1372 {
1373 	u32 i, j;
1374 
1375 	p_vf->vf_bulletin = 0;
1376 	p_vf->vport_instance = 0;
1377 	p_vf->configured_features = 0;
1378 
1379 	/* If VF previously requested less resources, go back to default */
1380 	p_vf->num_rxqs = p_vf->num_sbs;
1381 	p_vf->num_txqs = p_vf->num_sbs;
1382 
1383 	p_vf->num_active_rxqs = 0;
1384 
1385 	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1386 		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
1387 
1388 		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
1389 			if (!p_queue->cids[j].p_cid)
1390 				continue;
1391 
1392 			qed_eth_queue_cid_release(p_hwfn,
1393 						  p_queue->cids[j].p_cid);
1394 			p_queue->cids[j].p_cid = NULL;
1395 		}
1396 	}
1397 
1398 	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1399 	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1400 	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
1401 }
1402 
1403 /* Returns either 0, or log(size) */
1404 static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
1405 				  struct qed_ptt *p_ptt)
1406 {
1407 	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
1408 
1409 	if (val)
1410 		return val + 11;
1411 	return 0;
1412 }
1413 
1414 static void
1415 qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
1416 				 struct qed_ptt *p_ptt,
1417 				 struct qed_vf_info *p_vf,
1418 				 struct vf_pf_resc_request *p_req,
1419 				 struct pf_vf_resc *p_resp)
1420 {
1421 	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
1422 	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
1423 		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
1424 	u32 bar_size;
1425 
1426 	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);
1427 
1428 	/* If VF didn't bother asking for QIDs than don't bother limiting
1429 	 * number of CIDs. The VF doesn't care about the number, and this
1430 	 * has the likely result of causing an additional acquisition.
1431 	 */
1432 	if (!(p_vf->acquire.vfdev_info.capabilities &
1433 	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
1434 		return;
1435 
1436 	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
1437 	 * that would make sure doorbells for all CIDs fall within the bar.
1438 	 * If it doesn't, make sure regview window is sufficient.
1439 	 */
1440 	if (p_vf->acquire.vfdev_info.capabilities &
1441 	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
1442 		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
1443 		if (bar_size)
1444 			bar_size = 1 << bar_size;
1445 
1446 		if (p_hwfn->cdev->num_hwfns > 1)
1447 			bar_size /= 2;
1448 	} else {
1449 		bar_size = PXP_VF_BAR0_DQ_LENGTH;
1450 	}
1451 
1452 	if (bar_size / db_size < 256)
1453 		p_resp->num_cids = min_t(u8, p_resp->num_cids,
1454 					 (u8)(bar_size / db_size));
1455 }
1456 
1457 static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
1458 				      struct qed_ptt *p_ptt,
1459 				      struct qed_vf_info *p_vf,
1460 				      struct vf_pf_resc_request *p_req,
1461 				      struct pf_vf_resc *p_resp)
1462 {
1463 	u8 i;
1464 
1465 	/* Queue related information */
1466 	p_resp->num_rxqs = p_vf->num_rxqs;
1467 	p_resp->num_txqs = p_vf->num_txqs;
1468 	p_resp->num_sbs = p_vf->num_sbs;
1469 
1470 	for (i = 0; i < p_resp->num_sbs; i++) {
1471 		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1472 		p_resp->hw_sbs[i].sb_qid = 0;
1473 	}
1474 
1475 	/* These fields are filled for backward compatibility.
1476 	 * Unused by modern vfs.
1477 	 */
1478 	for (i = 0; i < p_resp->num_rxqs; i++) {
1479 		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1480 				(u16 *)&p_resp->hw_qid[i]);
1481 		p_resp->cid[i] = i;
1482 	}
1483 
1484 	/* Filter related information */
1485 	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
1486 					p_req->num_mac_filters);
1487 	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
1488 					 p_req->num_vlan_filters);
1489 
1490 	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
1491 
1492 	/* This isn't really needed/enforced, but some legacy VFs might depend
1493 	 * on the correct filling of this field.
1494 	 */
1495 	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
1496 
1497 	/* Validate sufficient resources for VF */
1498 	if (p_resp->num_rxqs < p_req->num_rxqs ||
1499 	    p_resp->num_txqs < p_req->num_txqs ||
1500 	    p_resp->num_sbs < p_req->num_sbs ||
1501 	    p_resp->num_mac_filters < p_req->num_mac_filters ||
1502 	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1503 	    p_resp->num_mc_filters < p_req->num_mc_filters ||
1504 	    p_resp->num_cids < p_req->num_cids) {
1505 		DP_VERBOSE(p_hwfn,
1506 			   QED_MSG_IOV,
1507 			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
1508 			   p_vf->abs_vf_id,
1509 			   p_req->num_rxqs,
1510 			   p_resp->num_rxqs,
1511 			   p_req->num_rxqs,
1512 			   p_resp->num_txqs,
1513 			   p_req->num_sbs,
1514 			   p_resp->num_sbs,
1515 			   p_req->num_mac_filters,
1516 			   p_resp->num_mac_filters,
1517 			   p_req->num_vlan_filters,
1518 			   p_resp->num_vlan_filters,
1519 			   p_req->num_mc_filters,
1520 			   p_resp->num_mc_filters,
1521 			   p_req->num_cids, p_resp->num_cids);
1522 
1523 		/* Some legacy OSes are incapable of correctly handling this
1524 		 * failure.
1525 		 */
1526 		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1527 		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1528 		    (p_vf->acquire.vfdev_info.os_type ==
1529 		     VFPF_ACQUIRE_OS_WINDOWS))
1530 			return PFVF_STATUS_SUCCESS;
1531 
1532 		return PFVF_STATUS_NO_RESOURCE;
1533 	}
1534 
1535 	return PFVF_STATUS_SUCCESS;
1536 }
1537 
1538 static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
1539 					 struct pfvf_stats_info *p_stats)
1540 {
1541 	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1542 				  offsetof(struct mstorm_vf_zone,
1543 					   non_trigger.eth_queue_stat);
1544 	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1545 	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1546 				  offsetof(struct ustorm_vf_zone,
1547 					   non_trigger.eth_queue_stat);
1548 	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1549 	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1550 				  offsetof(struct pstorm_vf_zone,
1551 					   non_trigger.eth_queue_stat);
1552 	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1553 	p_stats->tstats.address = 0;
1554 	p_stats->tstats.len = 0;
1555 }
1556 
1557 static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1558 				   struct qed_ptt *p_ptt,
1559 				   struct qed_vf_info *vf)
1560 {
1561 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1562 	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1563 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1564 	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1565 	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1566 	struct pf_vf_resc *resc = &resp->resc;
1567 	int rc;
1568 
1569 	memset(resp, 0, sizeof(*resp));
1570 
1571 	/* Write the PF version so that VF would know which version
1572 	 * is supported - might be later overriden. This guarantees that
1573 	 * VF could recognize legacy PF based on lack of versions in reply.
1574 	 */
1575 	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1576 	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1577 
1578 	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
1579 		DP_VERBOSE(p_hwfn,
1580 			   QED_MSG_IOV,
1581 			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1582 			   vf->abs_vf_id, vf->state);
1583 		goto out;
1584 	}
1585 
1586 	/* Validate FW compatibility */
1587 	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1588 		if (req->vfdev_info.capabilities &
1589 		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1590 			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1591 
1592 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1593 				   "VF[%d] is pre-fastpath HSI\n",
1594 				   vf->abs_vf_id);
1595 			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1596 			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1597 		} else {
1598 			DP_INFO(p_hwfn,
1599 				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
1600 				vf->abs_vf_id,
1601 				req->vfdev_info.eth_fp_hsi_major,
1602 				req->vfdev_info.eth_fp_hsi_minor,
1603 				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1604 
1605 			goto out;
1606 		}
1607 	}
1608 
1609 	/* On 100g PFs, prevent old VFs from loading */
1610 	if ((p_hwfn->cdev->num_hwfns > 1) &&
1611 	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1612 		DP_INFO(p_hwfn,
1613 			"VF[%d] is running an old driver that doesn't support 100g\n",
1614 			vf->abs_vf_id);
1615 		goto out;
1616 	}
1617 
1618 	/* Store the acquire message */
1619 	memcpy(&vf->acquire, req, sizeof(vf->acquire));
1620 
1621 	vf->opaque_fid = req->vfdev_info.opaque_fid;
1622 
1623 	vf->vf_bulletin = req->bulletin_addr;
1624 	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1625 			    vf->bulletin.size : req->bulletin_size;
1626 
1627 	/* fill in pfdev info */
1628 	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1629 	pfdev_info->db_size = 0;
1630 	pfdev_info->indices_per_sb = PIS_PER_SB_E4;
1631 
1632 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1633 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1634 	if (p_hwfn->cdev->num_hwfns > 1)
1635 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1636 
1637 	/* Share our ability to use multiple queue-ids only with VFs
1638 	 * that request it.
1639 	 */
1640 	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
1641 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
1642 
1643 	/* Share the sizes of the bars with VF */
1644 	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
1645 
1646 	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1647 
1648 	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1649 
1650 	pfdev_info->fw_major = FW_MAJOR_VERSION;
1651 	pfdev_info->fw_minor = FW_MINOR_VERSION;
1652 	pfdev_info->fw_rev = FW_REVISION_VERSION;
1653 	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1654 
1655 	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
1656 	 * this field.
1657 	 */
1658 	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
1659 					 req->vfdev_info.eth_fp_hsi_minor);
1660 	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1661 	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1662 
1663 	pfdev_info->dev_type = p_hwfn->cdev->type;
1664 	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1665 
1666 	/* Fill resources available to VF; Make sure there are enough to
1667 	 * satisfy the VF's request.
1668 	 */
1669 	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1670 						  &req->resc_request, resc);
1671 	if (vfpf_status != PFVF_STATUS_SUCCESS)
1672 		goto out;
1673 
1674 	/* Start the VF in FW */
1675 	rc = qed_sp_vf_start(p_hwfn, vf);
1676 	if (rc) {
1677 		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1678 		vfpf_status = PFVF_STATUS_FAILURE;
1679 		goto out;
1680 	}
1681 
1682 	/* Fill agreed size of bulletin board in response */
1683 	resp->bulletin_size = vf->bulletin.size;
1684 	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1685 
1686 	DP_VERBOSE(p_hwfn,
1687 		   QED_MSG_IOV,
1688 		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1689 		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1690 		   vf->abs_vf_id,
1691 		   resp->pfdev_info.chip_num,
1692 		   resp->pfdev_info.db_size,
1693 		   resp->pfdev_info.indices_per_sb,
1694 		   resp->pfdev_info.capabilities,
1695 		   resc->num_rxqs,
1696 		   resc->num_txqs,
1697 		   resc->num_sbs,
1698 		   resc->num_mac_filters,
1699 		   resc->num_vlan_filters);
1700 	vf->state = VF_ACQUIRED;
1701 
1702 	/* Prepare Response */
1703 out:
1704 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1705 			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1706 }
1707 
1708 static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
1709 				  struct qed_vf_info *p_vf, bool val)
1710 {
1711 	struct qed_sp_vport_update_params params;
1712 	int rc;
1713 
1714 	if (val == p_vf->spoof_chk) {
1715 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1716 			   "Spoofchk value[%d] is already configured\n", val);
1717 		return 0;
1718 	}
1719 
1720 	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
1721 	params.opaque_fid = p_vf->opaque_fid;
1722 	params.vport_id = p_vf->vport_id;
1723 	params.update_anti_spoofing_en_flg = 1;
1724 	params.anti_spoofing_en = val;
1725 
1726 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1727 	if (!rc) {
1728 		p_vf->spoof_chk = val;
1729 		p_vf->req_spoofchk_val = p_vf->spoof_chk;
1730 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1731 			   "Spoofchk val[%d] configured\n", val);
1732 	} else {
1733 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1734 			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1735 			   val, p_vf->relative_vf_id);
1736 	}
1737 
1738 	return rc;
1739 }
1740 
1741 static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
1742 					    struct qed_vf_info *p_vf)
1743 {
1744 	struct qed_filter_ucast filter;
1745 	int rc = 0;
1746 	int i;
1747 
1748 	memset(&filter, 0, sizeof(filter));
1749 	filter.is_rx_filter = 1;
1750 	filter.is_tx_filter = 1;
1751 	filter.vport_to_add_to = p_vf->vport_id;
1752 	filter.opcode = QED_FILTER_ADD;
1753 
1754 	/* Reconfigure vlans */
1755 	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1756 		if (!p_vf->shadow_config.vlans[i].used)
1757 			continue;
1758 
1759 		filter.type = QED_FILTER_VLAN;
1760 		filter.vlan = p_vf->shadow_config.vlans[i].vid;
1761 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1762 			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1763 			   filter.vlan, p_vf->relative_vf_id);
1764 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1765 					     &filter, QED_SPQ_MODE_CB, NULL);
1766 		if (rc) {
1767 			DP_NOTICE(p_hwfn,
1768 				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
1769 				  filter.vlan, p_vf->relative_vf_id);
1770 			break;
1771 		}
1772 	}
1773 
1774 	return rc;
1775 }
1776 
1777 static int
1778 qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
1779 				   struct qed_vf_info *p_vf, u64 events)
1780 {
1781 	int rc = 0;
1782 
1783 	if ((events & BIT(VLAN_ADDR_FORCED)) &&
1784 	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1785 		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1786 
1787 	return rc;
1788 }
1789 
1790 static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
1791 					  struct qed_vf_info *p_vf, u64 events)
1792 {
1793 	int rc = 0;
1794 	struct qed_filter_ucast filter;
1795 
1796 	if (!p_vf->vport_instance)
1797 		return -EINVAL;
1798 
1799 	if ((events & BIT(MAC_ADDR_FORCED)) ||
1800 	    p_vf->p_vf_info.is_trusted_configured) {
1801 		/* Since there's no way [currently] of removing the MAC,
1802 		 * we can always assume this means we need to force it.
1803 		 */
1804 		memset(&filter, 0, sizeof(filter));
1805 		filter.type = QED_FILTER_MAC;
1806 		filter.opcode = QED_FILTER_REPLACE;
1807 		filter.is_rx_filter = 1;
1808 		filter.is_tx_filter = 1;
1809 		filter.vport_to_add_to = p_vf->vport_id;
1810 		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1811 
1812 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1813 					     &filter, QED_SPQ_MODE_CB, NULL);
1814 		if (rc) {
1815 			DP_NOTICE(p_hwfn,
1816 				  "PF failed to configure MAC for VF\n");
1817 			return rc;
1818 		}
1819 		if (p_vf->p_vf_info.is_trusted_configured)
1820 			p_vf->configured_features |=
1821 				BIT(VFPF_BULLETIN_MAC_ADDR);
1822 		else
1823 			p_vf->configured_features |=
1824 				BIT(MAC_ADDR_FORCED);
1825 	}
1826 
1827 	if (events & BIT(VLAN_ADDR_FORCED)) {
1828 		struct qed_sp_vport_update_params vport_update;
1829 		u8 removal;
1830 		int i;
1831 
1832 		memset(&filter, 0, sizeof(filter));
1833 		filter.type = QED_FILTER_VLAN;
1834 		filter.is_rx_filter = 1;
1835 		filter.is_tx_filter = 1;
1836 		filter.vport_to_add_to = p_vf->vport_id;
1837 		filter.vlan = p_vf->bulletin.p_virt->pvid;
1838 		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
1839 					      QED_FILTER_FLUSH;
1840 
1841 		/* Send the ramrod */
1842 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1843 					     &filter, QED_SPQ_MODE_CB, NULL);
1844 		if (rc) {
1845 			DP_NOTICE(p_hwfn,
1846 				  "PF failed to configure VLAN for VF\n");
1847 			return rc;
1848 		}
1849 
1850 		/* Update the default-vlan & silent vlan stripping */
1851 		memset(&vport_update, 0, sizeof(vport_update));
1852 		vport_update.opaque_fid = p_vf->opaque_fid;
1853 		vport_update.vport_id = p_vf->vport_id;
1854 		vport_update.update_default_vlan_enable_flg = 1;
1855 		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1856 		vport_update.update_default_vlan_flg = 1;
1857 		vport_update.default_vlan = filter.vlan;
1858 
1859 		vport_update.update_inner_vlan_removal_flg = 1;
1860 		removal = filter.vlan ? 1
1861 				      : p_vf->shadow_config.inner_vlan_removal;
1862 		vport_update.inner_vlan_removal_flg = removal;
1863 		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1864 		rc = qed_sp_vport_update(p_hwfn,
1865 					 &vport_update,
1866 					 QED_SPQ_MODE_EBLOCK, NULL);
1867 		if (rc) {
1868 			DP_NOTICE(p_hwfn,
1869 				  "PF failed to configure VF vport for vlan\n");
1870 			return rc;
1871 		}
1872 
1873 		/* Update all the Rx queues */
1874 		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1875 			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
1876 			struct qed_queue_cid *p_cid = NULL;
1877 
1878 			/* There can be at most 1 Rx queue on qzone. Find it */
1879 			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
1880 			if (!p_cid)
1881 				continue;
1882 
1883 			rc = qed_sp_eth_rx_queues_update(p_hwfn,
1884 							 (void **)&p_cid,
1885 							 1, 0, 1,
1886 							 QED_SPQ_MODE_EBLOCK,
1887 							 NULL);
1888 			if (rc) {
1889 				DP_NOTICE(p_hwfn,
1890 					  "Failed to send Rx update fo queue[0x%04x]\n",
1891 					  p_cid->rel.queue_id);
1892 				return rc;
1893 			}
1894 		}
1895 
1896 		if (filter.vlan)
1897 			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1898 		else
1899 			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
1900 	}
1901 
1902 	/* If forced features are terminated, we need to configure the shadow
1903 	 * configuration back again.
1904 	 */
1905 	if (events)
1906 		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1907 
1908 	return rc;
1909 }
1910 
1911 static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1912 				       struct qed_ptt *p_ptt,
1913 				       struct qed_vf_info *vf)
1914 {
1915 	struct qed_sp_vport_start_params params = { 0 };
1916 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1917 	struct vfpf_vport_start_tlv *start;
1918 	u8 status = PFVF_STATUS_SUCCESS;
1919 	struct qed_vf_info *vf_info;
1920 	u64 *p_bitmap;
1921 	int sb_id;
1922 	int rc;
1923 
1924 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1925 	if (!vf_info) {
1926 		DP_NOTICE(p_hwfn->cdev,
1927 			  "Failed to get VF info, invalid vfid [%d]\n",
1928 			  vf->relative_vf_id);
1929 		return;
1930 	}
1931 
1932 	vf->state = VF_ENABLED;
1933 	start = &mbx->req_virt->start_vport;
1934 
1935 	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1936 
1937 	/* Initialize Status block in CAU */
1938 	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1939 		if (!start->sb_addr[sb_id]) {
1940 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1941 				   "VF[%d] did not fill the address of SB %d\n",
1942 				   vf->relative_vf_id, sb_id);
1943 			break;
1944 		}
1945 
1946 		qed_int_cau_conf_sb(p_hwfn, p_ptt,
1947 				    start->sb_addr[sb_id],
1948 				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
1949 	}
1950 
1951 	vf->mtu = start->mtu;
1952 	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1953 
1954 	/* Take into consideration configuration forced by hypervisor;
1955 	 * If none is configured, use the supplied VF values [for old
1956 	 * vfs that would still be fine, since they passed '0' as padding].
1957 	 */
1958 	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1959 	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1960 		u8 vf_req = start->only_untagged;
1961 
1962 		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1963 		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1964 	}
1965 
1966 	params.tpa_mode = start->tpa_mode;
1967 	params.remove_inner_vlan = start->inner_vlan_removal;
1968 	params.tx_switching = true;
1969 
1970 	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1971 	params.drop_ttl0 = false;
1972 	params.concrete_fid = vf->concrete_fid;
1973 	params.opaque_fid = vf->opaque_fid;
1974 	params.vport_id = vf->vport_id;
1975 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1976 	params.mtu = vf->mtu;
1977 
1978 	/* Non trusted VFs should enable control frame filtering */
1979 	params.check_mac = !vf->p_vf_info.is_trusted_configured;
1980 
1981 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
1982 	if (rc) {
1983 		DP_ERR(p_hwfn,
1984 		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1985 		status = PFVF_STATUS_FAILURE;
1986 	} else {
1987 		vf->vport_instance++;
1988 
1989 		/* Force configuration if needed on the newly opened vport */
1990 		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1991 
1992 		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1993 	}
1994 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1995 			     sizeof(struct pfvf_def_resp_tlv), status);
1996 }
1997 
1998 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1999 				      struct qed_ptt *p_ptt,
2000 				      struct qed_vf_info *vf)
2001 {
2002 	u8 status = PFVF_STATUS_SUCCESS;
2003 	int rc;
2004 
2005 	vf->vport_instance--;
2006 	vf->spoof_chk = false;
2007 
2008 	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
2009 	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
2010 		vf->b_malicious = true;
2011 		DP_NOTICE(p_hwfn,
2012 			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
2013 			  vf->abs_vf_id);
2014 		status = PFVF_STATUS_MALICIOUS;
2015 		goto out;
2016 	}
2017 
2018 	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2019 	if (rc) {
2020 		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
2021 		       rc);
2022 		status = PFVF_STATUS_FAILURE;
2023 	}
2024 
2025 	/* Forget the configuration on the vport */
2026 	vf->configured_features = 0;
2027 	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2028 
2029 out:
2030 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2031 			     sizeof(struct pfvf_def_resp_tlv), status);
2032 }
2033 
2034 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
2035 					  struct qed_ptt *p_ptt,
2036 					  struct qed_vf_info *vf,
2037 					  u8 status, bool b_legacy)
2038 {
2039 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2040 	struct pfvf_start_queue_resp_tlv *p_tlv;
2041 	struct vfpf_start_rxq_tlv *req;
2042 	u16 length;
2043 
2044 	mbx->offset = (u8 *)mbx->reply_virt;
2045 
2046 	/* Taking a bigger struct instead of adding a TLV to list was a
2047 	 * mistake, but one which we're now stuck with, as some older
2048 	 * clients assume the size of the previous response.
2049 	 */
2050 	if (!b_legacy)
2051 		length = sizeof(*p_tlv);
2052 	else
2053 		length = sizeof(struct pfvf_def_resp_tlv);
2054 
2055 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2056 			    length);
2057 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2058 		    sizeof(struct channel_list_end_tlv));
2059 
2060 	/* Update the TLV with the response */
2061 	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2062 		req = &mbx->req_virt->start_rxq;
2063 		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2064 				offsetof(struct mstorm_vf_zone,
2065 					 non_trigger.eth_rx_queue_producers) +
2066 				sizeof(struct eth_rx_prod_data) * req->rx_qid;
2067 	}
2068 
2069 	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2070 }
2071 
2072 static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
2073 			     struct qed_vf_info *p_vf, bool b_is_tx)
2074 {
2075 	struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2076 	struct vfpf_qid_tlv *p_qid_tlv;
2077 
2078 	/* Search for the qid if the VF published its going to provide it */
2079 	if (!(p_vf->acquire.vfdev_info.capabilities &
2080 	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2081 		if (b_is_tx)
2082 			return QED_IOV_LEGACY_QID_TX;
2083 		else
2084 			return QED_IOV_LEGACY_QID_RX;
2085 	}
2086 
2087 	p_qid_tlv = (struct vfpf_qid_tlv *)
2088 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2089 					     CHANNEL_TLV_QID);
2090 	if (!p_qid_tlv) {
2091 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2092 			   "VF[%2x]: Failed to provide qid\n",
2093 			   p_vf->relative_vf_id);
2094 
2095 		return QED_IOV_QID_INVALID;
2096 	}
2097 
2098 	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2099 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2100 			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
2101 			   p_vf->relative_vf_id, p_qid_tlv->qid);
2102 		return QED_IOV_QID_INVALID;
2103 	}
2104 
2105 	return p_qid_tlv->qid;
2106 }
2107 
2108 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
2109 				     struct qed_ptt *p_ptt,
2110 				     struct qed_vf_info *vf)
2111 {
2112 	struct qed_queue_start_common_params params;
2113 	struct qed_queue_cid_vf_params vf_params;
2114 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2115 	u8 status = PFVF_STATUS_NO_RESOURCE;
2116 	u8 qid_usage_idx, vf_legacy = 0;
2117 	struct vfpf_start_rxq_tlv *req;
2118 	struct qed_vf_queue *p_queue;
2119 	struct qed_queue_cid *p_cid;
2120 	struct qed_sb_info sb_dummy;
2121 	int rc;
2122 
2123 	req = &mbx->req_virt->start_rxq;
2124 
2125 	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2126 				  QED_IOV_VALIDATE_Q_DISABLE) ||
2127 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2128 		goto out;
2129 
2130 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2131 	if (qid_usage_idx == QED_IOV_QID_INVALID)
2132 		goto out;
2133 
2134 	p_queue = &vf->vf_queues[req->rx_qid];
2135 	if (p_queue->cids[qid_usage_idx].p_cid)
2136 		goto out;
2137 
2138 	vf_legacy = qed_vf_calculate_legacy(vf);
2139 
2140 	/* Acquire a new queue-cid */
2141 	memset(&params, 0, sizeof(params));
2142 	params.queue_id = p_queue->fw_rx_qid;
2143 	params.vport_id = vf->vport_id;
2144 	params.stats_id = vf->abs_vf_id + 0x10;
2145 	/* Since IGU index is passed via sb_info, construct a dummy one */
2146 	memset(&sb_dummy, 0, sizeof(sb_dummy));
2147 	sb_dummy.igu_sb_id = req->hw_sb;
2148 	params.p_sb = &sb_dummy;
2149 	params.sb_idx = req->sb_index;
2150 
2151 	memset(&vf_params, 0, sizeof(vf_params));
2152 	vf_params.vfid = vf->relative_vf_id;
2153 	vf_params.vf_qid = (u8)req->rx_qid;
2154 	vf_params.vf_legacy = vf_legacy;
2155 	vf_params.qid_usage_idx = qid_usage_idx;
2156 	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2157 				     &params, true, &vf_params);
2158 	if (!p_cid)
2159 		goto out;
2160 
2161 	/* Legacy VFs have their Producers in a different location, which they
2162 	 * calculate on their own and clean the producer prior to this.
2163 	 */
2164 	if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
2165 		REG_WR(p_hwfn,
2166 		       GTT_BAR0_MAP_REG_MSDM_RAM +
2167 		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2168 		       0);
2169 
2170 	rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
2171 				      req->bd_max_bytes,
2172 				      req->rxq_addr,
2173 				      req->cqe_pbl_addr, req->cqe_pbl_size);
2174 	if (rc) {
2175 		status = PFVF_STATUS_FAILURE;
2176 		qed_eth_queue_cid_release(p_hwfn, p_cid);
2177 	} else {
2178 		p_queue->cids[qid_usage_idx].p_cid = p_cid;
2179 		p_queue->cids[qid_usage_idx].b_is_tx = false;
2180 		status = PFVF_STATUS_SUCCESS;
2181 		vf->num_active_rxqs++;
2182 	}
2183 
2184 out:
2185 	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2186 				      !!(vf_legacy &
2187 					 QED_QCID_LEGACY_VF_RX_PROD));
2188 }
2189 
2190 static void
2191 qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2192 			       struct qed_tunnel_info *p_tun,
2193 			       u16 tunn_feature_mask)
2194 {
2195 	p_resp->tunn_feature_mask = tunn_feature_mask;
2196 	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2197 	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2198 	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2199 	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2200 	p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
2201 	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2202 	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2203 	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2204 	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2205 	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2206 	p_resp->geneve_udp_port = p_tun->geneve_port.port;
2207 	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2208 }
2209 
2210 static void
2211 __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2212 			      struct qed_tunn_update_type *p_tun,
2213 			      enum qed_tunn_mode mask, u8 tun_cls)
2214 {
2215 	if (p_req->tun_mode_update_mask & BIT(mask)) {
2216 		p_tun->b_update_mode = true;
2217 
2218 		if (p_req->tunn_mode & BIT(mask))
2219 			p_tun->b_mode_enabled = true;
2220 	}
2221 
2222 	p_tun->tun_cls = tun_cls;
2223 }
2224 
2225 static void
2226 qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2227 			    struct qed_tunn_update_type *p_tun,
2228 			    struct qed_tunn_update_udp_port *p_port,
2229 			    enum qed_tunn_mode mask,
2230 			    u8 tun_cls, u8 update_port, u16 port)
2231 {
2232 	if (update_port) {
2233 		p_port->b_update_port = true;
2234 		p_port->port = port;
2235 	}
2236 
2237 	__qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2238 }
2239 
2240 static bool
2241 qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2242 {
2243 	bool b_update_requested = false;
2244 
2245 	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2246 	    p_req->update_geneve_port || p_req->update_vxlan_port)
2247 		b_update_requested = true;
2248 
2249 	return b_update_requested;
2250 }
2251 
2252 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2253 {
2254 	if (tun->b_update_mode && !tun->b_mode_enabled) {
2255 		tun->b_update_mode = false;
2256 		*rc = -EINVAL;
2257 	}
2258 }
2259 
2260 static int
2261 qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2262 				   u16 *tun_features, bool *update,
2263 				   struct qed_tunnel_info *tun_src)
2264 {
2265 	struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2266 	struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2267 	u16 bultn_vxlan_port, bultn_geneve_port;
2268 	void *cookie = p_hwfn->cdev->ops_cookie;
2269 	int i, rc = 0;
2270 
2271 	*tun_features = p_hwfn->cdev->tunn_feature_mask;
2272 	bultn_vxlan_port = tun->vxlan_port.port;
2273 	bultn_geneve_port = tun->geneve_port.port;
2274 	qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
2275 	qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
2276 	qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
2277 	qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
2278 	qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2279 
2280 	if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
2281 	    (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2282 	     tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2283 	     tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2284 	     tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2285 	     tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
2286 		tun_src->b_update_rx_cls = false;
2287 		tun_src->b_update_tx_cls = false;
2288 		rc = -EINVAL;
2289 	}
2290 
2291 	if (tun_src->vxlan_port.b_update_port) {
2292 		if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
2293 			tun_src->vxlan_port.b_update_port = false;
2294 		} else {
2295 			*update = true;
2296 			bultn_vxlan_port = tun_src->vxlan_port.port;
2297 		}
2298 	}
2299 
2300 	if (tun_src->geneve_port.b_update_port) {
2301 		if (tun_src->geneve_port.port == tun->geneve_port.port) {
2302 			tun_src->geneve_port.b_update_port = false;
2303 		} else {
2304 			*update = true;
2305 			bultn_geneve_port = tun_src->geneve_port.port;
2306 		}
2307 	}
2308 
2309 	qed_for_each_vf(p_hwfn, i) {
2310 		qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
2311 					       bultn_geneve_port);
2312 	}
2313 
2314 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2315 	ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
2316 
2317 	return rc;
2318 }
2319 
2320 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2321 					     struct qed_ptt *p_ptt,
2322 					     struct qed_vf_info *p_vf)
2323 {
2324 	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2325 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2326 	struct pfvf_update_tunn_param_tlv *p_resp;
2327 	struct vfpf_update_tunn_param_tlv *p_req;
2328 	u8 status = PFVF_STATUS_SUCCESS;
2329 	bool b_update_required = false;
2330 	struct qed_tunnel_info tunn;
2331 	u16 tunn_feature_mask = 0;
2332 	int i, rc = 0;
2333 
2334 	mbx->offset = (u8 *)mbx->reply_virt;
2335 
2336 	memset(&tunn, 0, sizeof(tunn));
2337 	p_req = &mbx->req_virt->tunn_param_update;
2338 
2339 	if (!qed_iov_pf_validate_tunn_param(p_req)) {
2340 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2341 			   "No tunnel update requested by VF\n");
2342 		status = PFVF_STATUS_FAILURE;
2343 		goto send_resp;
2344 	}
2345 
2346 	tunn.b_update_rx_cls = p_req->update_tun_cls;
2347 	tunn.b_update_tx_cls = p_req->update_tun_cls;
2348 
2349 	qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2350 				    QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2351 				    p_req->update_vxlan_port,
2352 				    p_req->vxlan_port);
2353 	qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2354 				    QED_MODE_L2GENEVE_TUNN,
2355 				    p_req->l2geneve_clss,
2356 				    p_req->update_geneve_port,
2357 				    p_req->geneve_port);
2358 	__qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2359 				      QED_MODE_IPGENEVE_TUNN,
2360 				      p_req->ipgeneve_clss);
2361 	__qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2362 				      QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2363 	__qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2364 				      QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2365 
2366 	/* If PF modifies VF's req then it should
2367 	 * still return an error in case of partial configuration
2368 	 * or modified configuration as opposed to requested one.
2369 	 */
2370 	rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2371 						&b_update_required, &tunn);
2372 
2373 	if (rc)
2374 		status = PFVF_STATUS_FAILURE;
2375 
2376 	/* If QED client is willing to update anything ? */
2377 	if (b_update_required) {
2378 		u16 geneve_port;
2379 
2380 		rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2381 					       QED_SPQ_MODE_EBLOCK, NULL);
2382 		if (rc)
2383 			status = PFVF_STATUS_FAILURE;
2384 
2385 		geneve_port = p_tun->geneve_port.port;
2386 		qed_for_each_vf(p_hwfn, i) {
2387 			qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2388 						       p_tun->vxlan_port.port,
2389 						       geneve_port);
2390 		}
2391 	}
2392 
2393 send_resp:
2394 	p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2395 			     CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2396 
2397 	qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2398 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2399 		    sizeof(struct channel_list_end_tlv));
2400 
2401 	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2402 }
2403 
2404 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2405 					  struct qed_ptt *p_ptt,
2406 					  struct qed_vf_info *p_vf,
2407 					  u32 cid, u8 status)
2408 {
2409 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2410 	struct pfvf_start_queue_resp_tlv *p_tlv;
2411 	bool b_legacy = false;
2412 	u16 length;
2413 
2414 	mbx->offset = (u8 *)mbx->reply_virt;
2415 
2416 	/* Taking a bigger struct instead of adding a TLV to list was a
2417 	 * mistake, but one which we're now stuck with, as some older
2418 	 * clients assume the size of the previous response.
2419 	 */
2420 	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2421 	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
2422 		b_legacy = true;
2423 
2424 	if (!b_legacy)
2425 		length = sizeof(*p_tlv);
2426 	else
2427 		length = sizeof(struct pfvf_def_resp_tlv);
2428 
2429 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2430 			    length);
2431 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2432 		    sizeof(struct channel_list_end_tlv));
2433 
2434 	/* Update the TLV with the response */
2435 	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2436 		p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
2437 
2438 	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2439 }
2440 
2441 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2442 				     struct qed_ptt *p_ptt,
2443 				     struct qed_vf_info *vf)
2444 {
2445 	struct qed_queue_start_common_params params;
2446 	struct qed_queue_cid_vf_params vf_params;
2447 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2448 	u8 status = PFVF_STATUS_NO_RESOURCE;
2449 	struct vfpf_start_txq_tlv *req;
2450 	struct qed_vf_queue *p_queue;
2451 	struct qed_queue_cid *p_cid;
2452 	struct qed_sb_info sb_dummy;
2453 	u8 qid_usage_idx, vf_legacy;
2454 	u32 cid = 0;
2455 	int rc;
2456 	u16 pq;
2457 
2458 	memset(&params, 0, sizeof(params));
2459 	req = &mbx->req_virt->start_txq;
2460 
2461 	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2462 				  QED_IOV_VALIDATE_Q_NA) ||
2463 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2464 		goto out;
2465 
2466 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2467 	if (qid_usage_idx == QED_IOV_QID_INVALID)
2468 		goto out;
2469 
2470 	p_queue = &vf->vf_queues[req->tx_qid];
2471 	if (p_queue->cids[qid_usage_idx].p_cid)
2472 		goto out;
2473 
2474 	vf_legacy = qed_vf_calculate_legacy(vf);
2475 
2476 	/* Acquire a new queue-cid */
2477 	params.queue_id = p_queue->fw_tx_qid;
2478 	params.vport_id = vf->vport_id;
2479 	params.stats_id = vf->abs_vf_id + 0x10;
2480 
2481 	/* Since IGU index is passed via sb_info, construct a dummy one */
2482 	memset(&sb_dummy, 0, sizeof(sb_dummy));
2483 	sb_dummy.igu_sb_id = req->hw_sb;
2484 	params.p_sb = &sb_dummy;
2485 	params.sb_idx = req->sb_index;
2486 
2487 	memset(&vf_params, 0, sizeof(vf_params));
2488 	vf_params.vfid = vf->relative_vf_id;
2489 	vf_params.vf_qid = (u8)req->tx_qid;
2490 	vf_params.vf_legacy = vf_legacy;
2491 	vf_params.qid_usage_idx = qid_usage_idx;
2492 
2493 	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2494 				     &params, false, &vf_params);
2495 	if (!p_cid)
2496 		goto out;
2497 
2498 	pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2499 	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
2500 				      req->pbl_addr, req->pbl_size, pq);
2501 	if (rc) {
2502 		status = PFVF_STATUS_FAILURE;
2503 		qed_eth_queue_cid_release(p_hwfn, p_cid);
2504 	} else {
2505 		status = PFVF_STATUS_SUCCESS;
2506 		p_queue->cids[qid_usage_idx].p_cid = p_cid;
2507 		p_queue->cids[qid_usage_idx].b_is_tx = true;
2508 		cid = p_cid->cid;
2509 	}
2510 
2511 out:
2512 	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
2513 }
2514 
2515 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2516 				struct qed_vf_info *vf,
2517 				u16 rxq_id,
2518 				u8 qid_usage_idx, bool cqe_completion)
2519 {
2520 	struct qed_vf_queue *p_queue;
2521 	int rc = 0;
2522 
2523 	if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
2524 		DP_VERBOSE(p_hwfn,
2525 			   QED_MSG_IOV,
2526 			   "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2527 			   vf->relative_vf_id, rxq_id, qid_usage_idx);
2528 		return -EINVAL;
2529 	}
2530 
2531 	p_queue = &vf->vf_queues[rxq_id];
2532 
2533 	/* We've validated the index and the existence of the active RXQ -
2534 	 * now we need to make sure that it's using the correct qid.
2535 	 */
2536 	if (!p_queue->cids[qid_usage_idx].p_cid ||
2537 	    p_queue->cids[qid_usage_idx].b_is_tx) {
2538 		struct qed_queue_cid *p_cid;
2539 
2540 		p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
2541 		DP_VERBOSE(p_hwfn,
2542 			   QED_MSG_IOV,
2543 			   "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2544 			   vf->relative_vf_id,
2545 			   rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
2546 		return -EINVAL;
2547 	}
2548 
2549 	/* Now that we know we have a valid Rx-queue - close it */
2550 	rc = qed_eth_rx_queue_stop(p_hwfn,
2551 				   p_queue->cids[qid_usage_idx].p_cid,
2552 				   false, cqe_completion);
2553 	if (rc)
2554 		return rc;
2555 
2556 	p_queue->cids[qid_usage_idx].p_cid = NULL;
2557 	vf->num_active_rxqs--;
2558 
2559 	return 0;
2560 }
2561 
2562 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2563 				struct qed_vf_info *vf,
2564 				u16 txq_id, u8 qid_usage_idx)
2565 {
2566 	struct qed_vf_queue *p_queue;
2567 	int rc = 0;
2568 
2569 	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2570 		return -EINVAL;
2571 
2572 	p_queue = &vf->vf_queues[txq_id];
2573 	if (!p_queue->cids[qid_usage_idx].p_cid ||
2574 	    !p_queue->cids[qid_usage_idx].b_is_tx)
2575 		return -EINVAL;
2576 
2577 	rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2578 	if (rc)
2579 		return rc;
2580 
2581 	p_queue->cids[qid_usage_idx].p_cid = NULL;
2582 	return 0;
2583 }
2584 
2585 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2586 				     struct qed_ptt *p_ptt,
2587 				     struct qed_vf_info *vf)
2588 {
2589 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2590 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2591 	u8 status = PFVF_STATUS_FAILURE;
2592 	struct vfpf_stop_rxqs_tlv *req;
2593 	u8 qid_usage_idx;
2594 	int rc;
2595 
2596 	/* There has never been an official driver that used this interface
2597 	 * for stopping multiple queues, and it is now considered deprecated.
2598 	 * Validate this isn't used here.
2599 	 */
2600 	req = &mbx->req_virt->stop_rxqs;
2601 	if (req->num_rxqs != 1) {
2602 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2603 			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
2604 			   vf->relative_vf_id);
2605 		status = PFVF_STATUS_NOT_SUPPORTED;
2606 		goto out;
2607 	}
2608 
2609 	/* Find which qid-index is associated with the queue */
2610 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2611 	if (qid_usage_idx == QED_IOV_QID_INVALID)
2612 		goto out;
2613 
2614 	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2615 				  qid_usage_idx, req->cqe_completion);
2616 	if (!rc)
2617 		status = PFVF_STATUS_SUCCESS;
2618 out:
2619 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2620 			     length, status);
2621 }
2622 
2623 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2624 				     struct qed_ptt *p_ptt,
2625 				     struct qed_vf_info *vf)
2626 {
2627 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2628 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2629 	u8 status = PFVF_STATUS_FAILURE;
2630 	struct vfpf_stop_txqs_tlv *req;
2631 	u8 qid_usage_idx;
2632 	int rc;
2633 
2634 	/* There has never been an official driver that used this interface
2635 	 * for stopping multiple queues, and it is now considered deprecated.
2636 	 * Validate this isn't used here.
2637 	 */
2638 	req = &mbx->req_virt->stop_txqs;
2639 	if (req->num_txqs != 1) {
2640 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2641 			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
2642 			   vf->relative_vf_id);
2643 		status = PFVF_STATUS_NOT_SUPPORTED;
2644 		goto out;
2645 	}
2646 
2647 	/* Find which qid-index is associated with the queue */
2648 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2649 	if (qid_usage_idx == QED_IOV_QID_INVALID)
2650 		goto out;
2651 
2652 	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
2653 	if (!rc)
2654 		status = PFVF_STATUS_SUCCESS;
2655 
2656 out:
2657 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2658 			     length, status);
2659 }
2660 
2661 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2662 				       struct qed_ptt *p_ptt,
2663 				       struct qed_vf_info *vf)
2664 {
2665 	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2666 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2667 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2668 	struct vfpf_update_rxq_tlv *req;
2669 	u8 status = PFVF_STATUS_FAILURE;
2670 	u8 complete_event_flg;
2671 	u8 complete_cqe_flg;
2672 	u8 qid_usage_idx;
2673 	int rc;
2674 	u8 i;
2675 
2676 	req = &mbx->req_virt->update_rxq;
2677 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2678 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2679 
2680 	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2681 	if (qid_usage_idx == QED_IOV_QID_INVALID)
2682 		goto out;
2683 
2684 	/* There shouldn't exist a VF that uses queue-qids yet uses this
2685 	 * API with multiple Rx queues. Validate this.
2686 	 */
2687 	if ((vf->acquire.vfdev_info.capabilities &
2688 	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
2689 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2690 			   "VF[%d] supports QIDs but sends multiple queues\n",
2691 			   vf->relative_vf_id);
2692 		goto out;
2693 	}
2694 
2695 	/* Validate inputs - for the legacy case this is still true since
2696 	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2697 	 */
2698 	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2699 		if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2700 					  QED_IOV_VALIDATE_Q_NA) ||
2701 		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2702 		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2703 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2704 				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2705 				   vf->relative_vf_id, req->rx_qid,
2706 				   req->num_rxqs);
2707 			goto out;
2708 		}
2709 	}
2710 
2711 	/* Prepare the handlers */
2712 	for (i = 0; i < req->num_rxqs; i++) {
2713 		u16 qid = req->rx_qid + i;
2714 
2715 		handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2716 	}
2717 
2718 	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2719 					 req->num_rxqs,
2720 					 complete_cqe_flg,
2721 					 complete_event_flg,
2722 					 QED_SPQ_MODE_EBLOCK, NULL);
2723 	if (rc)
2724 		goto out;
2725 
2726 	status = PFVF_STATUS_SUCCESS;
2727 out:
2728 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2729 			     length, status);
2730 }
2731 
2732 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2733 			       void *p_tlvs_list, u16 req_type)
2734 {
2735 	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2736 	int len = 0;
2737 
2738 	do {
2739 		if (!p_tlv->length) {
2740 			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2741 			return NULL;
2742 		}
2743 
2744 		if (p_tlv->type == req_type) {
2745 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2746 				   "Extended tlv type %d, length %d found\n",
2747 				   p_tlv->type, p_tlv->length);
2748 			return p_tlv;
2749 		}
2750 
2751 		len += p_tlv->length;
2752 		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2753 
2754 		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2755 			DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
2756 			return NULL;
2757 		}
2758 	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
2759 
2760 	return NULL;
2761 }
2762 
2763 static void
2764 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2765 			    struct qed_sp_vport_update_params *p_data,
2766 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2767 {
2768 	struct vfpf_vport_update_activate_tlv *p_act_tlv;
2769 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2770 
2771 	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2772 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2773 	if (!p_act_tlv)
2774 		return;
2775 
2776 	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2777 	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2778 	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2779 	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2780 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2781 }
2782 
2783 static void
2784 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2785 			     struct qed_sp_vport_update_params *p_data,
2786 			     struct qed_vf_info *p_vf,
2787 			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2788 {
2789 	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2790 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2791 
2792 	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2793 		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2794 	if (!p_vlan_tlv)
2795 		return;
2796 
2797 	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2798 
2799 	/* Ignore the VF request if we're forcing a vlan */
2800 	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2801 		p_data->update_inner_vlan_removal_flg = 1;
2802 		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2803 	}
2804 
2805 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2806 }
2807 
2808 static void
2809 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2810 			    struct qed_sp_vport_update_params *p_data,
2811 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2812 {
2813 	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2814 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2815 
2816 	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2817 			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2818 						   tlv);
2819 	if (!p_tx_switch_tlv)
2820 		return;
2821 
2822 	p_data->update_tx_switching_flg = 1;
2823 	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2824 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2825 }
2826 
2827 static void
2828 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2829 				  struct qed_sp_vport_update_params *p_data,
2830 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2831 {
2832 	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2833 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2834 
2835 	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2836 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2837 	if (!p_mcast_tlv)
2838 		return;
2839 
2840 	p_data->update_approx_mcast_flg = 1;
2841 	memcpy(p_data->bins, p_mcast_tlv->bins,
2842 	       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2843 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2844 }
2845 
2846 static void
2847 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2848 			      struct qed_sp_vport_update_params *p_data,
2849 			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2850 {
2851 	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2852 	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2853 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2854 
2855 	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2856 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2857 	if (!p_accept_tlv)
2858 		return;
2859 
2860 	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2861 	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2862 	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2863 	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2864 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2865 }
2866 
2867 static void
2868 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2869 				  struct qed_sp_vport_update_params *p_data,
2870 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2871 {
2872 	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2873 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2874 
2875 	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2876 			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2877 						     tlv);
2878 	if (!p_accept_any_vlan)
2879 		return;
2880 
2881 	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2882 	p_data->update_accept_any_vlan_flg =
2883 		    p_accept_any_vlan->update_accept_any_vlan_flg;
2884 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2885 }
2886 
2887 static void
2888 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2889 			    struct qed_vf_info *vf,
2890 			    struct qed_sp_vport_update_params *p_data,
2891 			    struct qed_rss_params *p_rss,
2892 			    struct qed_iov_vf_mbx *p_mbx,
2893 			    u16 *tlvs_mask, u16 *tlvs_accepted)
2894 {
2895 	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2896 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2897 	bool b_reject = false;
2898 	u16 table_size;
2899 	u16 i, q_idx;
2900 
2901 	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2902 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2903 	if (!p_rss_tlv) {
2904 		p_data->rss_params = NULL;
2905 		return;
2906 	}
2907 
2908 	memset(p_rss, 0, sizeof(struct qed_rss_params));
2909 
2910 	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2911 				      VFPF_UPDATE_RSS_CONFIG_FLAG);
2912 	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2913 					    VFPF_UPDATE_RSS_CAPS_FLAG);
2914 	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2915 					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2916 	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2917 				   VFPF_UPDATE_RSS_KEY_FLAG);
2918 
2919 	p_rss->rss_enable = p_rss_tlv->rss_enable;
2920 	p_rss->rss_eng_id = vf->relative_vf_id + 1;
2921 	p_rss->rss_caps = p_rss_tlv->rss_caps;
2922 	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2923 	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2924 
2925 	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2926 			   (1 << p_rss_tlv->rss_table_size_log));
2927 
2928 	for (i = 0; i < table_size; i++) {
2929 		struct qed_queue_cid *p_cid;
2930 
2931 		q_idx = p_rss_tlv->rss_ind_table[i];
2932 		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2933 					  QED_IOV_VALIDATE_Q_ENABLE)) {
2934 			DP_VERBOSE(p_hwfn,
2935 				   QED_MSG_IOV,
2936 				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2937 				   vf->relative_vf_id, q_idx);
2938 			b_reject = true;
2939 			goto out;
2940 		}
2941 
2942 		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
2943 		p_rss->rss_ind_table[i] = p_cid;
2944 	}
2945 
2946 	p_data->rss_params = p_rss;
2947 out:
2948 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2949 	if (!b_reject)
2950 		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
2951 }
2952 
2953 static void
2954 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2955 				struct qed_vf_info *vf,
2956 				struct qed_sp_vport_update_params *p_data,
2957 				struct qed_sge_tpa_params *p_sge_tpa,
2958 				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2959 {
2960 	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2961 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2962 
2963 	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2964 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2965 
2966 	if (!p_sge_tpa_tlv) {
2967 		p_data->sge_tpa_params = NULL;
2968 		return;
2969 	}
2970 
2971 	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2972 
2973 	p_sge_tpa->update_tpa_en_flg =
2974 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2975 	p_sge_tpa->update_tpa_param_flg =
2976 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2977 		VFPF_UPDATE_TPA_PARAM_FLAG);
2978 
2979 	p_sge_tpa->tpa_ipv4_en_flg =
2980 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2981 	p_sge_tpa->tpa_ipv6_en_flg =
2982 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2983 	p_sge_tpa->tpa_pkt_split_flg =
2984 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2985 	p_sge_tpa->tpa_hdr_data_split_flg =
2986 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2987 	p_sge_tpa->tpa_gro_consistent_flg =
2988 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2989 
2990 	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2991 	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2992 	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2993 	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2994 	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2995 
2996 	p_data->sge_tpa_params = p_sge_tpa;
2997 
2998 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2999 }
3000 
3001 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
3002 				    u8 vfid,
3003 				    struct qed_sp_vport_update_params *params,
3004 				    u16 *tlvs)
3005 {
3006 	u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
3007 	struct qed_filter_accept_flags *flags = &params->accept_flags;
3008 	struct qed_public_vf_info *vf_info;
3009 
3010 	/* Untrusted VFs can't even be trusted to know that fact.
3011 	 * Simply indicate everything is configured fine, and trace
3012 	 * configuration 'behind their back'.
3013 	 */
3014 	if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
3015 		return 0;
3016 
3017 	vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3018 
3019 	if (flags->update_rx_mode_config) {
3020 		vf_info->rx_accept_mode = flags->rx_accept_filter;
3021 		if (!vf_info->is_trusted_configured)
3022 			flags->rx_accept_filter &= ~mask;
3023 	}
3024 
3025 	if (flags->update_tx_mode_config) {
3026 		vf_info->tx_accept_mode = flags->tx_accept_filter;
3027 		if (!vf_info->is_trusted_configured)
3028 			flags->tx_accept_filter &= ~mask;
3029 	}
3030 
3031 	return 0;
3032 }
3033 
3034 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
3035 					struct qed_ptt *p_ptt,
3036 					struct qed_vf_info *vf)
3037 {
3038 	struct qed_rss_params *p_rss_params = NULL;
3039 	struct qed_sp_vport_update_params params;
3040 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3041 	struct qed_sge_tpa_params sge_tpa_params;
3042 	u16 tlvs_mask = 0, tlvs_accepted = 0;
3043 	u8 status = PFVF_STATUS_SUCCESS;
3044 	u16 length;
3045 	int rc;
3046 
3047 	/* Valiate PF can send such a request */
3048 	if (!vf->vport_instance) {
3049 		DP_VERBOSE(p_hwfn,
3050 			   QED_MSG_IOV,
3051 			   "No VPORT instance available for VF[%d], failing vport update\n",
3052 			   vf->abs_vf_id);
3053 		status = PFVF_STATUS_FAILURE;
3054 		goto out;
3055 	}
3056 	p_rss_params = vzalloc(sizeof(*p_rss_params));
3057 	if (p_rss_params == NULL) {
3058 		status = PFVF_STATUS_FAILURE;
3059 		goto out;
3060 	}
3061 
3062 	memset(&params, 0, sizeof(params));
3063 	params.opaque_fid = vf->opaque_fid;
3064 	params.vport_id = vf->vport_id;
3065 	params.rss_params = NULL;
3066 
3067 	/* Search for extended tlvs list and update values
3068 	 * from VF in struct qed_sp_vport_update_params.
3069 	 */
3070 	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3071 	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3072 	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3073 	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3074 	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3075 	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3076 	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3077 					&sge_tpa_params, mbx, &tlvs_mask);
3078 
3079 	tlvs_accepted = tlvs_mask;
3080 
3081 	/* Some of the extended TLVs need to be validated first; In that case,
3082 	 * they can update the mask without updating the accepted [so that
3083 	 * PF could communicate to VF it has rejected request].
3084 	 */
3085 	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3086 				    mbx, &tlvs_mask, &tlvs_accepted);
3087 
3088 	if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
3089 				     &params, &tlvs_accepted)) {
3090 		tlvs_accepted = 0;
3091 		status = PFVF_STATUS_NOT_SUPPORTED;
3092 		goto out;
3093 	}
3094 
3095 	if (!tlvs_accepted) {
3096 		if (tlvs_mask)
3097 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3098 				   "Upper-layer prevents VF vport configuration\n");
3099 		else
3100 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3101 				   "No feature tlvs found for vport update\n");
3102 		status = PFVF_STATUS_NOT_SUPPORTED;
3103 		goto out;
3104 	}
3105 
3106 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3107 
3108 	if (rc)
3109 		status = PFVF_STATUS_FAILURE;
3110 
3111 out:
3112 	vfree(p_rss_params);
3113 	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3114 						  tlvs_mask, tlvs_accepted);
3115 	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3116 }
3117 
3118 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
3119 					 struct qed_vf_info *p_vf,
3120 					 struct qed_filter_ucast *p_params)
3121 {
3122 	int i;
3123 
3124 	/* First remove entries and then add new ones */
3125 	if (p_params->opcode == QED_FILTER_REMOVE) {
3126 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3127 			if (p_vf->shadow_config.vlans[i].used &&
3128 			    p_vf->shadow_config.vlans[i].vid ==
3129 			    p_params->vlan) {
3130 				p_vf->shadow_config.vlans[i].used = false;
3131 				break;
3132 			}
3133 		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3134 			DP_VERBOSE(p_hwfn,
3135 				   QED_MSG_IOV,
3136 				   "VF [%d] - Tries to remove a non-existing vlan\n",
3137 				   p_vf->relative_vf_id);
3138 			return -EINVAL;
3139 		}
3140 	} else if (p_params->opcode == QED_FILTER_REPLACE ||
3141 		   p_params->opcode == QED_FILTER_FLUSH) {
3142 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3143 			p_vf->shadow_config.vlans[i].used = false;
3144 	}
3145 
3146 	/* In forced mode, we're willing to remove entries - but we don't add
3147 	 * new ones.
3148 	 */
3149 	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
3150 		return 0;
3151 
3152 	if (p_params->opcode == QED_FILTER_ADD ||
3153 	    p_params->opcode == QED_FILTER_REPLACE) {
3154 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3155 			if (p_vf->shadow_config.vlans[i].used)
3156 				continue;
3157 
3158 			p_vf->shadow_config.vlans[i].used = true;
3159 			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3160 			break;
3161 		}
3162 
3163 		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3164 			DP_VERBOSE(p_hwfn,
3165 				   QED_MSG_IOV,
3166 				   "VF [%d] - Tries to configure more than %d vlan filters\n",
3167 				   p_vf->relative_vf_id,
3168 				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
3169 			return -EINVAL;
3170 		}
3171 	}
3172 
3173 	return 0;
3174 }
3175 
3176 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
3177 					struct qed_vf_info *p_vf,
3178 					struct qed_filter_ucast *p_params)
3179 {
3180 	int i;
3181 
3182 	/* If we're in forced-mode, we don't allow any change */
3183 	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
3184 		return 0;
3185 
3186 	/* Don't keep track of shadow copy since we don't intend to restore. */
3187 	if (p_vf->p_vf_info.is_trusted_configured)
3188 		return 0;
3189 
3190 	/* First remove entries and then add new ones */
3191 	if (p_params->opcode == QED_FILTER_REMOVE) {
3192 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3193 			if (ether_addr_equal(p_vf->shadow_config.macs[i],
3194 					     p_params->mac)) {
3195 				eth_zero_addr(p_vf->shadow_config.macs[i]);
3196 				break;
3197 			}
3198 		}
3199 
3200 		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3201 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3202 				   "MAC isn't configured\n");
3203 			return -EINVAL;
3204 		}
3205 	} else if (p_params->opcode == QED_FILTER_REPLACE ||
3206 		   p_params->opcode == QED_FILTER_FLUSH) {
3207 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
3208 			eth_zero_addr(p_vf->shadow_config.macs[i]);
3209 	}
3210 
3211 	/* List the new MAC address */
3212 	if (p_params->opcode != QED_FILTER_ADD &&
3213 	    p_params->opcode != QED_FILTER_REPLACE)
3214 		return 0;
3215 
3216 	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3217 		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
3218 			ether_addr_copy(p_vf->shadow_config.macs[i],
3219 					p_params->mac);
3220 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3221 				   "Added MAC at %d entry in shadow\n", i);
3222 			break;
3223 		}
3224 	}
3225 
3226 	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3227 		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3228 		return -EINVAL;
3229 	}
3230 
3231 	return 0;
3232 }
3233 
3234 static int
3235 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3236 				 struct qed_vf_info *p_vf,
3237 				 struct qed_filter_ucast *p_params)
3238 {
3239 	int rc = 0;
3240 
3241 	if (p_params->type == QED_FILTER_MAC) {
3242 		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3243 		if (rc)
3244 			return rc;
3245 	}
3246 
3247 	if (p_params->type == QED_FILTER_VLAN)
3248 		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3249 
3250 	return rc;
3251 }
3252 
3253 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3254 			     int vfid, struct qed_filter_ucast *params)
3255 {
3256 	struct qed_public_vf_info *vf;
3257 
3258 	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3259 	if (!vf)
3260 		return -EINVAL;
3261 
3262 	/* No real decision to make; Store the configured MAC */
3263 	if (params->type == QED_FILTER_MAC ||
3264 	    params->type == QED_FILTER_MAC_VLAN) {
3265 		ether_addr_copy(vf->mac, params->mac);
3266 
3267 		if (vf->is_trusted_configured) {
3268 			qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
3269 
3270 			/* Update and post bulleitin again */
3271 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3272 		}
3273 	}
3274 
3275 	return 0;
3276 }
3277 
3278 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3279 					struct qed_ptt *p_ptt,
3280 					struct qed_vf_info *vf)
3281 {
3282 	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3283 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3284 	struct vfpf_ucast_filter_tlv *req;
3285 	u8 status = PFVF_STATUS_SUCCESS;
3286 	struct qed_filter_ucast params;
3287 	int rc;
3288 
3289 	/* Prepare the unicast filter params */
3290 	memset(&params, 0, sizeof(struct qed_filter_ucast));
3291 	req = &mbx->req_virt->ucast_filter;
3292 	params.opcode = (enum qed_filter_opcode)req->opcode;
3293 	params.type = (enum qed_filter_ucast_type)req->type;
3294 
3295 	params.is_rx_filter = 1;
3296 	params.is_tx_filter = 1;
3297 	params.vport_to_remove_from = vf->vport_id;
3298 	params.vport_to_add_to = vf->vport_id;
3299 	memcpy(params.mac, req->mac, ETH_ALEN);
3300 	params.vlan = req->vlan;
3301 
3302 	DP_VERBOSE(p_hwfn,
3303 		   QED_MSG_IOV,
3304 		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3305 		   vf->abs_vf_id, params.opcode, params.type,
3306 		   params.is_rx_filter ? "RX" : "",
3307 		   params.is_tx_filter ? "TX" : "",
3308 		   params.vport_to_add_to,
3309 		   params.mac[0], params.mac[1],
3310 		   params.mac[2], params.mac[3],
3311 		   params.mac[4], params.mac[5], params.vlan);
3312 
3313 	if (!vf->vport_instance) {
3314 		DP_VERBOSE(p_hwfn,
3315 			   QED_MSG_IOV,
3316 			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3317 			   vf->abs_vf_id);
3318 		status = PFVF_STATUS_FAILURE;
3319 		goto out;
3320 	}
3321 
3322 	/* Update shadow copy of the VF configuration */
3323 	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3324 		status = PFVF_STATUS_FAILURE;
3325 		goto out;
3326 	}
3327 
3328 	/* Determine if the unicast filtering is acceptible by PF */
3329 	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3330 	    (params.type == QED_FILTER_VLAN ||
3331 	     params.type == QED_FILTER_MAC_VLAN)) {
3332 		/* Once VLAN is forced or PVID is set, do not allow
3333 		 * to add/replace any further VLANs.
3334 		 */
3335 		if (params.opcode == QED_FILTER_ADD ||
3336 		    params.opcode == QED_FILTER_REPLACE)
3337 			status = PFVF_STATUS_FORCED;
3338 		goto out;
3339 	}
3340 
3341 	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3342 	    (params.type == QED_FILTER_MAC ||
3343 	     params.type == QED_FILTER_MAC_VLAN)) {
3344 		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3345 		    (params.opcode != QED_FILTER_ADD &&
3346 		     params.opcode != QED_FILTER_REPLACE))
3347 			status = PFVF_STATUS_FORCED;
3348 		goto out;
3349 	}
3350 
3351 	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3352 	if (rc) {
3353 		status = PFVF_STATUS_FAILURE;
3354 		goto out;
3355 	}
3356 
3357 	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3358 				     QED_SPQ_MODE_CB, NULL);
3359 	if (rc)
3360 		status = PFVF_STATUS_FAILURE;
3361 
3362 out:
3363 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3364 			     sizeof(struct pfvf_def_resp_tlv), status);
3365 }
3366 
3367 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3368 				       struct qed_ptt *p_ptt,
3369 				       struct qed_vf_info *vf)
3370 {
3371 	int i;
3372 
3373 	/* Reset the SBs */
3374 	for (i = 0; i < vf->num_sbs; i++)
3375 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3376 						vf->igu_sbs[i],
3377 						vf->opaque_fid, false);
3378 
3379 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3380 			     sizeof(struct pfvf_def_resp_tlv),
3381 			     PFVF_STATUS_SUCCESS);
3382 }
3383 
3384 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3385 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3386 {
3387 	u16 length = sizeof(struct pfvf_def_resp_tlv);
3388 	u8 status = PFVF_STATUS_SUCCESS;
3389 
3390 	/* Disable Interrupts for VF */
3391 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3392 
3393 	/* Reset Permission table */
3394 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3395 
3396 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3397 			     length, status);
3398 }
3399 
3400 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3401 				   struct qed_ptt *p_ptt,
3402 				   struct qed_vf_info *p_vf)
3403 {
3404 	u16 length = sizeof(struct pfvf_def_resp_tlv);
3405 	u8 status = PFVF_STATUS_SUCCESS;
3406 	int rc = 0;
3407 
3408 	qed_iov_vf_cleanup(p_hwfn, p_vf);
3409 
3410 	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3411 		/* Stopping the VF */
3412 		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3413 				    p_vf->opaque_fid);
3414 
3415 		if (rc) {
3416 			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3417 			       rc);
3418 			status = PFVF_STATUS_FAILURE;
3419 		}
3420 
3421 		p_vf->state = VF_STOPPED;
3422 	}
3423 
3424 	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3425 			     length, status);
3426 }
3427 
3428 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
3429 				       struct qed_ptt *p_ptt,
3430 				       struct qed_vf_info *p_vf)
3431 {
3432 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3433 	struct pfvf_read_coal_resp_tlv *p_resp;
3434 	struct vfpf_read_coal_req_tlv *req;
3435 	u8 status = PFVF_STATUS_FAILURE;
3436 	struct qed_vf_queue *p_queue;
3437 	struct qed_queue_cid *p_cid;
3438 	u16 coal = 0, qid, i;
3439 	bool b_is_rx;
3440 	int rc = 0;
3441 
3442 	mbx->offset = (u8 *)mbx->reply_virt;
3443 	req = &mbx->req_virt->read_coal_req;
3444 
3445 	qid = req->qid;
3446 	b_is_rx = req->is_rx ? true : false;
3447 
3448 	if (b_is_rx) {
3449 		if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
3450 					  QED_IOV_VALIDATE_Q_ENABLE)) {
3451 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3452 				   "VF[%d]: Invalid Rx queue_id = %d\n",
3453 				   p_vf->abs_vf_id, qid);
3454 			goto send_resp;
3455 		}
3456 
3457 		p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3458 		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3459 		if (rc)
3460 			goto send_resp;
3461 	} else {
3462 		if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
3463 					  QED_IOV_VALIDATE_Q_ENABLE)) {
3464 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3465 				   "VF[%d]: Invalid Tx queue_id = %d\n",
3466 				   p_vf->abs_vf_id, qid);
3467 			goto send_resp;
3468 		}
3469 		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3470 			p_queue = &p_vf->vf_queues[qid];
3471 			if ((!p_queue->cids[i].p_cid) ||
3472 			    (!p_queue->cids[i].b_is_tx))
3473 				continue;
3474 
3475 			p_cid = p_queue->cids[i].p_cid;
3476 
3477 			rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3478 			if (rc)
3479 				goto send_resp;
3480 			break;
3481 		}
3482 	}
3483 
3484 	status = PFVF_STATUS_SUCCESS;
3485 
3486 send_resp:
3487 	p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
3488 			     sizeof(*p_resp));
3489 	p_resp->coal = coal;
3490 
3491 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
3492 		    sizeof(struct channel_list_end_tlv));
3493 
3494 	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3495 }
3496 
3497 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
3498 				       struct qed_ptt *p_ptt,
3499 				       struct qed_vf_info *vf)
3500 {
3501 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3502 	struct vfpf_update_coalesce *req;
3503 	u8 status = PFVF_STATUS_FAILURE;
3504 	struct qed_queue_cid *p_cid;
3505 	u16 rx_coal, tx_coal;
3506 	int rc = 0, i;
3507 	u16 qid;
3508 
3509 	req = &mbx->req_virt->update_coalesce;
3510 
3511 	rx_coal = req->rx_coal;
3512 	tx_coal = req->tx_coal;
3513 	qid = req->qid;
3514 
3515 	if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
3516 				  QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
3517 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3518 			   "VF[%d]: Invalid Rx queue_id = %d\n",
3519 			   vf->abs_vf_id, qid);
3520 		goto out;
3521 	}
3522 
3523 	if (!qed_iov_validate_txq(p_hwfn, vf, qid,
3524 				  QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
3525 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3526 			   "VF[%d]: Invalid Tx queue_id = %d\n",
3527 			   vf->abs_vf_id, qid);
3528 		goto out;
3529 	}
3530 
3531 	DP_VERBOSE(p_hwfn,
3532 		   QED_MSG_IOV,
3533 		   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3534 		   vf->abs_vf_id, rx_coal, tx_coal, qid);
3535 
3536 	if (rx_coal) {
3537 		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3538 
3539 		rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3540 		if (rc) {
3541 			DP_VERBOSE(p_hwfn,
3542 				   QED_MSG_IOV,
3543 				   "VF[%d]: Unable to set rx queue = %d coalesce\n",
3544 				   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3545 			goto out;
3546 		}
3547 		vf->rx_coal = rx_coal;
3548 	}
3549 
3550 	if (tx_coal) {
3551 		struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
3552 
3553 		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3554 			if (!p_queue->cids[i].p_cid)
3555 				continue;
3556 
3557 			if (!p_queue->cids[i].b_is_tx)
3558 				continue;
3559 
3560 			rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3561 						  p_queue->cids[i].p_cid);
3562 
3563 			if (rc) {
3564 				DP_VERBOSE(p_hwfn,
3565 					   QED_MSG_IOV,
3566 					   "VF[%d]: Unable to set tx queue coalesce\n",
3567 					   vf->abs_vf_id);
3568 				goto out;
3569 			}
3570 		}
3571 		vf->tx_coal = tx_coal;
3572 	}
3573 
3574 	status = PFVF_STATUS_SUCCESS;
3575 out:
3576 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3577 			     sizeof(struct pfvf_def_resp_tlv), status);
3578 }
3579 static int
3580 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3581 			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3582 {
3583 	int cnt;
3584 	u32 val;
3585 
3586 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3587 
3588 	for (cnt = 0; cnt < 50; cnt++) {
3589 		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3590 		if (!val)
3591 			break;
3592 		msleep(20);
3593 	}
3594 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3595 
3596 	if (cnt == 50) {
3597 		DP_ERR(p_hwfn,
3598 		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3599 		       p_vf->abs_vf_id, val);
3600 		return -EBUSY;
3601 	}
3602 
3603 	return 0;
3604 }
3605 
3606 static int
3607 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3608 			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3609 {
3610 	u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3611 	int i, cnt;
3612 
3613 	/* Read initial consumers & producers */
3614 	for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3615 		u32 prod;
3616 
3617 		cons[i] = qed_rd(p_hwfn, p_ptt,
3618 				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3619 				 i * 0x40);
3620 		prod = qed_rd(p_hwfn, p_ptt,
3621 			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3622 			      i * 0x40);
3623 		distance[i] = prod - cons[i];
3624 	}
3625 
3626 	/* Wait for consumers to pass the producers */
3627 	i = 0;
3628 	for (cnt = 0; cnt < 50; cnt++) {
3629 		for (; i < MAX_NUM_VOQS_E4; i++) {
3630 			u32 tmp;
3631 
3632 			tmp = qed_rd(p_hwfn, p_ptt,
3633 				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3634 				     i * 0x40);
3635 			if (distance[i] > tmp - cons[i])
3636 				break;
3637 		}
3638 
3639 		if (i == MAX_NUM_VOQS_E4)
3640 			break;
3641 
3642 		msleep(20);
3643 	}
3644 
3645 	if (cnt == 50) {
3646 		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3647 		       p_vf->abs_vf_id, i);
3648 		return -EBUSY;
3649 	}
3650 
3651 	return 0;
3652 }
3653 
3654 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3655 			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3656 {
3657 	int rc;
3658 
3659 	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3660 	if (rc)
3661 		return rc;
3662 
3663 	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3664 	if (rc)
3665 		return rc;
3666 
3667 	return 0;
3668 }
3669 
3670 static int
3671 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3672 			       struct qed_ptt *p_ptt,
3673 			       u16 rel_vf_id, u32 *ack_vfs)
3674 {
3675 	struct qed_vf_info *p_vf;
3676 	int rc = 0;
3677 
3678 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3679 	if (!p_vf)
3680 		return 0;
3681 
3682 	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3683 	    (1ULL << (rel_vf_id % 64))) {
3684 		u16 vfid = p_vf->abs_vf_id;
3685 
3686 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3687 			   "VF[%d] - Handling FLR\n", vfid);
3688 
3689 		qed_iov_vf_cleanup(p_hwfn, p_vf);
3690 
3691 		/* If VF isn't active, no need for anything but SW */
3692 		if (!p_vf->b_init)
3693 			goto cleanup;
3694 
3695 		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3696 		if (rc)
3697 			goto cleanup;
3698 
3699 		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3700 		if (rc) {
3701 			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
3702 			return rc;
3703 		}
3704 
3705 		/* Workaround to make VF-PF channel ready, as FW
3706 		 * doesn't do that as a part of FLR.
3707 		 */
3708 		REG_WR(p_hwfn,
3709 		       GTT_BAR0_MAP_REG_USDM_RAM +
3710 		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3711 
3712 		/* VF_STOPPED has to be set only after final cleanup
3713 		 * but prior to re-enabling the VF.
3714 		 */
3715 		p_vf->state = VF_STOPPED;
3716 
3717 		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3718 		if (rc) {
3719 			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
3720 			       vfid);
3721 			return rc;
3722 		}
3723 cleanup:
3724 		/* Mark VF for ack and clean pending state */
3725 		if (p_vf->state == VF_RESET)
3726 			p_vf->state = VF_STOPPED;
3727 		ack_vfs[vfid / 32] |= BIT((vfid % 32));
3728 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3729 		    ~(1ULL << (rel_vf_id % 64));
3730 		p_vf->vf_mbx.b_pending_msg = false;
3731 	}
3732 
3733 	return rc;
3734 }
3735 
3736 static int
3737 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3738 {
3739 	u32 ack_vfs[VF_MAX_STATIC / 32];
3740 	int rc = 0;
3741 	u16 i;
3742 
3743 	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3744 
3745 	/* Since BRB <-> PRS interface can't be tested as part of the flr
3746 	 * polling due to HW limitations, simply sleep a bit. And since
3747 	 * there's no need to wait per-vf, do it before looping.
3748 	 */
3749 	msleep(100);
3750 
3751 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3752 		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3753 
3754 	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3755 	return rc;
3756 }
3757 
3758 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3759 {
3760 	bool found = false;
3761 	u16 i;
3762 
3763 	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3764 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3765 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3766 			   "[%08x,...,%08x]: %08x\n",
3767 			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3768 
3769 	if (!p_hwfn->cdev->p_iov_info) {
3770 		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3771 		return false;
3772 	}
3773 
3774 	/* Mark VFs */
3775 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3776 		struct qed_vf_info *p_vf;
3777 		u8 vfid;
3778 
3779 		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3780 		if (!p_vf)
3781 			continue;
3782 
3783 		vfid = p_vf->abs_vf_id;
3784 		if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3785 			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3786 			u16 rel_vf_id = p_vf->relative_vf_id;
3787 
3788 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3789 				   "VF[%d] [rel %d] got FLR-ed\n",
3790 				   vfid, rel_vf_id);
3791 
3792 			p_vf->state = VF_RESET;
3793 
3794 			/* No need to lock here, since pending_flr should
3795 			 * only change here and before ACKing MFw. Since
3796 			 * MFW will not trigger an additional attention for
3797 			 * VF flr until ACKs, we're safe.
3798 			 */
3799 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3800 			found = true;
3801 		}
3802 	}
3803 
3804 	return found;
3805 }
3806 
3807 static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
3808 			     u16 vfid,
3809 			     struct qed_mcp_link_params *p_params,
3810 			     struct qed_mcp_link_state *p_link,
3811 			     struct qed_mcp_link_capabilities *p_caps)
3812 {
3813 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3814 						       vfid,
3815 						       false);
3816 	struct qed_bulletin_content *p_bulletin;
3817 
3818 	if (!p_vf)
3819 		return;
3820 
3821 	p_bulletin = p_vf->bulletin.p_virt;
3822 
3823 	if (p_params)
3824 		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3825 	if (p_link)
3826 		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3827 	if (p_caps)
3828 		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3829 }
3830 
3831 static int
3832 qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
3833 				  struct qed_ptt *p_ptt,
3834 				  struct qed_vf_info *p_vf)
3835 {
3836 	struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
3837 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3838 	struct vfpf_bulletin_update_mac_tlv *p_req;
3839 	u8 status = PFVF_STATUS_SUCCESS;
3840 	int rc = 0;
3841 
3842 	if (!p_vf->p_vf_info.is_trusted_configured) {
3843 		DP_VERBOSE(p_hwfn,
3844 			   QED_MSG_IOV,
3845 			   "Blocking bulletin update request from untrusted VF[%d]\n",
3846 			   p_vf->abs_vf_id);
3847 		status = PFVF_STATUS_NOT_SUPPORTED;
3848 		rc = -EINVAL;
3849 		goto send_status;
3850 	}
3851 
3852 	p_req = &mbx->req_virt->bulletin_update_mac;
3853 	ether_addr_copy(p_bulletin->mac, p_req->mac);
3854 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3855 		   "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
3856 		   p_vf->abs_vf_id, p_req->mac);
3857 
3858 send_status:
3859 	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3860 			     CHANNEL_TLV_BULLETIN_UPDATE_MAC,
3861 			     sizeof(struct pfvf_def_resp_tlv), status);
3862 	return rc;
3863 }
3864 
3865 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3866 				    struct qed_ptt *p_ptt, int vfid)
3867 {
3868 	struct qed_iov_vf_mbx *mbx;
3869 	struct qed_vf_info *p_vf;
3870 
3871 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3872 	if (!p_vf)
3873 		return;
3874 
3875 	mbx = &p_vf->vf_mbx;
3876 
3877 	/* qed_iov_process_mbx_request */
3878 	if (!mbx->b_pending_msg) {
3879 		DP_NOTICE(p_hwfn,
3880 			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
3881 			  p_vf->abs_vf_id);
3882 		return;
3883 	}
3884 	mbx->b_pending_msg = false;
3885 
3886 	mbx->first_tlv = mbx->req_virt->first_tlv;
3887 
3888 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3889 		   "VF[%02x]: Processing mailbox message [type %04x]\n",
3890 		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3891 
3892 	/* check if tlv type is known */
3893 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3894 	    !p_vf->b_malicious) {
3895 		switch (mbx->first_tlv.tl.type) {
3896 		case CHANNEL_TLV_ACQUIRE:
3897 			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3898 			break;
3899 		case CHANNEL_TLV_VPORT_START:
3900 			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3901 			break;
3902 		case CHANNEL_TLV_VPORT_TEARDOWN:
3903 			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3904 			break;
3905 		case CHANNEL_TLV_START_RXQ:
3906 			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3907 			break;
3908 		case CHANNEL_TLV_START_TXQ:
3909 			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3910 			break;
3911 		case CHANNEL_TLV_STOP_RXQS:
3912 			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3913 			break;
3914 		case CHANNEL_TLV_STOP_TXQS:
3915 			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3916 			break;
3917 		case CHANNEL_TLV_UPDATE_RXQ:
3918 			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3919 			break;
3920 		case CHANNEL_TLV_VPORT_UPDATE:
3921 			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3922 			break;
3923 		case CHANNEL_TLV_UCAST_FILTER:
3924 			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3925 			break;
3926 		case CHANNEL_TLV_CLOSE:
3927 			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3928 			break;
3929 		case CHANNEL_TLV_INT_CLEANUP:
3930 			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3931 			break;
3932 		case CHANNEL_TLV_RELEASE:
3933 			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3934 			break;
3935 		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3936 			qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3937 			break;
3938 		case CHANNEL_TLV_COALESCE_UPDATE:
3939 			qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3940 			break;
3941 		case CHANNEL_TLV_COALESCE_READ:
3942 			qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
3943 			break;
3944 		case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
3945 			qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
3946 			break;
3947 		}
3948 	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3949 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3950 			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3951 			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3952 
3953 		qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3954 				     mbx->first_tlv.tl.type,
3955 				     sizeof(struct pfvf_def_resp_tlv),
3956 				     PFVF_STATUS_MALICIOUS);
3957 	} else {
3958 		/* unknown TLV - this may belong to a VF driver from the future
3959 		 * - a version written after this PF driver was written, which
3960 		 * supports features unknown as of yet. Too bad since we don't
3961 		 * support them. Or this may be because someone wrote a crappy
3962 		 * VF driver and is sending garbage over the channel.
3963 		 */
3964 		DP_NOTICE(p_hwfn,
3965 			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3966 			  p_vf->abs_vf_id,
3967 			  mbx->first_tlv.tl.type,
3968 			  mbx->first_tlv.tl.length,
3969 			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3970 
3971 		/* Try replying in case reply address matches the acquisition's
3972 		 * posted address.
3973 		 */
3974 		if (p_vf->acquire.first_tlv.reply_address &&
3975 		    (mbx->first_tlv.reply_address ==
3976 		     p_vf->acquire.first_tlv.reply_address)) {
3977 			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3978 					     mbx->first_tlv.tl.type,
3979 					     sizeof(struct pfvf_def_resp_tlv),
3980 					     PFVF_STATUS_NOT_SUPPORTED);
3981 		} else {
3982 			DP_VERBOSE(p_hwfn,
3983 				   QED_MSG_IOV,
3984 				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3985 				   p_vf->abs_vf_id);
3986 		}
3987 	}
3988 }
3989 
3990 static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
3991 {
3992 	int i;
3993 
3994 	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3995 
3996 	qed_for_each_vf(p_hwfn, i) {
3997 		struct qed_vf_info *p_vf;
3998 
3999 		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
4000 		if (p_vf->vf_mbx.b_pending_msg)
4001 			events[i / 64] |= 1ULL << (i % 64);
4002 	}
4003 }
4004 
4005 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
4006 						       u16 abs_vfid)
4007 {
4008 	u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
4009 
4010 	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
4011 		DP_VERBOSE(p_hwfn,
4012 			   QED_MSG_IOV,
4013 			   "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
4014 			   abs_vfid);
4015 		return NULL;
4016 	}
4017 
4018 	return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
4019 }
4020 
4021 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
4022 			      u16 abs_vfid, struct regpair *vf_msg)
4023 {
4024 	struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
4025 			   abs_vfid);
4026 
4027 	if (!p_vf)
4028 		return 0;
4029 
4030 	/* List the physical address of the request so that handler
4031 	 * could later on copy the message from it.
4032 	 */
4033 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
4034 
4035 	/* Mark the event and schedule the workqueue */
4036 	p_vf->vf_mbx.b_pending_msg = true;
4037 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
4038 
4039 	return 0;
4040 }
4041 
4042 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
4043 				     struct malicious_vf_eqe_data *p_data)
4044 {
4045 	struct qed_vf_info *p_vf;
4046 
4047 	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4048 
4049 	if (!p_vf)
4050 		return;
4051 
4052 	if (!p_vf->b_malicious) {
4053 		DP_NOTICE(p_hwfn,
4054 			  "VF [%d] - Malicious behavior [%02x]\n",
4055 			  p_vf->abs_vf_id, p_data->err_id);
4056 
4057 		p_vf->b_malicious = true;
4058 	} else {
4059 		DP_INFO(p_hwfn,
4060 			"VF [%d] - Malicious behavior [%02x]\n",
4061 			p_vf->abs_vf_id, p_data->err_id);
4062 	}
4063 }
4064 
4065 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
4066 			       u8 opcode,
4067 			       __le16 echo,
4068 			       union event_ring_data *data, u8 fw_return_code)
4069 {
4070 	switch (opcode) {
4071 	case COMMON_EVENT_VF_PF_CHANNEL:
4072 		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
4073 					  &data->vf_pf_channel.msg_addr);
4074 	case COMMON_EVENT_MALICIOUS_VF:
4075 		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4076 		return 0;
4077 	default:
4078 		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
4079 			opcode);
4080 		return -EINVAL;
4081 	}
4082 }
4083 
4084 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4085 {
4086 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
4087 	u16 i;
4088 
4089 	if (!p_iov)
4090 		goto out;
4091 
4092 	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4093 		if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
4094 			return i;
4095 
4096 out:
4097 	return MAX_NUM_VFS;
4098 }
4099 
4100 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
4101 			       int vfid)
4102 {
4103 	struct qed_dmae_params params;
4104 	struct qed_vf_info *vf_info;
4105 
4106 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4107 	if (!vf_info)
4108 		return -EINVAL;
4109 
4110 	memset(&params, 0, sizeof(params));
4111 	SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
4112 	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
4113 	params.src_vfid = vf_info->abs_vf_id;
4114 
4115 	if (qed_dmae_host2host(p_hwfn, ptt,
4116 			       vf_info->vf_mbx.pending_req,
4117 			       vf_info->vf_mbx.req_phys,
4118 			       sizeof(union vfpf_tlvs) / 4, &params)) {
4119 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4120 			   "Failed to copy message from VF 0x%02x\n", vfid);
4121 
4122 		return -EIO;
4123 	}
4124 
4125 	return 0;
4126 }
4127 
4128 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
4129 					    u8 *mac, int vfid)
4130 {
4131 	struct qed_vf_info *vf_info;
4132 	u64 feature;
4133 
4134 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4135 	if (!vf_info) {
4136 		DP_NOTICE(p_hwfn->cdev,
4137 			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4138 		return;
4139 	}
4140 
4141 	if (vf_info->b_malicious) {
4142 		DP_NOTICE(p_hwfn->cdev,
4143 			  "Can't set forced MAC to malicious VF [%d]\n", vfid);
4144 		return;
4145 	}
4146 
4147 	if (vf_info->p_vf_info.is_trusted_configured) {
4148 		feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4149 		/* Trust mode will disable Forced MAC */
4150 		vf_info->bulletin.p_virt->valid_bitmap &=
4151 			~BIT(MAC_ADDR_FORCED);
4152 	} else {
4153 		feature = BIT(MAC_ADDR_FORCED);
4154 		/* Forced MAC will disable MAC_ADDR */
4155 		vf_info->bulletin.p_virt->valid_bitmap &=
4156 			~BIT(VFPF_BULLETIN_MAC_ADDR);
4157 	}
4158 
4159 	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4160 
4161 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
4162 
4163 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4164 }
4165 
4166 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
4167 {
4168 	struct qed_vf_info *vf_info;
4169 	u64 feature;
4170 
4171 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4172 	if (!vf_info) {
4173 		DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
4174 			  vfid);
4175 		return -EINVAL;
4176 	}
4177 
4178 	if (vf_info->b_malicious) {
4179 		DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
4180 			  vfid);
4181 		return -EINVAL;
4182 	}
4183 
4184 	if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
4185 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4186 			   "Can not set MAC, Forced MAC is configured\n");
4187 		return -EINVAL;
4188 	}
4189 
4190 	feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4191 	ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
4192 
4193 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
4194 
4195 	if (vf_info->p_vf_info.is_trusted_configured)
4196 		qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4197 
4198 	return 0;
4199 }
4200 
4201 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
4202 					     u16 pvid, int vfid)
4203 {
4204 	struct qed_vf_info *vf_info;
4205 	u64 feature;
4206 
4207 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4208 	if (!vf_info) {
4209 		DP_NOTICE(p_hwfn->cdev,
4210 			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4211 		return;
4212 	}
4213 
4214 	if (vf_info->b_malicious) {
4215 		DP_NOTICE(p_hwfn->cdev,
4216 			  "Can't set forced vlan to malicious VF [%d]\n", vfid);
4217 		return;
4218 	}
4219 
4220 	feature = 1 << VLAN_ADDR_FORCED;
4221 	vf_info->bulletin.p_virt->pvid = pvid;
4222 	if (pvid)
4223 		vf_info->bulletin.p_virt->valid_bitmap |= feature;
4224 	else
4225 		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4226 
4227 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4228 }
4229 
4230 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
4231 				    int vfid, u16 vxlan_port, u16 geneve_port)
4232 {
4233 	struct qed_vf_info *vf_info;
4234 
4235 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4236 	if (!vf_info) {
4237 		DP_NOTICE(p_hwfn->cdev,
4238 			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
4239 		return;
4240 	}
4241 
4242 	if (vf_info->b_malicious) {
4243 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4244 			   "Can not set udp ports to malicious VF [%d]\n",
4245 			   vfid);
4246 		return;
4247 	}
4248 
4249 	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4250 	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4251 }
4252 
4253 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
4254 {
4255 	struct qed_vf_info *p_vf_info;
4256 
4257 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4258 	if (!p_vf_info)
4259 		return false;
4260 
4261 	return !!p_vf_info->vport_instance;
4262 }
4263 
4264 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
4265 {
4266 	struct qed_vf_info *p_vf_info;
4267 
4268 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4269 	if (!p_vf_info)
4270 		return true;
4271 
4272 	return p_vf_info->state == VF_STOPPED;
4273 }
4274 
4275 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
4276 {
4277 	struct qed_vf_info *vf_info;
4278 
4279 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4280 	if (!vf_info)
4281 		return false;
4282 
4283 	return vf_info->spoof_chk;
4284 }
4285 
4286 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
4287 {
4288 	struct qed_vf_info *vf;
4289 	int rc = -EINVAL;
4290 
4291 	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4292 		DP_NOTICE(p_hwfn,
4293 			  "SR-IOV sanity check failed, can't set spoofchk\n");
4294 		goto out;
4295 	}
4296 
4297 	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4298 	if (!vf)
4299 		goto out;
4300 
4301 	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4302 		/* After VF VPORT start PF will configure spoof check */
4303 		vf->req_spoofchk_val = val;
4304 		rc = 0;
4305 		goto out;
4306 	}
4307 
4308 	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
4309 
4310 out:
4311 	return rc;
4312 }
4313 
4314 static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4315 {
4316 	struct qed_vf_info *p_vf;
4317 
4318 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4319 	if (!p_vf || !p_vf->bulletin.p_virt)
4320 		return NULL;
4321 
4322 	if (!(p_vf->bulletin.p_virt->valid_bitmap &
4323 	      BIT(VFPF_BULLETIN_MAC_ADDR)))
4324 		return NULL;
4325 
4326 	return p_vf->bulletin.p_virt->mac;
4327 }
4328 
4329 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
4330 					   u16 rel_vf_id)
4331 {
4332 	struct qed_vf_info *p_vf;
4333 
4334 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4335 	if (!p_vf || !p_vf->bulletin.p_virt)
4336 		return NULL;
4337 
4338 	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
4339 		return NULL;
4340 
4341 	return p_vf->bulletin.p_virt->mac;
4342 }
4343 
4344 static u16
4345 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4346 {
4347 	struct qed_vf_info *p_vf;
4348 
4349 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4350 	if (!p_vf || !p_vf->bulletin.p_virt)
4351 		return 0;
4352 
4353 	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
4354 		return 0;
4355 
4356 	return p_vf->bulletin.p_virt->pvid;
4357 }
4358 
4359 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
4360 				     struct qed_ptt *p_ptt, int vfid, int val)
4361 {
4362 	struct qed_vf_info *vf;
4363 	u8 abs_vp_id = 0;
4364 	u16 rl_id;
4365 	int rc;
4366 
4367 	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4368 	if (!vf)
4369 		return -EINVAL;
4370 
4371 	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4372 	if (rc)
4373 		return rc;
4374 
4375 	rl_id = abs_vp_id;	/* The "rl_id" is set as the "vport_id" */
4376 	return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
4377 }
4378 
4379 static int
4380 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
4381 {
4382 	struct qed_vf_info *vf;
4383 	u8 vport_id;
4384 	int i;
4385 
4386 	for_each_hwfn(cdev, i) {
4387 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4388 
4389 		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4390 			DP_NOTICE(p_hwfn,
4391 				  "SR-IOV sanity check failed, can't set min rate\n");
4392 			return -EINVAL;
4393 		}
4394 	}
4395 
4396 	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
4397 	vport_id = vf->vport_id;
4398 
4399 	return qed_configure_vport_wfq(cdev, vport_id, rate);
4400 }
4401 
4402 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
4403 {
4404 	struct qed_wfq_data *vf_vp_wfq;
4405 	struct qed_vf_info *vf_info;
4406 
4407 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4408 	if (!vf_info)
4409 		return 0;
4410 
4411 	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4412 
4413 	if (vf_vp_wfq->configured)
4414 		return vf_vp_wfq->min_speed;
4415 	else
4416 		return 0;
4417 }
4418 
4419 /**
4420  * qed_schedule_iov - schedules IOV task for VF and PF
4421  * @hwfn: hardware function pointer
4422  * @flag: IOV flag for VF/PF
4423  */
4424 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
4425 {
4426 	smp_mb__before_atomic();
4427 	set_bit(flag, &hwfn->iov_task_flags);
4428 	smp_mb__after_atomic();
4429 	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
4430 	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
4431 }
4432 
4433 void qed_vf_start_iov_wq(struct qed_dev *cdev)
4434 {
4435 	int i;
4436 
4437 	for_each_hwfn(cdev, i)
4438 	    queue_delayed_work(cdev->hwfns[i].iov_wq,
4439 			       &cdev->hwfns[i].iov_task, 0);
4440 }
4441 
4442 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
4443 {
4444 	int i, j;
4445 
4446 	for_each_hwfn(cdev, i)
4447 	    if (cdev->hwfns[i].iov_wq)
4448 		flush_workqueue(cdev->hwfns[i].iov_wq);
4449 
4450 	/* Mark VFs for disablement */
4451 	qed_iov_set_vfs_to_disable(cdev, true);
4452 
4453 	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
4454 		pci_disable_sriov(cdev->pdev);
4455 
4456 	if (cdev->recov_in_prog) {
4457 		DP_VERBOSE(cdev,
4458 			   QED_MSG_IOV,
4459 			   "Skip SRIOV disable operations in the device since a recovery is in progress\n");
4460 		goto out;
4461 	}
4462 
4463 	for_each_hwfn(cdev, i) {
4464 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
4465 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4466 
4467 		/* Failure to acquire the ptt in 100g creates an odd error
4468 		 * where the first engine has already relased IOV.
4469 		 */
4470 		if (!ptt) {
4471 			DP_ERR(hwfn, "Failed to acquire ptt\n");
4472 			return -EBUSY;
4473 		}
4474 
4475 		/* Clean WFQ db and configure equal weight for all vports */
4476 		qed_clean_wfq_db(hwfn, ptt);
4477 
4478 		qed_for_each_vf(hwfn, j) {
4479 			int k;
4480 
4481 			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
4482 				continue;
4483 
4484 			/* Wait until VF is disabled before releasing */
4485 			for (k = 0; k < 100; k++) {
4486 				if (!qed_iov_is_vf_stopped(hwfn, j))
4487 					msleep(20);
4488 				else
4489 					break;
4490 			}
4491 
4492 			if (k < 100)
4493 				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
4494 							  ptt, j);
4495 			else
4496 				DP_ERR(hwfn,
4497 				       "Timeout waiting for VF's FLR to end\n");
4498 		}
4499 
4500 		qed_ptt_release(hwfn, ptt);
4501 	}
4502 out:
4503 	qed_iov_set_vfs_to_disable(cdev, false);
4504 
4505 	return 0;
4506 }
4507 
4508 static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4509 					u16 vfid,
4510 					struct qed_iov_vf_init_params *params)
4511 {
4512 	u16 base, i;
4513 
4514 	/* Since we have an equal resource distribution per-VF, and we assume
4515 	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
4516 	 * sequentially from there.
4517 	 */
4518 	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
4519 
4520 	params->rel_vf_id = vfid;
4521 	for (i = 0; i < params->num_queues; i++) {
4522 		params->req_rx_queue[i] = base + i;
4523 		params->req_tx_queue[i] = base + i;
4524 	}
4525 }
4526 
4527 static int qed_sriov_enable(struct qed_dev *cdev, int num)
4528 {
4529 	struct qed_iov_vf_init_params params;
4530 	struct qed_hwfn *hwfn;
4531 	struct qed_ptt *ptt;
4532 	int i, j, rc;
4533 
4534 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
4535 		DP_NOTICE(cdev, "Can start at most %d VFs\n",
4536 			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
4537 		return -EINVAL;
4538 	}
4539 
4540 	memset(&params, 0, sizeof(params));
4541 
4542 	/* Initialize HW for VF access */
4543 	for_each_hwfn(cdev, j) {
4544 		hwfn = &cdev->hwfns[j];
4545 		ptt = qed_ptt_acquire(hwfn);
4546 
4547 		/* Make sure not to use more than 16 queues per VF */
4548 		params.num_queues = min_t(int,
4549 					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
4550 					  16);
4551 
4552 		if (!ptt) {
4553 			DP_ERR(hwfn, "Failed to acquire ptt\n");
4554 			rc = -EBUSY;
4555 			goto err;
4556 		}
4557 
4558 		for (i = 0; i < num; i++) {
4559 			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
4560 				continue;
4561 
4562 			qed_sriov_enable_qid_config(hwfn, i, &params);
4563 			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
4564 			if (rc) {
4565 				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
4566 				qed_ptt_release(hwfn, ptt);
4567 				goto err;
4568 			}
4569 		}
4570 
4571 		qed_ptt_release(hwfn, ptt);
4572 	}
4573 
4574 	/* Enable SRIOV PCIe functions */
4575 	rc = pci_enable_sriov(cdev->pdev, num);
4576 	if (rc) {
4577 		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
4578 		goto err;
4579 	}
4580 
4581 	hwfn = QED_LEADING_HWFN(cdev);
4582 	ptt = qed_ptt_acquire(hwfn);
4583 	if (!ptt) {
4584 		DP_ERR(hwfn, "Failed to acquire ptt\n");
4585 		rc = -EBUSY;
4586 		goto err;
4587 	}
4588 
4589 	rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
4590 	if (rc)
4591 		DP_INFO(cdev, "Failed to update eswitch mode\n");
4592 	qed_ptt_release(hwfn, ptt);
4593 
4594 	return num;
4595 
4596 err:
4597 	qed_sriov_disable(cdev, false);
4598 	return rc;
4599 }
4600 
4601 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
4602 {
4603 	if (!IS_QED_SRIOV(cdev)) {
4604 		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
4605 		return -EOPNOTSUPP;
4606 	}
4607 
4608 	if (num_vfs_param)
4609 		return qed_sriov_enable(cdev, num_vfs_param);
4610 	else
4611 		return qed_sriov_disable(cdev, true);
4612 }
4613 
4614 static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
4615 {
4616 	int i;
4617 
4618 	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4619 		DP_VERBOSE(cdev, QED_MSG_IOV,
4620 			   "Cannot set a VF MAC; Sriov is not enabled\n");
4621 		return -EINVAL;
4622 	}
4623 
4624 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4625 		DP_VERBOSE(cdev, QED_MSG_IOV,
4626 			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4627 		return -EINVAL;
4628 	}
4629 
4630 	for_each_hwfn(cdev, i) {
4631 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
4632 		struct qed_public_vf_info *vf_info;
4633 
4634 		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4635 		if (!vf_info)
4636 			continue;
4637 
4638 		/* Set the MAC, and schedule the IOV task */
4639 		if (vf_info->is_trusted_configured)
4640 			ether_addr_copy(vf_info->mac, mac);
4641 		else
4642 			ether_addr_copy(vf_info->forced_mac, mac);
4643 
4644 		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4645 	}
4646 
4647 	return 0;
4648 }
4649 
4650 static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
4651 {
4652 	int i;
4653 
4654 	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4655 		DP_VERBOSE(cdev, QED_MSG_IOV,
4656 			   "Cannot set a VF MAC; Sriov is not enabled\n");
4657 		return -EINVAL;
4658 	}
4659 
4660 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4661 		DP_VERBOSE(cdev, QED_MSG_IOV,
4662 			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4663 		return -EINVAL;
4664 	}
4665 
4666 	for_each_hwfn(cdev, i) {
4667 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
4668 		struct qed_public_vf_info *vf_info;
4669 
4670 		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4671 		if (!vf_info)
4672 			continue;
4673 
4674 		/* Set the forced vlan, and schedule the IOV task */
4675 		vf_info->forced_vlan = vid;
4676 		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4677 	}
4678 
4679 	return 0;
4680 }
4681 
4682 static int qed_get_vf_config(struct qed_dev *cdev,
4683 			     int vf_id, struct ifla_vf_info *ivi)
4684 {
4685 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
4686 	struct qed_public_vf_info *vf_info;
4687 	struct qed_mcp_link_state link;
4688 	u32 tx_rate;
4689 
4690 	/* Sanitize request */
4691 	if (IS_VF(cdev))
4692 		return -EINVAL;
4693 
4694 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
4695 		DP_VERBOSE(cdev, QED_MSG_IOV,
4696 			   "VF index [%d] isn't active\n", vf_id);
4697 		return -EINVAL;
4698 	}
4699 
4700 	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4701 
4702 	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
4703 
4704 	/* Fill information about VF */
4705 	ivi->vf = vf_id;
4706 
4707 	if (is_valid_ether_addr(vf_info->forced_mac))
4708 		ether_addr_copy(ivi->mac, vf_info->forced_mac);
4709 	else
4710 		ether_addr_copy(ivi->mac, vf_info->mac);
4711 
4712 	ivi->vlan = vf_info->forced_vlan;
4713 	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
4714 	ivi->linkstate = vf_info->link_state;
4715 	tx_rate = vf_info->tx_rate;
4716 	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
4717 	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
4718 
4719 	return 0;
4720 }
4721 
4722 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
4723 {
4724 	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
4725 	struct qed_mcp_link_capabilities caps;
4726 	struct qed_mcp_link_params params;
4727 	struct qed_mcp_link_state link;
4728 	int i;
4729 
4730 	if (!hwfn->pf_iov_info)
4731 		return;
4732 
4733 	/* Update bulletin of all future possible VFs with link configuration */
4734 	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
4735 		struct qed_public_vf_info *vf_info;
4736 
4737 		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
4738 		if (!vf_info)
4739 			continue;
4740 
4741 		/* Only hwfn0 is actually interested in the link speed.
4742 		 * But since only it would receive an MFW indication of link,
4743 		 * need to take configuration from it - otherwise things like
4744 		 * rate limiting for hwfn1 VF would not work.
4745 		 */
4746 		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
4747 		       sizeof(params));
4748 		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
4749 		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
4750 		       sizeof(caps));
4751 
4752 		/* Modify link according to the VF's configured link state */
4753 		switch (vf_info->link_state) {
4754 		case IFLA_VF_LINK_STATE_DISABLE:
4755 			link.link_up = false;
4756 			break;
4757 		case IFLA_VF_LINK_STATE_ENABLE:
4758 			link.link_up = true;
4759 			/* Set speed according to maximum supported by HW.
4760 			 * that is 40G for regular devices and 100G for CMT
4761 			 * mode devices.
4762 			 */
4763 			link.speed = (hwfn->cdev->num_hwfns > 1) ?
4764 				     100000 : 40000;
4765 		default:
4766 			/* In auto mode pass PF link image to VF */
4767 			break;
4768 		}
4769 
4770 		if (link.link_up && vf_info->tx_rate) {
4771 			struct qed_ptt *ptt;
4772 			int rate;
4773 
4774 			rate = min_t(int, vf_info->tx_rate, link.speed);
4775 
4776 			ptt = qed_ptt_acquire(hwfn);
4777 			if (!ptt) {
4778 				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
4779 				return;
4780 			}
4781 
4782 			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
4783 				vf_info->tx_rate = rate;
4784 				link.speed = rate;
4785 			}
4786 
4787 			qed_ptt_release(hwfn, ptt);
4788 		}
4789 
4790 		qed_iov_set_link(hwfn, i, &params, &link, &caps);
4791 	}
4792 
4793 	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4794 }
4795 
4796 static int qed_set_vf_link_state(struct qed_dev *cdev,
4797 				 int vf_id, int link_state)
4798 {
4799 	int i;
4800 
4801 	/* Sanitize request */
4802 	if (IS_VF(cdev))
4803 		return -EINVAL;
4804 
4805 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
4806 		DP_VERBOSE(cdev, QED_MSG_IOV,
4807 			   "VF index [%d] isn't active\n", vf_id);
4808 		return -EINVAL;
4809 	}
4810 
4811 	/* Handle configuration of link state */
4812 	for_each_hwfn(cdev, i) {
4813 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
4814 		struct qed_public_vf_info *vf;
4815 
4816 		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4817 		if (!vf)
4818 			continue;
4819 
4820 		if (vf->link_state == link_state)
4821 			continue;
4822 
4823 		vf->link_state = link_state;
4824 		qed_inform_vf_link_state(&cdev->hwfns[i]);
4825 	}
4826 
4827 	return 0;
4828 }
4829 
4830 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
4831 {
4832 	int i, rc = -EINVAL;
4833 
4834 	for_each_hwfn(cdev, i) {
4835 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4836 
4837 		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
4838 		if (rc)
4839 			break;
4840 	}
4841 
4842 	return rc;
4843 }
4844 
4845 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
4846 {
4847 	int i;
4848 
4849 	for_each_hwfn(cdev, i) {
4850 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4851 		struct qed_public_vf_info *vf;
4852 
4853 		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4854 			DP_NOTICE(p_hwfn,
4855 				  "SR-IOV sanity check failed, can't set tx rate\n");
4856 			return -EINVAL;
4857 		}
4858 
4859 		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
4860 
4861 		vf->tx_rate = rate;
4862 
4863 		qed_inform_vf_link_state(p_hwfn);
4864 	}
4865 
4866 	return 0;
4867 }
4868 
4869 static int qed_set_vf_rate(struct qed_dev *cdev,
4870 			   int vfid, u32 min_rate, u32 max_rate)
4871 {
4872 	int rc_min = 0, rc_max = 0;
4873 
4874 	if (max_rate)
4875 		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
4876 
4877 	if (min_rate)
4878 		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
4879 
4880 	if (rc_max | rc_min)
4881 		return -EINVAL;
4882 
4883 	return 0;
4884 }
4885 
4886 static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
4887 {
4888 	int i;
4889 
4890 	for_each_hwfn(cdev, i) {
4891 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
4892 		struct qed_public_vf_info *vf;
4893 
4894 		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
4895 			DP_NOTICE(hwfn,
4896 				  "SR-IOV sanity check failed, can't set trust\n");
4897 			return -EINVAL;
4898 		}
4899 
4900 		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
4901 
4902 		if (vf->is_trusted_request == trust)
4903 			return 0;
4904 		vf->is_trusted_request = trust;
4905 
4906 		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4907 	}
4908 
4909 	return 0;
4910 }
4911 
4912 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4913 {
4914 	u64 events[QED_VF_ARRAY_LENGTH];
4915 	struct qed_ptt *ptt;
4916 	int i;
4917 
4918 	ptt = qed_ptt_acquire(hwfn);
4919 	if (!ptt) {
4920 		DP_VERBOSE(hwfn, QED_MSG_IOV,
4921 			   "Can't acquire PTT; re-scheduling\n");
4922 		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4923 		return;
4924 	}
4925 
4926 	qed_iov_pf_get_pending_events(hwfn, events);
4927 
4928 	DP_VERBOSE(hwfn, QED_MSG_IOV,
4929 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4930 		   events[0], events[1], events[2]);
4931 
4932 	qed_for_each_vf(hwfn, i) {
4933 		/* Skip VFs with no pending messages */
4934 		if (!(events[i / 64] & (1ULL << (i % 64))))
4935 			continue;
4936 
4937 		DP_VERBOSE(hwfn, QED_MSG_IOV,
4938 			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4939 			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4940 
4941 		/* Copy VF's message to PF's request buffer for that VF */
4942 		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4943 			continue;
4944 
4945 		qed_iov_process_mbx_req(hwfn, ptt, i);
4946 	}
4947 
4948 	qed_ptt_release(hwfn, ptt);
4949 }
4950 
4951 static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
4952 				       u8 *mac,
4953 				       struct qed_public_vf_info *info)
4954 {
4955 	if (info->is_trusted_configured) {
4956 		if (is_valid_ether_addr(info->mac) &&
4957 		    (!mac || !ether_addr_equal(mac, info->mac)))
4958 			return true;
4959 	} else {
4960 		if (is_valid_ether_addr(info->forced_mac) &&
4961 		    (!mac || !ether_addr_equal(mac, info->forced_mac)))
4962 			return true;
4963 	}
4964 
4965 	return false;
4966 }
4967 
4968 static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
4969 				 struct qed_public_vf_info *info,
4970 				 int vfid)
4971 {
4972 	if (info->is_trusted_configured)
4973 		qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
4974 	else
4975 		qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
4976 }
4977 
4978 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4979 {
4980 	int i;
4981 
4982 	qed_for_each_vf(hwfn, i) {
4983 		struct qed_public_vf_info *info;
4984 		bool update = false;
4985 		u8 *mac;
4986 
4987 		info = qed_iov_get_public_vf_info(hwfn, i, true);
4988 		if (!info)
4989 			continue;
4990 
4991 		/* Update data on bulletin board */
4992 		if (info->is_trusted_configured)
4993 			mac = qed_iov_bulletin_get_mac(hwfn, i);
4994 		else
4995 			mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4996 
4997 		if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
4998 			DP_VERBOSE(hwfn,
4999 				   QED_MSG_IOV,
5000 				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
5001 				   i,
5002 				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
5003 
5004 			/* Update bulletin board with MAC */
5005 			qed_set_bulletin_mac(hwfn, info, i);
5006 			update = true;
5007 		}
5008 
5009 		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
5010 		    info->forced_vlan) {
5011 			DP_VERBOSE(hwfn,
5012 				   QED_MSG_IOV,
5013 				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
5014 				   info->forced_vlan,
5015 				   i,
5016 				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
5017 			qed_iov_bulletin_set_forced_vlan(hwfn,
5018 							 info->forced_vlan, i);
5019 			update = true;
5020 		}
5021 
5022 		if (update)
5023 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5024 	}
5025 }
5026 
5027 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
5028 {
5029 	struct qed_ptt *ptt;
5030 	int i;
5031 
5032 	ptt = qed_ptt_acquire(hwfn);
5033 	if (!ptt) {
5034 		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
5035 		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5036 		return;
5037 	}
5038 
5039 	qed_for_each_vf(hwfn, i)
5040 	    qed_iov_post_vf_bulletin(hwfn, i, ptt);
5041 
5042 	qed_ptt_release(hwfn, ptt);
5043 }
5044 
5045 static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
5046 {
5047 	struct qed_public_vf_info *vf_info;
5048 	struct qed_vf_info *vf;
5049 	u8 *force_mac;
5050 	int i;
5051 
5052 	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
5053 	vf = qed_iov_get_vf_info(hwfn, vf_id, true);
5054 
5055 	if (!vf_info || !vf)
5056 		return;
5057 
5058 	/* Force MAC converted to generic MAC in case of VF trust on */
5059 	if (vf_info->is_trusted_configured &&
5060 	    (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
5061 		force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
5062 
5063 		if (force_mac) {
5064 			/* Clear existing shadow copy of MAC to have a clean
5065 			 * slate.
5066 			 */
5067 			for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5068 				if (ether_addr_equal(vf->shadow_config.macs[i],
5069 						     vf_info->mac)) {
5070 					memset(vf->shadow_config.macs[i], 0,
5071 					       ETH_ALEN);
5072 					DP_VERBOSE(hwfn, QED_MSG_IOV,
5073 						   "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
5074 						    vf_info->mac, vf_id);
5075 					break;
5076 				}
5077 			}
5078 
5079 			ether_addr_copy(vf_info->mac, force_mac);
5080 			memset(vf_info->forced_mac, 0, ETH_ALEN);
5081 			vf->bulletin.p_virt->valid_bitmap &=
5082 					~BIT(MAC_ADDR_FORCED);
5083 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5084 		}
5085 	}
5086 
5087 	/* Update shadow copy with VF MAC when trust mode is turned off */
5088 	if (!vf_info->is_trusted_configured) {
5089 		u8 empty_mac[ETH_ALEN];
5090 
5091 		memset(empty_mac, 0, ETH_ALEN);
5092 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5093 			if (ether_addr_equal(vf->shadow_config.macs[i],
5094 					     empty_mac)) {
5095 				ether_addr_copy(vf->shadow_config.macs[i],
5096 						vf_info->mac);
5097 				DP_VERBOSE(hwfn, QED_MSG_IOV,
5098 					   "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
5099 					    vf_info->mac, vf_id);
5100 				break;
5101 			}
5102 		}
5103 		/* Clear bulletin when trust mode is turned off,
5104 		 * to have a clean slate for next (normal) operations.
5105 		 */
5106 		qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
5107 		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5108 	}
5109 }
5110 
5111 static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5112 {
5113 	struct qed_sp_vport_update_params params;
5114 	struct qed_filter_accept_flags *flags;
5115 	struct qed_public_vf_info *vf_info;
5116 	struct qed_vf_info *vf;
5117 	u8 mask;
5118 	int i;
5119 
5120 	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
5121 	flags = &params.accept_flags;
5122 
5123 	qed_for_each_vf(hwfn, i) {
5124 		/* Need to make sure current requested configuration didn't
5125 		 * flip so that we'll end up configuring something that's not
5126 		 * needed.
5127 		 */
5128 		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
5129 		if (vf_info->is_trusted_configured ==
5130 		    vf_info->is_trusted_request)
5131 			continue;
5132 		vf_info->is_trusted_configured = vf_info->is_trusted_request;
5133 
5134 		/* Handle forced MAC mode */
5135 		qed_update_mac_for_vf_trust_change(hwfn, i);
5136 
5137 		/* Validate that the VF has a configured vport */
5138 		vf = qed_iov_get_vf_info(hwfn, i, true);
5139 		if (!vf->vport_instance)
5140 			continue;
5141 
5142 		memset(&params, 0, sizeof(params));
5143 		params.opaque_fid = vf->opaque_fid;
5144 		params.vport_id = vf->vport_id;
5145 
5146 		params.update_ctl_frame_check = 1;
5147 		params.mac_chk_en = !vf_info->is_trusted_configured;
5148 
5149 		if (vf_info->rx_accept_mode & mask) {
5150 			flags->update_rx_mode_config = 1;
5151 			flags->rx_accept_filter = vf_info->rx_accept_mode;
5152 		}
5153 
5154 		if (vf_info->tx_accept_mode & mask) {
5155 			flags->update_tx_mode_config = 1;
5156 			flags->tx_accept_filter = vf_info->tx_accept_mode;
5157 		}
5158 
5159 		/* Remove if needed; Otherwise this would set the mask */
5160 		if (!vf_info->is_trusted_configured) {
5161 			flags->rx_accept_filter &= ~mask;
5162 			flags->tx_accept_filter &= ~mask;
5163 		}
5164 
5165 		if (flags->update_rx_mode_config ||
5166 		    flags->update_tx_mode_config ||
5167 		    params.update_ctl_frame_check)
5168 			qed_sp_vport_update(hwfn, &params,
5169 					    QED_SPQ_MODE_EBLOCK, NULL);
5170 	}
5171 }
5172 
5173 static void qed_iov_pf_task(struct work_struct *work)
5174 
5175 {
5176 	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
5177 					     iov_task.work);
5178 	int rc;
5179 
5180 	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
5181 		return;
5182 
5183 	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
5184 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
5185 
5186 		if (!ptt) {
5187 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5188 			return;
5189 		}
5190 
5191 		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
5192 		if (rc)
5193 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5194 
5195 		qed_ptt_release(hwfn, ptt);
5196 	}
5197 
5198 	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
5199 		qed_handle_vf_msg(hwfn);
5200 
5201 	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
5202 			       &hwfn->iov_task_flags))
5203 		qed_handle_pf_set_vf_unicast(hwfn);
5204 
5205 	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
5206 			       &hwfn->iov_task_flags))
5207 		qed_handle_bulletin_post(hwfn);
5208 
5209 	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
5210 		qed_iov_handle_trust_change(hwfn);
5211 }
5212 
5213 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
5214 {
5215 	int i;
5216 
5217 	for_each_hwfn(cdev, i) {
5218 		if (!cdev->hwfns[i].iov_wq)
5219 			continue;
5220 
5221 		if (schedule_first) {
5222 			qed_schedule_iov(&cdev->hwfns[i],
5223 					 QED_IOV_WQ_STOP_WQ_FLAG);
5224 			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
5225 		}
5226 
5227 		flush_workqueue(cdev->hwfns[i].iov_wq);
5228 		destroy_workqueue(cdev->hwfns[i].iov_wq);
5229 	}
5230 }
5231 
5232 int qed_iov_wq_start(struct qed_dev *cdev)
5233 {
5234 	char name[NAME_SIZE];
5235 	int i;
5236 
5237 	for_each_hwfn(cdev, i) {
5238 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5239 
5240 		/* PFs needs a dedicated workqueue only if they support IOV.
5241 		 * VFs always require one.
5242 		 */
5243 		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
5244 			continue;
5245 
5246 		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
5247 			 cdev->pdev->bus->number,
5248 			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
5249 
5250 		p_hwfn->iov_wq = create_singlethread_workqueue(name);
5251 		if (!p_hwfn->iov_wq) {
5252 			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
5253 			return -ENOMEM;
5254 		}
5255 
5256 		if (IS_PF(cdev))
5257 			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
5258 		else
5259 			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
5260 	}
5261 
5262 	return 0;
5263 }
5264 
5265 const struct qed_iov_hv_ops qed_iov_ops_pass = {
5266 	.configure = &qed_sriov_configure,
5267 	.set_mac = &qed_sriov_pf_set_mac,
5268 	.set_vlan = &qed_sriov_pf_set_vlan,
5269 	.get_config = &qed_get_vf_config,
5270 	.set_link_state = &qed_set_vf_link_state,
5271 	.set_spoof = &qed_spoof_configure,
5272 	.set_rate = &qed_set_vf_rate,
5273 	.set_trust = &qed_set_vf_trust,
5274 };
5275