xref: /openbmc/linux/drivers/net/ethernet/qlogic/qed/qed_sriov.c (revision e4781421e883340b796da5a724bda7226817990b)
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/etherdevice.h>
34 #include <linux/crc32.h>
35 #include <linux/vmalloc.h>
36 #include <linux/qed/qed_iov_if.h>
37 #include "qed_cxt.h"
38 #include "qed_hsi.h"
39 #include "qed_hw.h"
40 #include "qed_init_ops.h"
41 #include "qed_int.h"
42 #include "qed_mcp.h"
43 #include "qed_reg_addr.h"
44 #include "qed_sp.h"
45 #include "qed_sriov.h"
46 #include "qed_vf.h"
47 
48 /* IOV ramrods */
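/* qed_sp_vf_start() - send the COMMON_RAMROD_VF_START slowpath request that
 * tells firmware a VF instance is coming up. Besides the VF id it carries the
 * fastpath HSI minor the VF asked for in ACQUIRE; a minor newer than the PF's
 * own ETH_HSI_VER_MINOR (other than the legacy NO_PKT_LEN_TUNN value) is
 * clamped back to the PF's version.
 */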
49 static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
50 {
51 	struct vf_start_ramrod_data *p_ramrod = NULL;
52 	struct qed_spq_entry *p_ent = NULL;
53 	struct qed_sp_init_data init_data;
54 	int rc = -EINVAL;
55 	u8 fp_minor;
56 
57 	/* Get SPQ entry */
58 	memset(&init_data, 0, sizeof(init_data));
59 	init_data.cid = qed_spq_get_cid(p_hwfn);
60 	init_data.opaque_fid = p_vf->opaque_fid;
61 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
62 
63 	rc = qed_sp_init_request(p_hwfn, &p_ent,
64 				 COMMON_RAMROD_VF_START,
65 				 PROTOCOLID_COMMON, &init_data);
66 	if (rc)
67 		return rc;
68 
69 	p_ramrod = &p_ent->ramrod.vf_start;
70 
71 	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
72 	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
73 
74 	switch (p_hwfn->hw_info.personality) {
75 	case QED_PCI_ETH:
76 		p_ramrod->personality = PERSONALITY_ETH;
77 		break;
78 	case QED_PCI_ETH_ROCE:
79 		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
80 		break;
81 	default:
82 		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
83 			  p_hwfn->hw_info.personality);
84 		return -EINVAL;
85 	}
86 
87 	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
88 	if (fp_minor > ETH_HSI_VER_MINOR &&
89 	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
90 		DP_VERBOSE(p_hwfn,
91 			   QED_MSG_IOV,
92 			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
93 			   p_vf->abs_vf_id,
94 			   ETH_HSI_VER_MAJOR,
95 			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
96 		fp_minor = ETH_HSI_VER_MINOR;
97 	}
98 
99 	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
100 	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
101 
102 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
103 		   "VF[%d] - Starting to use HSI %02x.%02x\n",
104 		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
105 
106 	return qed_spq_post(p_hwfn, p_ent, NULL);
107 }
108 
109 static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
110 			  u32 concrete_vfid, u16 opaque_vfid)
111 {
112 	struct vf_stop_ramrod_data *p_ramrod = NULL;
113 	struct qed_spq_entry *p_ent = NULL;
114 	struct qed_sp_init_data init_data;
115 	int rc = -EINVAL;
116 
117 	/* Get SPQ entry */
118 	memset(&init_data, 0, sizeof(init_data));
119 	init_data.cid = qed_spq_get_cid(p_hwfn);
120 	init_data.opaque_fid = opaque_vfid;
121 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
122 
123 	rc = qed_sp_init_request(p_hwfn, &p_ent,
124 				 COMMON_RAMROD_VF_STOP,
125 				 PROTOCOLID_COMMON, &init_data);
126 	if (rc)
127 		return rc;
128 
129 	p_ramrod = &p_ent->ramrod.vf_stop;
130 
131 	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
132 
133 	return qed_spq_post(p_hwfn, p_ent, NULL);
134 }
135 
136 static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
137 				  int rel_vf_id,
138 				  bool b_enabled_only, bool b_non_malicious)
139 {
140 	if (!p_hwfn->pf_iov_info) {
141 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
142 		return false;
143 	}
144 
145 	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
146 	    (rel_vf_id < 0))
147 		return false;
148 
149 	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
150 	    b_enabled_only)
151 		return false;
152 
153 	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
154 	    b_non_malicious)
155 		return false;
156 
157 	return true;
158 }
159 
160 static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
161 					       u16 relative_vf_id,
162 					       bool b_enabled_only)
163 {
164 	struct qed_vf_info *vf = NULL;
165 
166 	if (!p_hwfn->pf_iov_info) {
167 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
168 		return NULL;
169 	}
170 
171 	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
172 				  b_enabled_only, false))
173 		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
174 	else
175 		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
176 		       relative_vf_id);
177 
178 	return vf;
179 }
180 
181 static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
182 				 struct qed_vf_info *p_vf, u16 rx_qid)
183 {
184 	if (rx_qid >= p_vf->num_rxqs)
185 		DP_VERBOSE(p_hwfn,
186 			   QED_MSG_IOV,
187 			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
188 			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
189 	return rx_qid < p_vf->num_rxqs;
190 }
191 
192 static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
193 				 struct qed_vf_info *p_vf, u16 tx_qid)
194 {
195 	if (tx_qid >= p_vf->num_txqs)
196 		DP_VERBOSE(p_hwfn,
197 			   QED_MSG_IOV,
198 			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
199 			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
200 	return tx_qid < p_vf->num_txqs;
201 }
202 
203 static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
204 				struct qed_vf_info *p_vf, u16 sb_idx)
205 {
206 	int i;
207 
208 	for (i = 0; i < p_vf->num_sbs; i++)
209 		if (p_vf->igu_sbs[i] == sb_idx)
210 			return true;
211 
212 	DP_VERBOSE(p_hwfn,
213 		   QED_MSG_IOV,
214 		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
215 		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
216 
217 	return false;
218 }
219 
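/* The bulletin board is the PF-to-VF notification path: the PF fills the
 * per-VF qed_bulletin_content (link state, forced MAC/VLAN, etc.), bumps its
 * version, computes a CRC over everything past the CRC field and DMAEs the
 * whole structure into the buffer the VF registered at ACQUIRE time.
 */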
220 static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
221 				    int vfid, struct qed_ptt *p_ptt)
222 {
223 	struct qed_bulletin_content *p_bulletin;
224 	int crc_size = sizeof(p_bulletin->crc);
225 	struct qed_dmae_params params;
226 	struct qed_vf_info *p_vf;
227 
228 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
229 	if (!p_vf)
230 		return -EINVAL;
231 
232 	if (!p_vf->vf_bulletin)
233 		return -EINVAL;
234 
235 	p_bulletin = p_vf->bulletin.p_virt;
236 
237 	/* Increment bulletin board version and compute crc */
238 	p_bulletin->version++;
239 	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
240 				p_vf->bulletin.size - crc_size);
241 
242 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
243 		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
244 		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
245 
246 	/* Propagate the bulletin board via DMAE to the VF's memory */
247 	memset(&params, 0, sizeof(params));
248 	params.flags = QED_DMAE_FLAG_VF_DST;
249 	params.dst_vfid = p_vf->abs_vf_id;
250 	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
251 				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
252 				  &params);
253 }
254 
255 static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
256 {
257 	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
258 	int pos = iov->pos;
259 
260 	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
261 	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
262 
263 	pci_read_config_word(cdev->pdev,
264 			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
265 	pci_read_config_word(cdev->pdev,
266 			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
267 
268 	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
269 	if (iov->num_vfs) {
270 		DP_VERBOSE(cdev,
271 			   QED_MSG_IOV,
272 			   "Number of VFs is already set to a non-zero value. Ignoring the PCI configuration value\n");
273 		iov->num_vfs = 0;
274 	}
275 
276 	pci_read_config_word(cdev->pdev,
277 			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
278 
279 	pci_read_config_word(cdev->pdev,
280 			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
281 
282 	pci_read_config_word(cdev->pdev,
283 			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
284 
285 	pci_read_config_dword(cdev->pdev,
286 			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
287 
288 	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
289 
290 	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
291 
292 	DP_VERBOSE(cdev,
293 		   QED_MSG_IOV,
294 		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
295 		   iov->nres,
296 		   iov->cap,
297 		   iov->ctrl,
298 		   iov->total_vfs,
299 		   iov->initial_vfs,
300 		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
301 
302 	/* Some sanity checks */
303 	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
304 	    iov->total_vfs > NUM_OF_VFS(cdev)) {
305 		/* This can happen only due to a bug. In this case we set
306 		 * num_vfs to zero to avoid memory corruption in the code that
307 		 * assumes the maximum number of VFs.
308 		 */
309 		DP_NOTICE(cdev,
310 			  "IOV: Unexpected number of VFs set: %d; setting num_vfs to zero\n",
311 			  iov->num_vfs);
312 
313 		iov->num_vfs = 0;
314 		iov->total_vfs = 0;
315 	}
316 
317 	return 0;
318 }
319 
320 static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
321 					struct qed_ptt *p_ptt)
322 {
323 	struct qed_igu_block *p_sb;
324 	u16 sb_id;
325 	u32 val;
326 
327 	if (!p_hwfn->hw_info.p_igu_info) {
328 		DP_ERR(p_hwfn,
329 		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
330 		return;
331 	}
332 
333 	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
334 	     sb_id++) {
335 		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
336 		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
337 		    !(p_sb->status & QED_IGU_STATUS_PF)) {
338 			val = qed_rd(p_hwfn, p_ptt,
339 				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
340 			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
341 			qed_wr(p_hwfn, p_ptt,
342 			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
343 		}
344 	}
345 }
346 
347 static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
348 {
349 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
350 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
351 	struct qed_bulletin_content *p_bulletin_virt;
352 	dma_addr_t req_p, rply_p, bulletin_p;
353 	union pfvf_tlvs *p_reply_virt_addr;
354 	union vfpf_tlvs *p_req_virt_addr;
355 	u8 idx = 0;
356 
357 	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
358 
359 	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
360 	req_p = p_iov_info->mbx_msg_phys_addr;
361 	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
362 	rply_p = p_iov_info->mbx_reply_phys_addr;
363 	p_bulletin_virt = p_iov_info->p_bulletins;
364 	bulletin_p = p_iov_info->bulletins_phys;
365 	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
366 		DP_ERR(p_hwfn,
367 		       "qed_iov_setup_vfdb called without allocating mem first\n");
368 		return;
369 	}
370 
371 	for (idx = 0; idx < p_iov->total_vfs; idx++) {
372 		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
373 		u32 concrete;
374 
375 		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
376 		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
377 		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
378 		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
379 
380 		vf->state = VF_STOPPED;
381 		vf->b_init = false;
382 
383 		vf->bulletin.phys = idx *
384 				    sizeof(struct qed_bulletin_content) +
385 				    bulletin_p;
386 		vf->bulletin.p_virt = p_bulletin_virt + idx;
387 		vf->bulletin.size = sizeof(struct qed_bulletin_content);
388 
389 		vf->relative_vf_id = idx;
390 		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
391 		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
392 		vf->concrete_fid = concrete;
393 		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
394 				 (vf->abs_vf_id << 8);
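		/* Illustrative example: a PF opaque FID of 0x00b0 and abs VF
		 * id 5 would yield an opaque_fid of 0x05b0 - the VF index
		 * sits in the upper byte, the PF's low byte is preserved.
		 */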
395 		vf->vport_id = idx + 1;
396 
397 		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
398 		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
399 	}
400 }
401 
402 static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
403 {
404 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
405 	void **p_v_addr;
406 	u16 num_vfs = 0;
407 
408 	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
409 
410 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
411 		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
412 
413 	/* Allocate PF Mailbox buffer (per-VF) */
414 	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
415 	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
416 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
417 				       p_iov_info->mbx_msg_size,
418 				       &p_iov_info->mbx_msg_phys_addr,
419 				       GFP_KERNEL);
420 	if (!*p_v_addr)
421 		return -ENOMEM;
422 
423 	/* Allocate PF Mailbox Reply buffer (per-VF) */
424 	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
425 	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
426 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
427 				       p_iov_info->mbx_reply_size,
428 				       &p_iov_info->mbx_reply_phys_addr,
429 				       GFP_KERNEL);
430 	if (!*p_v_addr)
431 		return -ENOMEM;
432 
433 	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
434 				     num_vfs;
435 	p_v_addr = &p_iov_info->p_bulletins;
436 	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
437 				       p_iov_info->bulletins_size,
438 				       &p_iov_info->bulletins_phys,
439 				       GFP_KERNEL);
440 	if (!*p_v_addr)
441 		return -ENOMEM;
442 
443 	DP_VERBOSE(p_hwfn,
444 		   QED_MSG_IOV,
445 		   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
446 		   p_iov_info->mbx_msg_virt_addr,
447 		   (u64) p_iov_info->mbx_msg_phys_addr,
448 		   p_iov_info->mbx_reply_virt_addr,
449 		   (u64) p_iov_info->mbx_reply_phys_addr,
450 		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
451 
452 	return 0;
453 }
454 
455 static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
456 {
457 	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
458 
459 	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
460 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
461 				  p_iov_info->mbx_msg_size,
462 				  p_iov_info->mbx_msg_virt_addr,
463 				  p_iov_info->mbx_msg_phys_addr);
464 
465 	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
466 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
467 				  p_iov_info->mbx_reply_size,
468 				  p_iov_info->mbx_reply_virt_addr,
469 				  p_iov_info->mbx_reply_phys_addr);
470 
471 	if (p_iov_info->p_bulletins)
472 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
473 				  p_iov_info->bulletins_size,
474 				  p_iov_info->p_bulletins,
475 				  p_iov_info->bulletins_phys);
476 }
477 
478 int qed_iov_alloc(struct qed_hwfn *p_hwfn)
479 {
480 	struct qed_pf_iov *p_sriov;
481 
482 	if (!IS_PF_SRIOV(p_hwfn)) {
483 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
484 			   "No SR-IOV - no need for IOV db\n");
485 		return 0;
486 	}
487 
488 	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
489 	if (!p_sriov)
490 		return -ENOMEM;
491 
492 	p_hwfn->pf_iov_info = p_sriov;
493 
494 	return qed_iov_allocate_vfdb(p_hwfn);
495 }
496 
497 void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
498 {
499 	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
500 		return;
501 
502 	qed_iov_setup_vfdb(p_hwfn);
503 	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
504 }
505 
506 void qed_iov_free(struct qed_hwfn *p_hwfn)
507 {
508 	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
509 		qed_iov_free_vfdb(p_hwfn);
510 		kfree(p_hwfn->pf_iov_info);
511 	}
512 }
513 
514 void qed_iov_free_hw_info(struct qed_dev *cdev)
515 {
516 	kfree(cdev->p_iov_info);
517 	cdev->p_iov_info = NULL;
518 }
519 
520 int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
521 {
522 	struct qed_dev *cdev = p_hwfn->cdev;
523 	int pos;
524 	int rc;
525 
526 	if (IS_VF(p_hwfn->cdev))
527 		return 0;
528 
529 	/* Learn the PCI configuration */
530 	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
531 				      PCI_EXT_CAP_ID_SRIOV);
532 	if (!pos) {
533 		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
534 		return 0;
535 	}
536 
537 	/* Allocate a new struct for IOV information */
538 	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
539 	if (!cdev->p_iov_info)
540 		return -ENOMEM;
541 
542 	cdev->p_iov_info->pos = pos;
543 
544 	rc = qed_iov_pci_cfg_info(cdev);
545 	if (rc)
546 		return rc;
547 
548 	/* We want PF IOV to be synonymous with the existence of p_iov_info;
549 	 * In case the capability is published but there are no VFs, simply
550 	 * de-allocate the struct.
551 	 */
552 	if (!cdev->p_iov_info->total_vfs) {
553 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
554 			   "IOV capabilities, but no VFs are published\n");
555 		kfree(cdev->p_iov_info);
556 		cdev->p_iov_info = NULL;
557 		return 0;
558 	}
559 
560 	/* Calculate the first VF index - this is a bit tricky; Basically,
561 	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
562 	 * after the first engine's VFs.
563 	 */
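	/* Illustrative example: with the usual VF offset of 16 read from the
	 * SR-IOV capability, PF0 on the first engine gets
	 * first_vf_in_pf = 16 + 0 - 16 = 0, while a PF on the second engine
	 * additionally subtracts MAX_NUM_VFS_BB so its VFs are numbered
	 * relative to that engine.
	 */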
564 	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
565 					   p_hwfn->abs_pf_id - 16;
566 	if (QED_PATH_ID(p_hwfn))
567 		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
568 
569 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
570 		   "First VF in hwfn 0x%08x\n",
571 		   cdev->p_iov_info->first_vf_in_pf);
572 
573 	return 0;
574 }
575 
576 bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
577 			      int vfid, bool b_fail_malicious)
578 {
579 	/* Check PF supports sriov */
580 	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
581 	    !IS_PF_SRIOV_ALLOC(p_hwfn))
582 		return false;
583 
584 	/* Check VF validity */
585 	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
586 		return false;
587 
588 	return true;
589 }
590 
591 bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
592 {
593 	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
594 }
595 
596 static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
597 				      u16 rel_vf_id, u8 to_disable)
598 {
599 	struct qed_vf_info *vf;
600 	int i;
601 
602 	for_each_hwfn(cdev, i) {
603 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
604 
605 		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
606 		if (!vf)
607 			continue;
608 
609 		vf->to_disable = to_disable;
610 	}
611 }
612 
613 static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
614 {
615 	u16 i;
616 
617 	if (!IS_QED_SRIOV(cdev))
618 		return;
619 
620 	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
621 		qed_iov_set_vf_to_disable(cdev, i, to_disable);
622 }
623 
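/* Clear the per-VF 'was_error' indication in the PGLUE block; the bits are
 * packed 32 VFs per 32-bit register, so the register index is abs_vfid / 32
 * and the bit within it is abs_vfid % 32.
 */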
624 static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
625 				       struct qed_ptt *p_ptt, u8 abs_vfid)
626 {
627 	qed_wr(p_hwfn, p_ptt,
628 	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
629 	       1 << (abs_vfid & 0x1f));
630 }
631 
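/* The pretend/unpretend pattern used below makes register accesses execute
 * on behalf of the VF: qed_fid_pretend() switches the PTT window to the VF's
 * concrete FID so its IGU registers can be touched, and a second call
 * restores the PF's own concrete FID.
 */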
632 static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
633 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
634 {
635 	int i;
636 
637 	/* Set VF masks and configuration - pretend */
638 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
639 
640 	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
641 
642 	/* unpretend */
643 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
644 
645 	/* iterate over all queues, clear sb consumer */
646 	for (i = 0; i < vf->num_sbs; i++)
647 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
648 						vf->igu_sbs[i],
649 						vf->opaque_fid, true);
650 }
651 
652 static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
653 				   struct qed_ptt *p_ptt,
654 				   struct qed_vf_info *vf, bool enable)
655 {
656 	u32 igu_vf_conf;
657 
658 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
659 
660 	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
661 
662 	if (enable)
663 		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
664 	else
665 		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
666 
667 	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
668 
669 	/* unpretend */
670 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
671 }
672 
673 static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
674 				    struct qed_ptt *p_ptt,
675 				    struct qed_vf_info *vf)
676 {
677 	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
678 	int rc;
679 
680 	if (vf->to_disable)
681 		return 0;
682 
683 	DP_VERBOSE(p_hwfn,
684 		   QED_MSG_IOV,
685 		   "Enable internal access for vf %x [abs %x]\n",
686 		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
687 
688 	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
689 
690 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
691 
692 	/* It's possible VF was previously considered malicious */
693 	vf->b_malicious = false;
694 
695 	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
696 	if (rc)
697 		return rc;
698 
699 	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
700 
701 	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
702 	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
703 
704 	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
705 		     p_hwfn->hw_info.hw_mode);
706 
707 	/* unpretend */
708 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
709 
710 	vf->state = VF_FREE;
711 
712 	return rc;
713 }
714 
715 /**
716  * @brief qed_iov_config_perm_table - configure the permission
717  *      zone table.
718  *      In E4, the queue zone permission table size is 320x9. There
719  *      are 320 VF queues for a single-engine device (256 for a dual-
720  *      engine device), and each entry has the following format:
721  *      {Valid, VF[7:0]}
722  * @param p_hwfn
723  * @param p_ptt
724  * @param vf
725  * @param enable
726  */
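/* An enabled entry is thus written as (BIT(8) | abs_vf_id), i.e. Valid = 1
 * with the VF number in the low byte; disabling an entry writes 0.
 */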
727 static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
728 				      struct qed_ptt *p_ptt,
729 				      struct qed_vf_info *vf, u8 enable)
730 {
731 	u32 reg_addr, val;
732 	u16 qzone_id = 0;
733 	int qid;
734 
735 	for (qid = 0; qid < vf->num_rxqs; qid++) {
736 		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
737 				&qzone_id);
738 
739 		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
740 		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
741 		qed_wr(p_hwfn, p_ptt, reg_addr, val);
742 	}
743 }
744 
745 static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
746 				      struct qed_ptt *p_ptt,
747 				      struct qed_vf_info *vf)
748 {
749 	/* Reset vf in IGU - interrupts are still disabled */
750 	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
751 
752 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
753 
754 	/* Permission Table */
755 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
756 }
757 
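/* Claim free IGU status blocks from the PF's pool for this VF: every claimed
 * CAM line gets the VF's function number and a vector index, and a matching
 * CAU SB entry is written through DMAE. Returns the number of SBs actually
 * allocated, which may be fewer than requested if the free pool runs out.
 */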
758 static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
759 				   struct qed_ptt *p_ptt,
760 				   struct qed_vf_info *vf, u16 num_rx_queues)
761 {
762 	struct qed_igu_block *igu_blocks;
763 	int qid = 0, igu_id = 0;
764 	u32 val = 0;
765 
766 	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
767 
768 	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
769 		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
770 	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
771 
772 	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
773 	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
774 	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
775 
776 	while ((qid < num_rx_queues) &&
777 	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
778 		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
779 			struct cau_sb_entry sb_entry;
780 
781 			vf->igu_sbs[qid] = (u16)igu_id;
782 			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
783 
784 			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
785 
786 			qed_wr(p_hwfn, p_ptt,
787 			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
788 			       val);
789 
790 			/* Configure in CAU the IGU SB that was just marked valid */
791 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
792 					      p_hwfn->rel_pf_id,
793 					      vf->abs_vf_id, 1);
794 			qed_dmae_host2grc(p_hwfn, p_ptt,
795 					  (u64)(uintptr_t)&sb_entry,
796 					  CAU_REG_SB_VAR_MEMORY +
797 					  igu_id * sizeof(u64), 2, 0);
798 			qid++;
799 		}
800 		igu_id++;
801 	}
802 
803 	vf->num_sbs = (u8) num_rx_queues;
804 
805 	return vf->num_sbs;
806 }
807 
808 static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
809 				    struct qed_ptt *p_ptt,
810 				    struct qed_vf_info *vf)
811 {
812 	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
813 	int idx, igu_id;
814 	u32 addr, val;
815 
816 	/* Invalidate igu CAM lines and mark them as free */
817 	for (idx = 0; idx < vf->num_sbs; idx++) {
818 		igu_id = vf->igu_sbs[idx];
819 		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
820 
821 		val = qed_rd(p_hwfn, p_ptt, addr);
822 		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
823 		qed_wr(p_hwfn, p_ptt, addr, val);
824 
825 		p_info->igu_map.igu_blocks[igu_id].status |=
826 		    QED_IGU_STATUS_FREE;
827 
828 		p_hwfn->hw_info.p_igu_info->free_blks++;
829 	}
830 
831 	vf->num_sbs = 0;
832 }
833 
834 static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
835 				  struct qed_ptt *p_ptt,
836 				  struct qed_iov_vf_init_params *p_params)
837 {
838 	u8 num_of_vf_available_chains = 0;
839 	struct qed_vf_info *vf = NULL;
840 	u16 qid, num_irqs;
841 	int rc = 0;
842 	u32 cids;
843 	u8 i;
844 
845 	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
846 	if (!vf) {
847 		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
848 		return -EINVAL;
849 	}
850 
851 	if (vf->b_init) {
852 		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
853 			  p_params->rel_vf_id);
854 		return -EINVAL;
855 	}
856 
857 	/* Perform sanity checking on the requested queue_id */
858 	for (i = 0; i < p_params->num_queues; i++) {
859 		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
860 		u16 max_vf_qzone = min_vf_qzone +
861 		    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;
862 
863 		qid = p_params->req_rx_queue[i];
864 		if (qid < min_vf_qzone || qid > max_vf_qzone) {
865 			DP_NOTICE(p_hwfn,
866 				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
867 				  qid,
868 				  p_params->rel_vf_id,
869 				  min_vf_qzone, max_vf_qzone);
870 			return -EINVAL;
871 		}
872 
873 		qid = p_params->req_tx_queue[i];
874 		if (qid > max_vf_qzone) {
875 			DP_NOTICE(p_hwfn,
876 				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
877 				  qid, p_params->rel_vf_id, max_vf_qzone);
878 			return -EINVAL;
879 		}
880 
881 		/* If client *really* wants, Tx qid can be shared with PF */
882 		if (qid < min_vf_qzone)
883 			DP_VERBOSE(p_hwfn,
884 				   QED_MSG_IOV,
885 				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
886 				   p_params->rel_vf_id, qid, i);
887 	}
888 
889 	/* Limit number of queues according to number of CIDs */
890 	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
891 	DP_VERBOSE(p_hwfn,
892 		   QED_MSG_IOV,
893 		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
894 		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
895 	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
896 
897 	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
898 							      p_ptt,
899 							      vf, num_irqs);
900 	if (!num_of_vf_available_chains) {
901 		DP_ERR(p_hwfn, "no available igu sbs\n");
902 		return -ENOMEM;
903 	}
904 
905 	/* Choose queue number and index ranges */
906 	vf->num_rxqs = num_of_vf_available_chains;
907 	vf->num_txqs = num_of_vf_available_chains;
908 
909 	for (i = 0; i < vf->num_rxqs; i++) {
910 		struct qed_vf_q_info *p_queue = &vf->vf_queues[i];
911 
912 		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
913 		p_queue->fw_tx_qid = p_params->req_tx_queue[i];
914 
915 		/* CIDs are per-VF, so no problem having them 0-based. */
916 		p_queue->fw_cid = i;
917 
918 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
919 			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]  CID %04x\n",
920 			   vf->relative_vf_id,
921 			   i, vf->igu_sbs[i],
922 			   p_queue->fw_rx_qid,
923 			   p_queue->fw_tx_qid, p_queue->fw_cid);
924 	}
925 
926 	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
927 	if (!rc) {
928 		vf->b_init = true;
929 
930 		if (IS_LEAD_HWFN(p_hwfn))
931 			p_hwfn->cdev->p_iov_info->num_vfs++;
932 	}
933 
934 	return rc;
935 }
936 
937 static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
938 			     u16 vfid,
939 			     struct qed_mcp_link_params *params,
940 			     struct qed_mcp_link_state *link,
941 			     struct qed_mcp_link_capabilities *p_caps)
942 {
943 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
944 						       vfid,
945 						       false);
946 	struct qed_bulletin_content *p_bulletin;
947 
948 	if (!p_vf)
949 		return;
950 
951 	p_bulletin = p_vf->bulletin.p_virt;
952 	p_bulletin->req_autoneg = params->speed.autoneg;
953 	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
954 	p_bulletin->req_forced_speed = params->speed.forced_speed;
955 	p_bulletin->req_autoneg_pause = params->pause.autoneg;
956 	p_bulletin->req_forced_rx = params->pause.forced_rx;
957 	p_bulletin->req_forced_tx = params->pause.forced_tx;
958 	p_bulletin->req_loopback = params->loopback_mode;
959 
960 	p_bulletin->link_up = link->link_up;
961 	p_bulletin->speed = link->speed;
962 	p_bulletin->full_duplex = link->full_duplex;
963 	p_bulletin->autoneg = link->an;
964 	p_bulletin->autoneg_complete = link->an_complete;
965 	p_bulletin->parallel_detection = link->parallel_detection;
966 	p_bulletin->pfc_enabled = link->pfc_enabled;
967 	p_bulletin->partner_adv_speed = link->partner_adv_speed;
968 	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
969 	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
970 	p_bulletin->partner_adv_pause = link->partner_adv_pause;
971 	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
972 
973 	p_bulletin->capability_speed = p_caps->speed_capabilities;
974 }
975 
976 static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
977 				     struct qed_ptt *p_ptt, u16 rel_vf_id)
978 {
979 	struct qed_mcp_link_capabilities caps;
980 	struct qed_mcp_link_params params;
981 	struct qed_mcp_link_state link;
982 	struct qed_vf_info *vf = NULL;
983 
984 	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
985 	if (!vf) {
986 		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
987 		return -EINVAL;
988 	}
989 
990 	if (vf->bulletin.p_virt)
991 		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
992 
993 	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
994 
995 	/* Get the link configuration back in bulletin so
996 	 * that when VFs are re-enabled they get the actual
997 	 * link configuration.
998 	 */
999 	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
1000 	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
1001 	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
1002 	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1003 
1004 	/* Forget the VF's acquisition message */
1005 	memset(&vf->acquire, 0, sizeof(vf->acquire));
1006 
1007 	/* Disabling interrupts and resetting the permission table were done during
1008 	 * vf-close; however, we could get here without going through vf-close.
1009 	 */
1010 	/* Disable Interrupts for VF */
1011 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1012 
1013 	/* Reset Permission table */
1014 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1015 
1016 	vf->num_rxqs = 0;
1017 	vf->num_txqs = 0;
1018 	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1019 
1020 	if (vf->b_init) {
1021 		vf->b_init = false;
1022 
1023 		if (IS_LEAD_HWFN(p_hwfn))
1024 			p_hwfn->cdev->p_iov_info->num_vfs--;
1025 	}
1026 
1027 	return 0;
1028 }
1029 
1030 static bool qed_iov_tlv_supported(u16 tlvtype)
1031 {
1032 	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
1033 }
1034 
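/* PF<->VF mailbox messages are chains of {type, length} TLVs terminated by a
 * CHANNEL_TLV_LIST_END entry. A reply is typically built as in
 * qed_iov_prepare_resp() below (sketch):
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */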
1035 /* place a given tlv on the tlv buffer, continuing current tlv list */
1036 void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
1037 {
1038 	struct channel_tlv *tl = (struct channel_tlv *)*offset;
1039 
1040 	tl->type = type;
1041 	tl->length = length;
1042 
1043 	/* Offset should keep pointing to next TLV (the end of the last) */
1044 	*offset += length;
1045 
1046 	/* Return a pointer to the start of the added tlv */
1047 	return *offset - length;
1048 }
1049 
1050 /* list the types and lengths of the tlvs on the buffer */
1051 void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
1052 {
1053 	u16 i = 1, total_length = 0;
1054 	struct channel_tlv *tlv;
1055 
1056 	do {
1057 		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1058 
1059 		/* output tlv */
1060 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1061 			   "TLV number %d: type %d, length %d\n",
1062 			   i, tlv->type, tlv->length);
1063 
1064 		if (tlv->type == CHANNEL_TLV_LIST_END)
1065 			return;
1066 
1067 		/* Validate entry - protect against malicious VFs */
1068 		if (!tlv->length) {
1069 			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
1070 			return;
1071 		}
1072 
1073 		total_length += tlv->length;
1074 
1075 		if (total_length >= sizeof(struct tlv_buffer_size)) {
1076 			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
1077 			return;
1078 		}
1079 
1080 		i++;
1081 	} while (1);
1082 }
1083 
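/* Deliver a prepared reply to the VF: the status is stamped into the reply
 * header, the body (everything past the first u64) is DMAEd into the VF's
 * reply buffer first and the leading u64 last, presumably so the VF never
 * observes a partially written reply; finally the per-VF "channel ready"
 * byte in USTORM RAM is set to let the VF poll for completion.
 */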
1084 static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
1085 				  struct qed_ptt *p_ptt,
1086 				  struct qed_vf_info *p_vf,
1087 				  u16 length, u8 status)
1088 {
1089 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1090 	struct qed_dmae_params params;
1091 	u8 eng_vf_id;
1092 
1093 	mbx->reply_virt->default_resp.hdr.status = status;
1094 
1095 	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
1096 
1097 	eng_vf_id = p_vf->abs_vf_id;
1098 
1099 	memset(&params, 0, sizeof(struct qed_dmae_params));
1100 	params.flags = QED_DMAE_FLAG_VF_DST;
1101 	params.dst_vfid = eng_vf_id;
1102 
1103 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1104 			   mbx->req_virt->first_tlv.reply_address +
1105 			   sizeof(u64),
1106 			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1107 			   &params);
1108 
1109 	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1110 			   mbx->req_virt->first_tlv.reply_address,
1111 			   sizeof(u64) / 4, &params);
1112 
1113 	REG_WR(p_hwfn,
1114 	       GTT_BAR0_MAP_REG_USDM_RAM +
1115 	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1116 }
1117 
1118 static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
1119 				enum qed_iov_vport_update_flag flag)
1120 {
1121 	switch (flag) {
1122 	case QED_IOV_VP_UPDATE_ACTIVATE:
1123 		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1124 	case QED_IOV_VP_UPDATE_VLAN_STRIP:
1125 		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1126 	case QED_IOV_VP_UPDATE_TX_SWITCH:
1127 		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1128 	case QED_IOV_VP_UPDATE_MCAST:
1129 		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1130 	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
1131 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1132 	case QED_IOV_VP_UPDATE_RSS:
1133 		return CHANNEL_TLV_VPORT_UPDATE_RSS;
1134 	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1135 		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1136 	case QED_IOV_VP_UPDATE_SGE_TPA:
1137 		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1138 	default:
1139 		return 0;
1140 	}
1141 }
1142 
1143 static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
1144 					    struct qed_vf_info *p_vf,
1145 					    struct qed_iov_vf_mbx *p_mbx,
1146 					    u8 status,
1147 					    u16 tlvs_mask, u16 tlvs_accepted)
1148 {
1149 	struct pfvf_def_resp_tlv *resp;
1150 	u16 size, total_len, i;
1151 
1152 	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1153 	p_mbx->offset = (u8 *)p_mbx->reply_virt;
1154 	size = sizeof(struct pfvf_def_resp_tlv);
1155 	total_len = size;
1156 
1157 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1158 
1159 	/* Prepare response for all extended tlvs if they are found by PF */
1160 	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
1161 		if (!(tlvs_mask & BIT(i)))
1162 			continue;
1163 
1164 		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
1165 				   qed_iov_vport_to_tlv(p_hwfn, i), size);
1166 
1167 		if (tlvs_accepted & BIT(i))
1168 			resp->hdr.status = status;
1169 		else
1170 			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1171 
1172 		DP_VERBOSE(p_hwfn,
1173 			   QED_MSG_IOV,
1174 			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
1175 			   p_vf->relative_vf_id,
1176 			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1177 
1178 		total_len += size;
1179 	}
1180 
1181 	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1182 		    sizeof(struct channel_list_end_tlv));
1183 
1184 	return total_len;
1185 }
1186 
1187 static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
1188 				 struct qed_ptt *p_ptt,
1189 				 struct qed_vf_info *vf_info,
1190 				 u16 type, u16 length, u8 status)
1191 {
1192 	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1193 
1194 	mbx->offset = (u8 *)mbx->reply_virt;
1195 
1196 	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
1197 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1198 		    sizeof(struct channel_list_end_tlv));
1199 
1200 	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1201 }
1202 
1203 static struct
1204 qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
1205 					       u16 relative_vf_id,
1206 					       bool b_enabled_only)
1207 {
1208 	struct qed_vf_info *vf = NULL;
1209 
1210 	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1211 	if (!vf)
1212 		return NULL;
1213 
1214 	return &vf->p_vf_info;
1215 }
1216 
1217 static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
1218 {
1219 	struct qed_public_vf_info *vf_info;
1220 
1221 	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
1222 
1223 	if (!vf_info)
1224 		return;
1225 
1226 	/* Clear the VF mac */
1227 	memset(vf_info->mac, 0, ETH_ALEN);
1228 
1229 	vf_info->rx_accept_mode = 0;
1230 	vf_info->tx_accept_mode = 0;
1231 }
1232 
1233 static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
1234 			       struct qed_vf_info *p_vf)
1235 {
1236 	u32 i;
1237 
1238 	p_vf->vf_bulletin = 0;
1239 	p_vf->vport_instance = 0;
1240 	p_vf->configured_features = 0;
1241 
1242 	/* If VF previously requested less resources, go back to default */
1243 	p_vf->num_rxqs = p_vf->num_sbs;
1244 	p_vf->num_txqs = p_vf->num_sbs;
1245 
1246 	p_vf->num_active_rxqs = 0;
1247 
1248 	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1249 		struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];
1250 
1251 		if (p_queue->p_rx_cid) {
1252 			qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
1253 			p_queue->p_rx_cid = NULL;
1254 		}
1255 
1256 		if (p_queue->p_tx_cid) {
1257 			qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
1258 			p_queue->p_tx_cid = NULL;
1259 		}
1260 	}
1261 
1262 	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1263 	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1264 	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
1265 }
1266 
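/* Fill the resources the PF is willing to grant in response to ACQUIRE and
 * compare them against the VF's request. A shortfall normally returns
 * PFVF_STATUS_NO_RESOURCE so the VF can shrink its request and retry, but
 * legacy Windows VFs on the pre-pkt-len-tunn HSI can't cope with that status
 * and are answered with SUCCESS instead.
 */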
1267 static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
1268 				      struct qed_ptt *p_ptt,
1269 				      struct qed_vf_info *p_vf,
1270 				      struct vf_pf_resc_request *p_req,
1271 				      struct pf_vf_resc *p_resp)
1272 {
1273 	int i;
1274 
1275 	/* Queue related information */
1276 	p_resp->num_rxqs = p_vf->num_rxqs;
1277 	p_resp->num_txqs = p_vf->num_txqs;
1278 	p_resp->num_sbs = p_vf->num_sbs;
1279 
1280 	for (i = 0; i < p_resp->num_sbs; i++) {
1281 		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1282 		p_resp->hw_sbs[i].sb_qid = 0;
1283 	}
1284 
1285 	/* These fields are filled for backward compatibility.
1286 	 * Unused by modern vfs.
1287 	 */
1288 	for (i = 0; i < p_resp->num_rxqs; i++) {
1289 		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1290 				(u16 *)&p_resp->hw_qid[i]);
1291 		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
1292 	}
1293 
1294 	/* Filter related information */
1295 	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
1296 					p_req->num_mac_filters);
1297 	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
1298 					 p_req->num_vlan_filters);
1299 
1300 	/* This isn't really needed/enforced, but some legacy VFs might depend
1301 	 * on the correct filling of this field.
1302 	 */
1303 	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
1304 
1305 	/* Validate sufficient resources for VF */
1306 	if (p_resp->num_rxqs < p_req->num_rxqs ||
1307 	    p_resp->num_txqs < p_req->num_txqs ||
1308 	    p_resp->num_sbs < p_req->num_sbs ||
1309 	    p_resp->num_mac_filters < p_req->num_mac_filters ||
1310 	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1311 	    p_resp->num_mc_filters < p_req->num_mc_filters) {
1312 		DP_VERBOSE(p_hwfn,
1313 			   QED_MSG_IOV,
1314 			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
1315 			   p_vf->abs_vf_id,
1316 			   p_req->num_rxqs,
1317 			   p_resp->num_rxqs,
1318 			   p_req->num_txqs,
1319 			   p_resp->num_txqs,
1320 			   p_req->num_sbs,
1321 			   p_resp->num_sbs,
1322 			   p_req->num_mac_filters,
1323 			   p_resp->num_mac_filters,
1324 			   p_req->num_vlan_filters,
1325 			   p_resp->num_vlan_filters,
1326 			   p_req->num_mc_filters, p_resp->num_mc_filters);
1327 
1328 		/* Some legacy OSes are incapable of correctly handling this
1329 		 * failure.
1330 		 */
1331 		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1332 		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1333 		    (p_vf->acquire.vfdev_info.os_type ==
1334 		     VFPF_ACQUIRE_OS_WINDOWS))
1335 			return PFVF_STATUS_SUCCESS;
1336 
1337 		return PFVF_STATUS_NO_RESOURCE;
1338 	}
1339 
1340 	return PFVF_STATUS_SUCCESS;
1341 }
1342 
1343 static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
1344 					 struct pfvf_stats_info *p_stats)
1345 {
1346 	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1347 				  offsetof(struct mstorm_vf_zone,
1348 					   non_trigger.eth_queue_stat);
1349 	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1350 	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1351 				  offsetof(struct ustorm_vf_zone,
1352 					   non_trigger.eth_queue_stat);
1353 	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1354 	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1355 				  offsetof(struct pstorm_vf_zone,
1356 					   non_trigger.eth_queue_stat);
1357 	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1358 	p_stats->tstats.address = 0;
1359 	p_stats->tstats.len = 0;
1360 }
1361 
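/* Handle the VF's ACQUIRE request: validate the VF state and fastpath HSI
 * compatibility, refuse pre-100G VF drivers on 100G devices, store the
 * request, fill in PF/device info and the granted resources, start the VF in
 * firmware and post the first bulletin before replying.
 */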
1362 static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1363 				   struct qed_ptt *p_ptt,
1364 				   struct qed_vf_info *vf)
1365 {
1366 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1367 	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1368 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1369 	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1370 	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1371 	struct pf_vf_resc *resc = &resp->resc;
1372 	int rc;
1373 
1374 	memset(resp, 0, sizeof(*resp));
1375 
1376 	/* Write the PF version so that the VF knows which version is
1377 	 * supported - it might be overridden later. This guarantees that the
1378 	 * VF can recognize a legacy PF by the lack of versions in the reply.
1379 	 */
1380 	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1381 	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1382 
1383 	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
1384 		DP_VERBOSE(p_hwfn,
1385 			   QED_MSG_IOV,
1386 			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1387 			   vf->abs_vf_id, vf->state);
1388 		goto out;
1389 	}
1390 
1391 	/* Validate FW compatibility */
1392 	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1393 		if (req->vfdev_info.capabilities &
1394 		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1395 			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1396 
1397 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1398 				   "VF[%d] is pre-fastpath HSI\n",
1399 				   vf->abs_vf_id);
1400 			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1401 			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1402 		} else {
1403 			DP_INFO(p_hwfn,
1404 				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with the loaded FW's fastpath HSI %02x.%02x\n",
1405 				vf->abs_vf_id,
1406 				req->vfdev_info.eth_fp_hsi_major,
1407 				req->vfdev_info.eth_fp_hsi_minor,
1408 				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1409 
1410 			goto out;
1411 		}
1412 	}
1413 
1414 	/* On 100g PFs, prevent old VFs from loading */
1415 	if ((p_hwfn->cdev->num_hwfns > 1) &&
1416 	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1417 		DP_INFO(p_hwfn,
1418 			"VF[%d] is running an old driver that doesn't support 100g\n",
1419 			vf->abs_vf_id);
1420 		goto out;
1421 	}
1422 
1423 	/* Store the acquire message */
1424 	memcpy(&vf->acquire, req, sizeof(vf->acquire));
1425 
1426 	vf->opaque_fid = req->vfdev_info.opaque_fid;
1427 
1428 	vf->vf_bulletin = req->bulletin_addr;
1429 	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1430 			    vf->bulletin.size : req->bulletin_size;
1431 
1432 	/* fill in pfdev info */
1433 	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1434 	pfdev_info->db_size = 0;
1435 	pfdev_info->indices_per_sb = PIS_PER_SB;
1436 
1437 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1438 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1439 	if (p_hwfn->cdev->num_hwfns > 1)
1440 		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1441 
1442 	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1443 
1444 	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1445 
1446 	pfdev_info->fw_major = FW_MAJOR_VERSION;
1447 	pfdev_info->fw_minor = FW_MINOR_VERSION;
1448 	pfdev_info->fw_rev = FW_REVISION_VERSION;
1449 	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1450 
1451 	/* Incorrect for legacy VFs, but it doesn't matter since legacy VFs
1452 	 * don't read this field.
1453 	 */
1454 	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
1455 					 req->vfdev_info.eth_fp_hsi_minor);
1456 	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1457 	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1458 
1459 	pfdev_info->dev_type = p_hwfn->cdev->type;
1460 	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1461 
1462 	/* Fill resources available to VF; Make sure there are enough to
1463 	 * satisfy the VF's request.
1464 	 */
1465 	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1466 						  &req->resc_request, resc);
1467 	if (vfpf_status != PFVF_STATUS_SUCCESS)
1468 		goto out;
1469 
1470 	/* Start the VF in FW */
1471 	rc = qed_sp_vf_start(p_hwfn, vf);
1472 	if (rc) {
1473 		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1474 		vfpf_status = PFVF_STATUS_FAILURE;
1475 		goto out;
1476 	}
1477 
1478 	/* Fill agreed size of bulletin board in response */
1479 	resp->bulletin_size = vf->bulletin.size;
1480 	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1481 
1482 	DP_VERBOSE(p_hwfn,
1483 		   QED_MSG_IOV,
1484 		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1485 		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1486 		   vf->abs_vf_id,
1487 		   resp->pfdev_info.chip_num,
1488 		   resp->pfdev_info.db_size,
1489 		   resp->pfdev_info.indices_per_sb,
1490 		   resp->pfdev_info.capabilities,
1491 		   resc->num_rxqs,
1492 		   resc->num_txqs,
1493 		   resc->num_sbs,
1494 		   resc->num_mac_filters,
1495 		   resc->num_vlan_filters);
1496 	vf->state = VF_ACQUIRED;
1497 
1498 	/* Prepare Response */
1499 out:
1500 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1501 			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1502 }
1503 
1504 static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
1505 				  struct qed_vf_info *p_vf, bool val)
1506 {
1507 	struct qed_sp_vport_update_params params;
1508 	int rc;
1509 
1510 	if (val == p_vf->spoof_chk) {
1511 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1512 			   "Spoofchk value[%d] is already configured\n", val);
1513 		return 0;
1514 	}
1515 
1516 	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
1517 	params.opaque_fid = p_vf->opaque_fid;
1518 	params.vport_id = p_vf->vport_id;
1519 	params.update_anti_spoofing_en_flg = 1;
1520 	params.anti_spoofing_en = val;
1521 
1522 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1523 	if (!rc) {
1524 		p_vf->spoof_chk = val;
1525 		p_vf->req_spoofchk_val = p_vf->spoof_chk;
1526 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1527 			   "Spoofchk val[%d] configured\n", val);
1528 	} else {
1529 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1530 			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1531 			   val, p_vf->relative_vf_id);
1532 	}
1533 
1534 	return rc;
1535 }
1536 
1537 static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
1538 					    struct qed_vf_info *p_vf)
1539 {
1540 	struct qed_filter_ucast filter;
1541 	int rc = 0;
1542 	int i;
1543 
1544 	memset(&filter, 0, sizeof(filter));
1545 	filter.is_rx_filter = 1;
1546 	filter.is_tx_filter = 1;
1547 	filter.vport_to_add_to = p_vf->vport_id;
1548 	filter.opcode = QED_FILTER_ADD;
1549 
1550 	/* Reconfigure vlans */
1551 	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1552 		if (!p_vf->shadow_config.vlans[i].used)
1553 			continue;
1554 
1555 		filter.type = QED_FILTER_VLAN;
1556 		filter.vlan = p_vf->shadow_config.vlans[i].vid;
1557 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1558 			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1559 			   filter.vlan, p_vf->relative_vf_id);
1560 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1561 					     &filter, QED_SPQ_MODE_CB, NULL);
1562 		if (rc) {
1563 			DP_NOTICE(p_hwfn,
1564 				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
1565 				  filter.vlan, p_vf->relative_vf_id);
1566 			break;
1567 		}
1568 	}
1569 
1570 	return rc;
1571 }
1572 
1573 static int
1574 qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
1575 				   struct qed_vf_info *p_vf, u64 events)
1576 {
1577 	int rc = 0;
1578 
1579 	if ((events & BIT(VLAN_ADDR_FORCED)) &&
1580 	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1581 		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1582 
1583 	return rc;
1584 }
1585 
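/* Apply configuration the hypervisor forced through the bulletin board
 * (MAC_ADDR_FORCED / VLAN_ADDR_FORCED events): program the forced MAC or
 * PVID on the VF's vport, update the default-vlan and silent-stripping
 * settings, refresh the Rx queues, and finally restore the VF's own shadow
 * unicast configuration if a forced feature was removed.
 */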
1586 static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
1587 					  struct qed_vf_info *p_vf, u64 events)
1588 {
1589 	int rc = 0;
1590 	struct qed_filter_ucast filter;
1591 
1592 	if (!p_vf->vport_instance)
1593 		return -EINVAL;
1594 
1595 	if (events & BIT(MAC_ADDR_FORCED)) {
1596 		/* Since there's no way [currently] of removing the MAC,
1597 		 * we can always assume this means we need to force it.
1598 		 */
1599 		memset(&filter, 0, sizeof(filter));
1600 		filter.type = QED_FILTER_MAC;
1601 		filter.opcode = QED_FILTER_REPLACE;
1602 		filter.is_rx_filter = 1;
1603 		filter.is_tx_filter = 1;
1604 		filter.vport_to_add_to = p_vf->vport_id;
1605 		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1606 
1607 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1608 					     &filter, QED_SPQ_MODE_CB, NULL);
1609 		if (rc) {
1610 			DP_NOTICE(p_hwfn,
1611 				  "PF failed to configure MAC for VF\n");
1612 			return rc;
1613 		}
1614 
1615 		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1616 	}
1617 
1618 	if (events & BIT(VLAN_ADDR_FORCED)) {
1619 		struct qed_sp_vport_update_params vport_update;
1620 		u8 removal;
1621 		int i;
1622 
1623 		memset(&filter, 0, sizeof(filter));
1624 		filter.type = QED_FILTER_VLAN;
1625 		filter.is_rx_filter = 1;
1626 		filter.is_tx_filter = 1;
1627 		filter.vport_to_add_to = p_vf->vport_id;
1628 		filter.vlan = p_vf->bulletin.p_virt->pvid;
1629 		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
1630 					      QED_FILTER_FLUSH;
1631 
1632 		/* Send the ramrod */
1633 		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1634 					     &filter, QED_SPQ_MODE_CB, NULL);
1635 		if (rc) {
1636 			DP_NOTICE(p_hwfn,
1637 				  "PF failed to configure VLAN for VF\n");
1638 			return rc;
1639 		}
1640 
1641 		/* Update the default-vlan & silent vlan stripping */
1642 		memset(&vport_update, 0, sizeof(vport_update));
1643 		vport_update.opaque_fid = p_vf->opaque_fid;
1644 		vport_update.vport_id = p_vf->vport_id;
1645 		vport_update.update_default_vlan_enable_flg = 1;
1646 		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1647 		vport_update.update_default_vlan_flg = 1;
1648 		vport_update.default_vlan = filter.vlan;
1649 
1650 		vport_update.update_inner_vlan_removal_flg = 1;
1651 		removal = filter.vlan ? 1
1652 				      : p_vf->shadow_config.inner_vlan_removal;
1653 		vport_update.inner_vlan_removal_flg = removal;
1654 		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1655 		rc = qed_sp_vport_update(p_hwfn,
1656 					 &vport_update,
1657 					 QED_SPQ_MODE_EBLOCK, NULL);
1658 		if (rc) {
1659 			DP_NOTICE(p_hwfn,
1660 				  "PF failed to configure VF vport for vlan\n");
1661 			return rc;
1662 		}
1663 
1664 		/* Update all the Rx queues */
1665 		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1666 			struct qed_queue_cid *p_cid;
1667 
1668 			p_cid = p_vf->vf_queues[i].p_rx_cid;
1669 			if (!p_cid)
1670 				continue;
1671 
1672 			rc = qed_sp_eth_rx_queues_update(p_hwfn,
1673 							 (void **)&p_cid,
1674 							 1, 0, 1,
1675 							 QED_SPQ_MODE_EBLOCK,
1676 							 NULL);
1677 			if (rc) {
1678 				DP_NOTICE(p_hwfn,
1679 					  "Failed to send Rx update for queue[0x%04x]\n",
1680 					  p_cid->rel.queue_id);
1681 				return rc;
1682 			}
1683 		}
1684 
1685 		if (filter.vlan)
1686 			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1687 		else
1688 			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
1689 	}
1690 
1691 	/* If forced features are terminated, we need to configure the shadow
1692 	 * configuration back again.
1693 	 */
1694 	if (events)
1695 		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1696 
1697 	return rc;
1698 }
1699 
1700 static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1701 				       struct qed_ptt *p_ptt,
1702 				       struct qed_vf_info *vf)
1703 {
1704 	struct qed_sp_vport_start_params params = { 0 };
1705 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1706 	struct vfpf_vport_start_tlv *start;
1707 	u8 status = PFVF_STATUS_SUCCESS;
1708 	struct qed_vf_info *vf_info;
1709 	u64 *p_bitmap;
1710 	int sb_id;
1711 	int rc;
1712 
1713 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1714 	if (!vf_info) {
1715 		DP_NOTICE(p_hwfn->cdev,
1716 			  "Failed to get VF info, invalid vfid [%d]\n",
1717 			  vf->relative_vf_id);
1718 		return;
1719 	}
1720 
1721 	vf->state = VF_ENABLED;
1722 	start = &mbx->req_virt->start_vport;
1723 
1724 	/* Initialize Status block in CAU */
1725 	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1726 		if (!start->sb_addr[sb_id]) {
1727 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1728 				   "VF[%d] did not fill the address of SB %d\n",
1729 				   vf->relative_vf_id, sb_id);
1730 			break;
1731 		}
1732 
1733 		qed_int_cau_conf_sb(p_hwfn, p_ptt,
1734 				    start->sb_addr[sb_id],
1735 				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
1736 	}
1737 	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1738 
1739 	vf->mtu = start->mtu;
1740 	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1741 
1742 	/* Take into consideration configuration forced by the hypervisor;
1743 	 * if none is configured, use the values supplied by the VF [for old
1744 	 * VFs that would still be fine, since they passed '0' as padding].
1745 	 */
1746 	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1747 	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1748 		u8 vf_req = start->only_untagged;
1749 
1750 		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1751 		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1752 	}
1753 
1754 	params.tpa_mode = start->tpa_mode;
1755 	params.remove_inner_vlan = start->inner_vlan_removal;
1756 	params.tx_switching = true;
1757 
1758 	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1759 	params.drop_ttl0 = false;
1760 	params.concrete_fid = vf->concrete_fid;
1761 	params.opaque_fid = vf->opaque_fid;
1762 	params.vport_id = vf->vport_id;
1763 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1764 	params.mtu = vf->mtu;
1765 	params.check_mac = true;
1766 
1767 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
1768 	if (rc) {
1769 		DP_ERR(p_hwfn,
1770 		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1771 		status = PFVF_STATUS_FAILURE;
1772 	} else {
1773 		vf->vport_instance++;
1774 
1775 		/* Force configuration if needed on the newly opened vport */
1776 		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1777 
1778 		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1779 	}
1780 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1781 			     sizeof(struct pfvf_def_resp_tlv), status);
1782 }
1783 
1784 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1785 				      struct qed_ptt *p_ptt,
1786 				      struct qed_vf_info *vf)
1787 {
1788 	u8 status = PFVF_STATUS_SUCCESS;
1789 	int rc;
1790 
1791 	vf->vport_instance--;
1792 	vf->spoof_chk = false;
1793 
1794 	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
1795 	if (rc) {
1796 		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1797 		       rc);
1798 		status = PFVF_STATUS_FAILURE;
1799 	}
1800 
1801 	/* Forget the configuration on the vport */
1802 	vf->configured_features = 0;
1803 	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
1804 
1805 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1806 			     sizeof(struct pfvf_def_resp_tlv), status);
1807 }
1808 
1809 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
1810 					  struct qed_ptt *p_ptt,
1811 					  struct qed_vf_info *vf,
1812 					  u8 status, bool b_legacy)
1813 {
1814 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1815 	struct pfvf_start_queue_resp_tlv *p_tlv;
1816 	struct vfpf_start_rxq_tlv *req;
1817 	u16 length;
1818 
1819 	mbx->offset = (u8 *)mbx->reply_virt;
1820 
1821 	/* Taking a bigger struct instead of adding a TLV to the list was a
1822 	 * mistake, but one which we're now stuck with, as some older
1823 	 * clients assume the size of the previous response.
1824 	 */
1825 	if (!b_legacy)
1826 		length = sizeof(*p_tlv);
1827 	else
1828 		length = sizeof(struct pfvf_def_resp_tlv);
1829 
1830 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
1831 			    length);
1832 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1833 		    sizeof(struct channel_list_end_tlv));
1834 
1835 	/* Update the TLV with the response */
1836 	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
1837 		req = &mbx->req_virt->start_rxq;
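		/* Annotation: this points the VF at its Rx producer - the
		 * per-queue producer entry inside the MSDM zone-B window of
		 * the VF's BAR0.
		 */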
1838 		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
1839 				offsetof(struct mstorm_vf_zone,
1840 					 non_trigger.eth_rx_queue_producers) +
1841 				sizeof(struct eth_rx_prod_data) * req->rx_qid;
1842 	}
1843 
1844 	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
1845 }
1846 
1847 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
1848 				     struct qed_ptt *p_ptt,
1849 				     struct qed_vf_info *vf)
1850 {
1851 	struct qed_queue_start_common_params params;
1852 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1853 	u8 status = PFVF_STATUS_NO_RESOURCE;
1854 	struct qed_vf_q_info *p_queue;
1855 	struct vfpf_start_rxq_tlv *req;
1856 	bool b_legacy_vf = false;
1857 	int rc;
1858 
1859 	req = &mbx->req_virt->start_rxq;
1860 
1861 	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
1862 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1863 		goto out;
1864 
1865 	/* Acquire a new queue-cid */
1866 	p_queue = &vf->vf_queues[req->rx_qid];
1867 
1868 	memset(&params, 0, sizeof(params));
1869 	params.queue_id = p_queue->fw_rx_qid;
1870 	params.vport_id = vf->vport_id;
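	/* Annotation: statistics index for the VF; the 0x10 offset presumably
	 * skips past the range used by the PFs.
	 */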
1871 	params.stats_id = vf->abs_vf_id + 0x10;
1872 	params.sb = req->hw_sb;
1873 	params.sb_idx = req->sb_index;
1874 
1875 	p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
1876 						  vf->opaque_fid,
1877 						  p_queue->fw_cid,
1878 						  req->rx_qid, &params);
1879 	if (!p_queue->p_rx_cid)
1880 		goto out;
1881 
1882 	/* Legacy VFs have their Producers in a different location, which they
1883 	 * calculate on their own and clean the producer prior to this.
1884 	 */
1885 	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1886 	    ETH_HSI_VER_NO_PKT_LEN_TUNN) {
1887 		b_legacy_vf = true;
1888 	} else {
1889 		REG_WR(p_hwfn,
1890 		       GTT_BAR0_MAP_REG_MSDM_RAM +
1891 		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
1892 		       0);
1893 	}
1894 	p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
1895 
1896 	rc = qed_eth_rxq_start_ramrod(p_hwfn,
1897 				      p_queue->p_rx_cid,
1898 				      req->bd_max_bytes,
1899 				      req->rxq_addr,
1900 				      req->cqe_pbl_addr, req->cqe_pbl_size);
1901 	if (rc) {
1902 		status = PFVF_STATUS_FAILURE;
1903 		qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
1904 		p_queue->p_rx_cid = NULL;
1905 	} else {
1906 		status = PFVF_STATUS_SUCCESS;
1907 		vf->num_active_rxqs++;
1908 	}
1909 
1910 out:
1911 	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
1912 }
1913 
1914 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
1915 					  struct qed_ptt *p_ptt,
1916 					  struct qed_vf_info *p_vf, u8 status)
1917 {
1918 	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1919 	struct pfvf_start_queue_resp_tlv *p_tlv;
1920 	bool b_legacy = false;
1921 	u16 length;
1922 
1923 	mbx->offset = (u8 *)mbx->reply_virt;
1924 
1925 	/* Taking a bigger struct instead of adding a TLV to the list was a
1926 	 * mistake, but one which we're now stuck with, as some older
1927 	 * clients assume the size of the previous response.
1928 	 */
1929 	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1930 	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
1931 		b_legacy = true;
1932 
1933 	if (!b_legacy)
1934 		length = sizeof(*p_tlv);
1935 	else
1936 		length = sizeof(struct pfvf_def_resp_tlv);
1937 
1938 	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
1939 			    length);
1940 	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1941 		    sizeof(struct channel_list_end_tlv));
1942 
1943 	/* Update the TLV with the response */
1944 	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
1945 		u16 qid = mbx->req_virt->start_txq.tx_qid;
1946 
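		/* Annotation: return the doorbell offset the VF should ring
		 * for this Tx queue, derived from the queue's firmware CID.
		 */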
1947 		p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
1948 					       DQ_DEMS_LEGACY);
1949 	}
1950 
1951 	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
1952 }
1953 
1954 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
1955 				     struct qed_ptt *p_ptt,
1956 				     struct qed_vf_info *vf)
1957 {
1958 	struct qed_queue_start_common_params params;
1959 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1960 	u8 status = PFVF_STATUS_NO_RESOURCE;
1961 	union qed_qm_pq_params pq_params;
1962 	struct vfpf_start_txq_tlv *req;
1963 	struct qed_vf_q_info *p_queue;
1964 	int rc;
1965 	u16 pq;
1966 
1967 	/* Prepare the parameters which would choose the right PQ */
1968 	memset(&pq_params, 0, sizeof(pq_params));
1969 	pq_params.eth.is_vf = 1;
1970 	pq_params.eth.vf_id = vf->relative_vf_id;
1971 
1972 	memset(&params, 0, sizeof(params));
1973 	req = &mbx->req_virt->start_txq;
1974 
1975 	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
1976 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
1977 		goto out;
1978 
1979 	/* Acquire a new queue-cid */
1980 	p_queue = &vf->vf_queues[req->tx_qid];
1981 
1982 	params.queue_id = p_queue->fw_tx_qid;
1983 	params.vport_id = vf->vport_id;
1984 	params.stats_id = vf->abs_vf_id + 0x10;
1985 	params.sb = req->hw_sb;
1986 	params.sb_idx = req->sb_index;
1987 
1988 	p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
1989 						  vf->opaque_fid,
1990 						  p_queue->fw_cid,
1991 						  req->tx_qid, &params);
1992 	if (!p_queue->p_tx_cid)
1993 		goto out;
1994 
1995 	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
1996 	rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
1997 				      req->pbl_addr, req->pbl_size, pq);
1998 	if (rc) {
1999 		status = PFVF_STATUS_FAILURE;
2000 		qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
2001 		p_queue->p_tx_cid = NULL;
2002 	} else {
2003 		status = PFVF_STATUS_SUCCESS;
2004 	}
2005 
2006 out:
2007 	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
2008 }
2009 
2010 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2011 				struct qed_vf_info *vf,
2012 				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
2013 {
2014 	struct qed_vf_q_info *p_queue;
2015 	int rc = 0;
2016 	int qid;
2017 
2018 	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
2019 		return -EINVAL;
2020 
2021 	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
2022 		p_queue = &vf->vf_queues[qid];
2023 
2024 		if (!p_queue->p_rx_cid)
2025 			continue;
2026 
2027 		rc = qed_eth_rx_queue_stop(p_hwfn,
2028 					   p_queue->p_rx_cid,
2029 					   false, cqe_completion);
2030 		if (rc)
2031 			return rc;
2032 
2033 		vf->vf_queues[qid].p_rx_cid = NULL;
2034 		vf->num_active_rxqs--;
2035 	}
2036 
2037 	return rc;
2038 }
2039 
2040 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2041 				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
2042 {
2043 	int rc = 0;
2044 	struct qed_vf_q_info *p_queue;
2045 	int qid;
2046 
2047 	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
2048 		return -EINVAL;
2049 
2050 	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
2051 		p_queue = &vf->vf_queues[qid];
2052 		if (!p_queue->p_tx_cid)
2053 			continue;
2054 
2055 		rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
2056 		if (rc)
2057 			return rc;
2058 
2059 		p_queue->p_tx_cid = NULL;
2060 	}
2061 
2062 	return rc;
2063 }
2064 
2065 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2066 				     struct qed_ptt *p_ptt,
2067 				     struct qed_vf_info *vf)
2068 {
2069 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2070 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2071 	u8 status = PFVF_STATUS_SUCCESS;
2072 	struct vfpf_stop_rxqs_tlv *req;
2073 	int rc;
2074 
2075 	/* We give the option of starting from qid != 0; in that case we
2076 	 * need to make sure that qid + num_qs doesn't exceed the actual
2077 	 * number of queues that exist.
2078 	 */
2079 	req = &mbx->req_virt->stop_rxqs;
2080 	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2081 				  req->num_rxqs, req->cqe_completion);
2082 	if (rc)
2083 		status = PFVF_STATUS_FAILURE;
2084 
2085 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2086 			     length, status);
2087 }
2088 
2089 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2090 				     struct qed_ptt *p_ptt,
2091 				     struct qed_vf_info *vf)
2092 {
2093 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2094 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2095 	u8 status = PFVF_STATUS_SUCCESS;
2096 	struct vfpf_stop_txqs_tlv *req;
2097 	int rc;
2098 
2099 	/* We give the option of starting from qid != 0; in that case we
2100 	 * need to make sure that qid + num_qs doesn't exceed the actual
2101 	 * number of queues that exist.
2102 	 */
2103 	req = &mbx->req_virt->stop_txqs;
2104 	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2105 	if (rc)
2106 		status = PFVF_STATUS_FAILURE;
2107 
2108 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2109 			     length, status);
2110 }
2111 
2112 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2113 				       struct qed_ptt *p_ptt,
2114 				       struct qed_vf_info *vf)
2115 {
2116 	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2117 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2118 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2119 	struct vfpf_update_rxq_tlv *req;
2120 	u8 status = PFVF_STATUS_FAILURE;
2121 	u8 complete_event_flg;
2122 	u8 complete_cqe_flg;
2123 	u16 qid;
2124 	int rc;
2125 	u8 i;
2126 
2127 	req = &mbx->req_virt->update_rxq;
2128 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2129 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2130 
2131 	/* Validate inputs */
2132 	if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
2133 	    !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
2134 		DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2135 			vf->relative_vf_id, req->rx_qid, req->num_rxqs);
2136 		goto out;
2137 	}
2138 
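	/* Annotation: collect the queue-cid handles for the requested range
	 * of Rx queues, making sure each one is actually active.
	 */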
2139 	for (i = 0; i < req->num_rxqs; i++) {
2140 		qid = req->rx_qid + i;
2141 		if (!vf->vf_queues[qid].p_rx_cid) {
2142 			DP_INFO(p_hwfn,
2143 				"VF[%d] rx_qid = %d isn't active!\n",
2144 				vf->relative_vf_id, qid);
2145 			goto out;
2146 		}
2147 
2148 		handlers[i] = vf->vf_queues[qid].p_rx_cid;
2149 	}
2150 
2151 	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2152 					 req->num_rxqs,
2153 					 complete_cqe_flg,
2154 					 complete_event_flg,
2155 					 QED_SPQ_MODE_EBLOCK, NULL);
2156 	if (rc)
2157 		goto out;
2158 
2159 	status = PFVF_STATUS_SUCCESS;
2160 out:
2161 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2162 			     length, status);
2163 }
2164 
2165 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2166 			       void *p_tlvs_list, u16 req_type)
2167 {
2168 	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2169 	int len = 0;
2170 
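	/* Annotation: walk the TLV chain until the LIST_END marker, looking
	 * for the requested type; guard against zero-length TLVs and against
	 * running past the end of the buffer.
	 */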
2171 	do {
2172 		if (!p_tlv->length) {
2173 			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2174 			return NULL;
2175 		}
2176 
2177 		if (p_tlv->type == req_type) {
2178 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2179 				   "Extended tlv type %d, length %d found\n",
2180 				   p_tlv->type, p_tlv->length);
2181 			return p_tlv;
2182 		}
2183 
2184 		len += p_tlv->length;
2185 		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2186 
2187 		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2188 			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2189 			return NULL;
2190 		}
2191 	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
2192 
2193 	return NULL;
2194 }
2195 
2196 static void
2197 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2198 			    struct qed_sp_vport_update_params *p_data,
2199 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2200 {
2201 	struct vfpf_vport_update_activate_tlv *p_act_tlv;
2202 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2203 
2204 	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2205 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2206 	if (!p_act_tlv)
2207 		return;
2208 
2209 	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2210 	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2211 	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2212 	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2213 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2214 }
2215 
2216 static void
2217 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2218 			     struct qed_sp_vport_update_params *p_data,
2219 			     struct qed_vf_info *p_vf,
2220 			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2221 {
2222 	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2223 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2224 
2225 	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2226 		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2227 	if (!p_vlan_tlv)
2228 		return;
2229 
2230 	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2231 
2232 	/* Ignore the VF request if we're forcing a vlan */
2233 	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2234 		p_data->update_inner_vlan_removal_flg = 1;
2235 		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2236 	}
2237 
2238 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2239 }
2240 
2241 static void
2242 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2243 			    struct qed_sp_vport_update_params *p_data,
2244 			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2245 {
2246 	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2247 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2248 
2249 	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2250 			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2251 						   tlv);
2252 	if (!p_tx_switch_tlv)
2253 		return;
2254 
2255 	p_data->update_tx_switching_flg = 1;
2256 	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2257 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2258 }
2259 
2260 static void
2261 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2262 				  struct qed_sp_vport_update_params *p_data,
2263 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2264 {
2265 	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2266 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2267 
2268 	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2269 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2270 	if (!p_mcast_tlv)
2271 		return;
2272 
2273 	p_data->update_approx_mcast_flg = 1;
2274 	memcpy(p_data->bins, p_mcast_tlv->bins,
2275 	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2276 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2277 }
2278 
2279 static void
2280 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2281 			      struct qed_sp_vport_update_params *p_data,
2282 			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2283 {
2284 	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2285 	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2286 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2287 
2288 	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2289 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2290 	if (!p_accept_tlv)
2291 		return;
2292 
2293 	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2294 	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2295 	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2296 	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2297 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2298 }
2299 
2300 static void
2301 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2302 				  struct qed_sp_vport_update_params *p_data,
2303 				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2304 {
2305 	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2306 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2307 
2308 	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2309 			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2310 						     tlv);
2311 	if (!p_accept_any_vlan)
2312 		return;
2313 
2314 	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2315 	p_data->update_accept_any_vlan_flg =
2316 		    p_accept_any_vlan->update_accept_any_vlan_flg;
2317 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2318 }
2319 
2320 static void
2321 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2322 			    struct qed_vf_info *vf,
2323 			    struct qed_sp_vport_update_params *p_data,
2324 			    struct qed_rss_params *p_rss,
2325 			    struct qed_iov_vf_mbx *p_mbx,
2326 			    u16 *tlvs_mask, u16 *tlvs_accepted)
2327 {
2328 	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2329 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2330 	bool b_reject = false;
2331 	u16 table_size;
2332 	u16 i, q_idx;
2333 
2334 	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2335 		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2336 	if (!p_rss_tlv) {
2337 		p_data->rss_params = NULL;
2338 		return;
2339 	}
2340 
2341 	memset(p_rss, 0, sizeof(struct qed_rss_params));
2342 
2343 	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2344 				      VFPF_UPDATE_RSS_CONFIG_FLAG);
2345 	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2346 					    VFPF_UPDATE_RSS_CAPS_FLAG);
2347 	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2348 					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2349 	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2350 				   VFPF_UPDATE_RSS_KEY_FLAG);
2351 
2352 	p_rss->rss_enable = p_rss_tlv->rss_enable;
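	/* Annotation: each VF gets its own RSS engine, derived from its
	 * relative id (offset by one, presumably so engine 0 stays with
	 * the PF).
	 */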
2353 	p_rss->rss_eng_id = vf->relative_vf_id + 1;
2354 	p_rss->rss_caps = p_rss_tlv->rss_caps;
2355 	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2356 	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2357 
2358 	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2359 			   (1 << p_rss_tlv->rss_table_size_log));
2360 
2361 	for (i = 0; i < table_size; i++) {
2362 		q_idx = p_rss_tlv->rss_ind_table[i];
2363 		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
2364 			DP_VERBOSE(p_hwfn,
2365 				   QED_MSG_IOV,
2366 				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2367 				   vf->relative_vf_id, q_idx);
2368 			b_reject = true;
2369 			goto out;
2370 		}
2371 
2372 		if (!vf->vf_queues[q_idx].p_rx_cid) {
2373 			DP_VERBOSE(p_hwfn,
2374 				   QED_MSG_IOV,
2375 				   "VF[%d]: Omitting RSS due to inactive queue %08x\n",
2376 				   vf->relative_vf_id, q_idx);
2377 			b_reject = true;
2378 			goto out;
2379 		}
2380 
2381 		p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
2382 	}
2383 
2384 	p_data->rss_params = p_rss;
2385 out:
2386 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2387 	if (!b_reject)
2388 		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
2389 }
2390 
2391 static void
2392 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2393 				struct qed_vf_info *vf,
2394 				struct qed_sp_vport_update_params *p_data,
2395 				struct qed_sge_tpa_params *p_sge_tpa,
2396 				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2397 {
2398 	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2399 	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2400 
2401 	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2402 	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2403 
2404 	if (!p_sge_tpa_tlv) {
2405 		p_data->sge_tpa_params = NULL;
2406 		return;
2407 	}
2408 
2409 	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2410 
2411 	p_sge_tpa->update_tpa_en_flg =
2412 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2413 	p_sge_tpa->update_tpa_param_flg =
2414 	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2415 		VFPF_UPDATE_TPA_PARAM_FLAG);
2416 
2417 	p_sge_tpa->tpa_ipv4_en_flg =
2418 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2419 	p_sge_tpa->tpa_ipv6_en_flg =
2420 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2421 	p_sge_tpa->tpa_pkt_split_flg =
2422 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2423 	p_sge_tpa->tpa_hdr_data_split_flg =
2424 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2425 	p_sge_tpa->tpa_gro_consistent_flg =
2426 	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2427 
2428 	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2429 	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2430 	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2431 	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2432 	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2433 
2434 	p_data->sge_tpa_params = p_sge_tpa;
2435 
2436 	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2437 }
2438 
2439 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2440 				    u8 vfid,
2441 				    struct qed_sp_vport_update_params *params,
2442 				    u16 *tlvs)
2443 {
2444 	u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
2445 	struct qed_filter_accept_flags *flags = &params->accept_flags;
2446 	struct qed_public_vf_info *vf_info;
2447 
2448 	/* Untrusted VFs can't even be trusted to know that fact.
2449 	 * Simply indicate everything is configured fine, and trace
2450 	 * configuration 'behind their back'.
2451 	 */
2452 	if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
2453 		return 0;
2454 
2455 	vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
2456 
2457 	if (flags->update_rx_mode_config) {
2458 		vf_info->rx_accept_mode = flags->rx_accept_filter;
2459 		if (!vf_info->is_trusted_configured)
2460 			flags->rx_accept_filter &= ~mask;
2461 	}
2462 
2463 	if (flags->update_tx_mode_config) {
2464 		vf_info->tx_accept_mode = flags->tx_accept_filter;
2465 		if (!vf_info->is_trusted_configured)
2466 			flags->tx_accept_filter &= ~mask;
2467 	}
2468 
2469 	return 0;
2470 }
2471 
2472 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
2473 					struct qed_ptt *p_ptt,
2474 					struct qed_vf_info *vf)
2475 {
2476 	struct qed_rss_params *p_rss_params = NULL;
2477 	struct qed_sp_vport_update_params params;
2478 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2479 	struct qed_sge_tpa_params sge_tpa_params;
2480 	u16 tlvs_mask = 0, tlvs_accepted = 0;
2481 	u8 status = PFVF_STATUS_SUCCESS;
2482 	u16 length;
2483 	int rc;
2484 
2485 	/* Validate the VF can send such a request */
2486 	if (!vf->vport_instance) {
2487 		DP_VERBOSE(p_hwfn,
2488 			   QED_MSG_IOV,
2489 			   "No VPORT instance available for VF[%d], failing vport update\n",
2490 			   vf->abs_vf_id);
2491 		status = PFVF_STATUS_FAILURE;
2492 		goto out;
2493 	}
2494 	p_rss_params = vzalloc(sizeof(*p_rss_params));
2495 	if (!p_rss_params) {
2496 		status = PFVF_STATUS_FAILURE;
2497 		goto out;
2498 	}
2499 
2500 	memset(&params, 0, sizeof(params));
2501 	params.opaque_fid = vf->opaque_fid;
2502 	params.vport_id = vf->vport_id;
2503 	params.rss_params = NULL;
2504 
2505 	/* Search for extended tlvs list and update values
2506 	 * from VF in struct qed_sp_vport_update_params.
2507 	 */
2508 	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
2509 	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2510 	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
2511 	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2512 	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2513 	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2514 	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2515 					&sge_tpa_params, mbx, &tlvs_mask);
2516 
2517 	tlvs_accepted = tlvs_mask;
2518 
2519 	/* Some of the extended TLVs need to be validated first; in that case,
2520 	 * they can set their bit in the mask without setting it in the
2521 	 * accepted mask [so the PF can tell the VF it rejected the request].
2522 	 */
2523 	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
2524 				    mbx, &tlvs_mask, &tlvs_accepted);
2525 
2526 	if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
2527 				     &params, &tlvs_accepted)) {
2528 		tlvs_accepted = 0;
2529 		status = PFVF_STATUS_NOT_SUPPORTED;
2530 		goto out;
2531 	}
2532 
2533 	if (!tlvs_accepted) {
2534 		if (tlvs_mask)
2535 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2536 				   "Upper-layer prevents VF vport configuration\n");
2537 		else
2538 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2539 				   "No feature tlvs found for vport update\n");
2540 		status = PFVF_STATUS_NOT_SUPPORTED;
2541 		goto out;
2542 	}
2543 
2544 	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
2545 
2546 	if (rc)
2547 		status = PFVF_STATUS_FAILURE;
2548 
2549 out:
2550 	vfree(p_rss_params);
2551 	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2552 						  tlvs_mask, tlvs_accepted);
2553 	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2554 }
2555 
2556 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
2557 					 struct qed_vf_info *p_vf,
2558 					 struct qed_filter_ucast *p_params)
2559 {
2560 	int i;
2561 
2562 	/* First remove entries and then add new ones */
2563 	if (p_params->opcode == QED_FILTER_REMOVE) {
2564 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2565 			if (p_vf->shadow_config.vlans[i].used &&
2566 			    p_vf->shadow_config.vlans[i].vid ==
2567 			    p_params->vlan) {
2568 				p_vf->shadow_config.vlans[i].used = false;
2569 				break;
2570 			}
2571 		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2572 			DP_VERBOSE(p_hwfn,
2573 				   QED_MSG_IOV,
2574 				   "VF [%d] - Tries to remove a non-existing vlan\n",
2575 				   p_vf->relative_vf_id);
2576 			return -EINVAL;
2577 		}
2578 	} else if (p_params->opcode == QED_FILTER_REPLACE ||
2579 		   p_params->opcode == QED_FILTER_FLUSH) {
2580 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2581 			p_vf->shadow_config.vlans[i].used = false;
2582 	}
2583 
2584 	/* In forced mode, we're willing to remove entries - but we don't add
2585 	 * new ones.
2586 	 */
2587 	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
2588 		return 0;
2589 
2590 	if (p_params->opcode == QED_FILTER_ADD ||
2591 	    p_params->opcode == QED_FILTER_REPLACE) {
2592 		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2593 			if (p_vf->shadow_config.vlans[i].used)
2594 				continue;
2595 
2596 			p_vf->shadow_config.vlans[i].used = true;
2597 			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2598 			break;
2599 		}
2600 
2601 		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2602 			DP_VERBOSE(p_hwfn,
2603 				   QED_MSG_IOV,
2604 				   "VF [%d] - Tries to configure more than %d vlan filters\n",
2605 				   p_vf->relative_vf_id,
2606 				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
2607 			return -EINVAL;
2608 		}
2609 	}
2610 
2611 	return 0;
2612 }
2613 
2614 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
2615 					struct qed_vf_info *p_vf,
2616 					struct qed_filter_ucast *p_params)
2617 {
2618 	int i;
2619 
2620 	/* If we're in forced-mode, we don't allow any change */
2621 	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
2622 		return 0;
2623 
2624 	/* First remove entries and then add new ones */
2625 	if (p_params->opcode == QED_FILTER_REMOVE) {
2626 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2627 			if (ether_addr_equal(p_vf->shadow_config.macs[i],
2628 					     p_params->mac)) {
2629 				memset(p_vf->shadow_config.macs[i], 0,
2630 				       ETH_ALEN);
2631 				break;
2632 			}
2633 		}
2634 
2635 		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2636 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2637 				   "MAC isn't configured\n");
2638 			return -EINVAL;
2639 		}
2640 	} else if (p_params->opcode == QED_FILTER_REPLACE ||
2641 		   p_params->opcode == QED_FILTER_FLUSH) {
2642 		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
2643 			memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
2644 	}
2645 
2646 	/* List the new MAC address */
2647 	if (p_params->opcode != QED_FILTER_ADD &&
2648 	    p_params->opcode != QED_FILTER_REPLACE)
2649 		return 0;
2650 
2651 	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2652 		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
2653 			ether_addr_copy(p_vf->shadow_config.macs[i],
2654 					p_params->mac);
2655 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2656 				   "Added MAC at %d entry in shadow\n", i);
2657 			break;
2658 		}
2659 	}
2660 
2661 	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2662 		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
2663 		return -EINVAL;
2664 	}
2665 
2666 	return 0;
2667 }
2668 
2669 static int
2670 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
2671 				 struct qed_vf_info *p_vf,
2672 				 struct qed_filter_ucast *p_params)
2673 {
2674 	int rc = 0;
2675 
2676 	if (p_params->type == QED_FILTER_MAC) {
2677 		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
2678 		if (rc)
2679 			return rc;
2680 	}
2681 
2682 	if (p_params->type == QED_FILTER_VLAN)
2683 		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
2684 
2685 	return rc;
2686 }
2687 
2688 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
2689 			     int vfid, struct qed_filter_ucast *params)
2690 {
2691 	struct qed_public_vf_info *vf;
2692 
2693 	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
2694 	if (!vf)
2695 		return -EINVAL;
2696 
2697 	/* No real decision to make; Store the configured MAC */
2698 	if (params->type == QED_FILTER_MAC ||
2699 	    params->type == QED_FILTER_MAC_VLAN)
2700 		ether_addr_copy(vf->mac, params->mac);
2701 
2702 	return 0;
2703 }
2704 
2705 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
2706 					struct qed_ptt *p_ptt,
2707 					struct qed_vf_info *vf)
2708 {
2709 	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
2710 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2711 	struct vfpf_ucast_filter_tlv *req;
2712 	u8 status = PFVF_STATUS_SUCCESS;
2713 	struct qed_filter_ucast params;
2714 	int rc;
2715 
2716 	/* Prepare the unicast filter params */
2717 	memset(&params, 0, sizeof(struct qed_filter_ucast));
2718 	req = &mbx->req_virt->ucast_filter;
2719 	params.opcode = (enum qed_filter_opcode)req->opcode;
2720 	params.type = (enum qed_filter_ucast_type)req->type;
2721 
2722 	params.is_rx_filter = 1;
2723 	params.is_tx_filter = 1;
2724 	params.vport_to_remove_from = vf->vport_id;
2725 	params.vport_to_add_to = vf->vport_id;
2726 	memcpy(params.mac, req->mac, ETH_ALEN);
2727 	params.vlan = req->vlan;
2728 
2729 	DP_VERBOSE(p_hwfn,
2730 		   QED_MSG_IOV,
2731 		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
2732 		   vf->abs_vf_id, params.opcode, params.type,
2733 		   params.is_rx_filter ? "RX" : "",
2734 		   params.is_tx_filter ? "TX" : "",
2735 		   params.vport_to_add_to,
2736 		   params.mac[0], params.mac[1],
2737 		   params.mac[2], params.mac[3],
2738 		   params.mac[4], params.mac[5], params.vlan);
2739 
2740 	if (!vf->vport_instance) {
2741 		DP_VERBOSE(p_hwfn,
2742 			   QED_MSG_IOV,
2743 			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
2744 			   vf->abs_vf_id);
2745 		status = PFVF_STATUS_FAILURE;
2746 		goto out;
2747 	}
2748 
2749 	/* Update shadow copy of the VF configuration */
2750 	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
2751 		status = PFVF_STATUS_FAILURE;
2752 		goto out;
2753 	}
2754 
2755 	/* Determine if the unicast filtering is acceptable to the PF */
2756 	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
2757 	    (params.type == QED_FILTER_VLAN ||
2758 	     params.type == QED_FILTER_MAC_VLAN)) {
2759 		/* Once a VLAN is forced or a PVID is set, don't allow
2760 		 * adding or replacing any further VLANs.
2761 		 */
2762 		if (params.opcode == QED_FILTER_ADD ||
2763 		    params.opcode == QED_FILTER_REPLACE)
2764 			status = PFVF_STATUS_FORCED;
2765 		goto out;
2766 	}
2767 
2768 	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
2769 	    (params.type == QED_FILTER_MAC ||
2770 	     params.type == QED_FILTER_MAC_VLAN)) {
2771 		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
2772 		    (params.opcode != QED_FILTER_ADD &&
2773 		     params.opcode != QED_FILTER_REPLACE))
2774 			status = PFVF_STATUS_FORCED;
2775 		goto out;
2776 	}
2777 
2778 	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
2779 	if (rc) {
2780 		status = PFVF_STATUS_FAILURE;
2781 		goto out;
2782 	}
2783 
2784 	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
2785 				     QED_SPQ_MODE_CB, NULL);
2786 	if (rc)
2787 		status = PFVF_STATUS_FAILURE;
2788 
2789 out:
2790 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
2791 			     sizeof(struct pfvf_def_resp_tlv), status);
2792 }
2793 
2794 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
2795 				       struct qed_ptt *p_ptt,
2796 				       struct qed_vf_info *vf)
2797 {
2798 	int i;
2799 
2800 	/* Reset the SBs */
2801 	for (i = 0; i < vf->num_sbs; i++)
2802 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2803 						vf->igu_sbs[i],
2804 						vf->opaque_fid, false);
2805 
2806 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
2807 			     sizeof(struct pfvf_def_resp_tlv),
2808 			     PFVF_STATUS_SUCCESS);
2809 }
2810 
2811 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
2812 				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
2813 {
2814 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2815 	u8 status = PFVF_STATUS_SUCCESS;
2816 
2817 	/* Disable Interrupts for VF */
2818 	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
2819 
2820 	/* Reset Permission table */
2821 	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
2822 
2823 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
2824 			     length, status);
2825 }
2826 
2827 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
2828 				   struct qed_ptt *p_ptt,
2829 				   struct qed_vf_info *p_vf)
2830 {
2831 	u16 length = sizeof(struct pfvf_def_resp_tlv);
2832 	u8 status = PFVF_STATUS_SUCCESS;
2833 	int rc = 0;
2834 
2835 	qed_iov_vf_cleanup(p_hwfn, p_vf);
2836 
2837 	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
2838 		/* Stopping the VF */
2839 		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
2840 				    p_vf->opaque_fid);
2841 
2842 		if (rc) {
2843 			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
2844 			       rc);
2845 			status = PFVF_STATUS_FAILURE;
2846 		}
2847 
2848 		p_vf->state = VF_STOPPED;
2849 	}
2850 
2851 	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
2852 			     length, status);
2853 }
2854 
2855 static int
2856 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
2857 			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2858 {
2859 	int cnt;
2860 	u32 val;
2861 
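	/* Annotation: pretend to be the VF so the DORQ usage counter read
	 * below reflects that VF's outstanding doorbells, then poll (up to
	 * 50 * 20ms) for the counter to drain before restoring the PF's FID.
	 */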
2862 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
2863 
2864 	for (cnt = 0; cnt < 50; cnt++) {
2865 		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
2866 		if (!val)
2867 			break;
2868 		msleep(20);
2869 	}
2870 	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
2871 
2872 	if (cnt == 50) {
2873 		DP_ERR(p_hwfn,
2874 		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
2875 		       p_vf->abs_vf_id, val);
2876 		return -EBUSY;
2877 	}
2878 
2879 	return 0;
2880 }
2881 
2882 static int
2883 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
2884 			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2885 {
2886 	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
2887 	int i, cnt;
2888 
2889 	/* Read initial consumers & producers */
2890 	for (i = 0; i < MAX_NUM_VOQS; i++) {
2891 		u32 prod;
2892 
2893 		cons[i] = qed_rd(p_hwfn, p_ptt,
2894 				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2895 				 i * 0x40);
2896 		prod = qed_rd(p_hwfn, p_ptt,
2897 			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
2898 			      i * 0x40);
2899 		distance[i] = prod - cons[i];
2900 	}
2901 
2902 	/* Wait for consumers to pass the producers */
2903 	i = 0;
2904 	for (cnt = 0; cnt < 50; cnt++) {
2905 		for (; i < MAX_NUM_VOQS; i++) {
2906 			u32 tmp;
2907 
2908 			tmp = qed_rd(p_hwfn, p_ptt,
2909 				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2910 				     i * 0x40);
2911 			if (distance[i] > tmp - cons[i])
2912 				break;
2913 		}
2914 
2915 		if (i == MAX_NUM_VOQS)
2916 			break;
2917 
2918 		msleep(20);
2919 	}
2920 
2921 	if (cnt == 50) {
2922 		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
2923 		       p_vf->abs_vf_id, i);
2924 		return -EBUSY;
2925 	}
2926 
2927 	return 0;
2928 }
2929 
2930 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
2931 			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
2932 {
2933 	int rc;
2934 
2935 	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
2936 	if (rc)
2937 		return rc;
2938 
2939 	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
2940 	if (rc)
2941 		return rc;
2942 
2943 	return 0;
2944 }
2945 
2946 static int
2947 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
2948 			       struct qed_ptt *p_ptt,
2949 			       u16 rel_vf_id, u32 *ack_vfs)
2950 {
2951 	struct qed_vf_info *p_vf;
2952 	int rc = 0;
2953 
2954 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
2955 	if (!p_vf)
2956 		return 0;
2957 
2958 	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
2959 	    (1ULL << (rel_vf_id % 64))) {
2960 		u16 vfid = p_vf->abs_vf_id;
2961 
2962 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2963 			   "VF[%d] - Handling FLR\n", vfid);
2964 
2965 		qed_iov_vf_cleanup(p_hwfn, p_vf);
2966 
2967 		/* If VF isn't active, no need for anything but SW */
2968 		if (!p_vf->b_init)
2969 			goto cleanup;
2970 
2971 		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
2972 		if (rc)
2973 			goto cleanup;
2974 
2975 		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
2976 		if (rc) {
2977 			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
2978 			return rc;
2979 		}
2980 
2981 		/* Workaround to make VF-PF channel ready, as FW
2982 		 * doesn't do that as a part of FLR.
2983 		 */
2984 		REG_WR(p_hwfn,
2985 		       GTT_BAR0_MAP_REG_USDM_RAM +
2986 		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
2987 
2988 		/* VF_STOPPED has to be set only after final cleanup
2989 		 * but prior to re-enabling the VF.
2990 		 */
2991 		p_vf->state = VF_STOPPED;
2992 
2993 		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
2994 		if (rc) {
2995 			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
2996 			       vfid);
2997 			return rc;
2998 		}
2999 cleanup:
3000 		/* Mark VF for ack and clean pending state */
3001 		if (p_vf->state == VF_RESET)
3002 			p_vf->state = VF_STOPPED;
3003 		ack_vfs[vfid / 32] |= BIT((vfid % 32));
3004 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3005 		    ~(1ULL << (rel_vf_id % 64));
3006 		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
3007 		    ~(1ULL << (rel_vf_id % 64));
3008 	}
3009 
3010 	return rc;
3011 }
3012 
3013 static int
3014 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3015 {
3016 	u32 ack_vfs[VF_MAX_STATIC / 32];
3017 	int rc = 0;
3018 	u16 i;
3019 
3020 	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3021 
3022 	/* Since BRB <-> PRS interface can't be tested as part of the flr
3023 	 * polling due to HW limitations, simply sleep a bit. And since
3024 	 * there's no need to wait per-vf, do it before looping.
3025 	 */
3026 	msleep(100);
3027 
3028 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3029 		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3030 
3031 	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3032 	return rc;
3033 }
3034 
3035 int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3036 {
3037 	u16 i, found = 0;
3038 
3039 	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3040 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3041 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3042 			   "[%08x,...,%08x]: %08x\n",
3043 			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3044 
3045 	if (!p_hwfn->cdev->p_iov_info) {
3046 		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3047 		return 0;
3048 	}
3049 
3050 	/* Mark VFs */
3051 	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3052 		struct qed_vf_info *p_vf;
3053 		u8 vfid;
3054 
3055 		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3056 		if (!p_vf)
3057 			continue;
3058 
3059 		vfid = p_vf->abs_vf_id;
3060 		if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3061 			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3062 			u16 rel_vf_id = p_vf->relative_vf_id;
3063 
3064 			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3065 				   "VF[%d] [rel %d] got FLR-ed\n",
3066 				   vfid, rel_vf_id);
3067 
3068 			p_vf->state = VF_RESET;
3069 
3070 			/* No need to lock here, since pending_flr should
3071 			 * only change here and before ACKing the MFW. Since
3072 			 * the MFW will not trigger an additional attention
3073 			 * for VF FLR until we ACK, we're safe.
3074 			 */
3075 			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3076 			found = 1;
3077 		}
3078 	}
3079 
3080 	return found;
3081 }
3082 
3083 static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
3084 			     u16 vfid,
3085 			     struct qed_mcp_link_params *p_params,
3086 			     struct qed_mcp_link_state *p_link,
3087 			     struct qed_mcp_link_capabilities *p_caps)
3088 {
3089 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3090 						       vfid,
3091 						       false);
3092 	struct qed_bulletin_content *p_bulletin;
3093 
3094 	if (!p_vf)
3095 		return;
3096 
3097 	p_bulletin = p_vf->bulletin.p_virt;
3098 
3099 	if (p_params)
3100 		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3101 	if (p_link)
3102 		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3103 	if (p_caps)
3104 		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3105 }
3106 
3107 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3108 				    struct qed_ptt *p_ptt, int vfid)
3109 {
3110 	struct qed_iov_vf_mbx *mbx;
3111 	struct qed_vf_info *p_vf;
3112 
3113 	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3114 	if (!p_vf)
3115 		return;
3116 
3117 	mbx = &p_vf->vf_mbx;
3118 
3119 	/* qed_iov_process_mbx_request */
3120 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3121 		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
3122 
3123 	mbx->first_tlv = mbx->req_virt->first_tlv;
3124 
3125 	/* check if tlv type is known */
3126 	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3127 	    !p_vf->b_malicious) {
3128 		switch (mbx->first_tlv.tl.type) {
3129 		case CHANNEL_TLV_ACQUIRE:
3130 			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3131 			break;
3132 		case CHANNEL_TLV_VPORT_START:
3133 			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3134 			break;
3135 		case CHANNEL_TLV_VPORT_TEARDOWN:
3136 			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3137 			break;
3138 		case CHANNEL_TLV_START_RXQ:
3139 			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3140 			break;
3141 		case CHANNEL_TLV_START_TXQ:
3142 			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3143 			break;
3144 		case CHANNEL_TLV_STOP_RXQS:
3145 			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3146 			break;
3147 		case CHANNEL_TLV_STOP_TXQS:
3148 			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3149 			break;
3150 		case CHANNEL_TLV_UPDATE_RXQ:
3151 			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3152 			break;
3153 		case CHANNEL_TLV_VPORT_UPDATE:
3154 			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3155 			break;
3156 		case CHANNEL_TLV_UCAST_FILTER:
3157 			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3158 			break;
3159 		case CHANNEL_TLV_CLOSE:
3160 			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3161 			break;
3162 		case CHANNEL_TLV_INT_CLEANUP:
3163 			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3164 			break;
3165 		case CHANNEL_TLV_RELEASE:
3166 			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3167 			break;
3168 		}
3169 	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3170 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3171 			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3172 			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3173 
3174 		qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3175 				     mbx->first_tlv.tl.type,
3176 				     sizeof(struct pfvf_def_resp_tlv),
3177 				     PFVF_STATUS_MALICIOUS);
3178 	} else {
3179 		/* unknown TLV - this may belong to a VF driver from the future
3180 		 * - a version written after this PF driver was written, which
3181 		 * supports features unknown as of yet. Too bad since we don't
3182 		 * support them. Or this may be because someone wrote a crappy
3183 		 * VF driver and is sending garbage over the channel.
3184 		 */
3185 		DP_NOTICE(p_hwfn,
3186 			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3187 			  p_vf->abs_vf_id,
3188 			  mbx->first_tlv.tl.type,
3189 			  mbx->first_tlv.tl.length,
3190 			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3191 
3192 		/* Try replying in case reply address matches the acquisition's
3193 		 * posted address.
3194 		 */
3195 		if (p_vf->acquire.first_tlv.reply_address &&
3196 		    (mbx->first_tlv.reply_address ==
3197 		     p_vf->acquire.first_tlv.reply_address)) {
3198 			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3199 					     mbx->first_tlv.tl.type,
3200 					     sizeof(struct pfvf_def_resp_tlv),
3201 					     PFVF_STATUS_NOT_SUPPORTED);
3202 		} else {
3203 			DP_VERBOSE(p_hwfn,
3204 				   QED_MSG_IOV,
3205 				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3206 				   p_vf->abs_vf_id);
3207 		}
3208 	}
3209 }
3210 
3211 static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
3212 {
3213 	u64 add_bit = 1ULL << (vfid % 64);
3214 
3215 	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3216 }
3217 
3218 static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
3219 						    u64 *events)
3220 {
3221 	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3222 
3223 	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3224 	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3225 }
3226 
3227 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
3228 						       u16 abs_vfid)
3229 {
3230 	u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3231 
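	/* Annotation: translate the absolute VF id into an index relative to
	 * this PF before sanity checking and indexing the per-PF VFs array.
	 */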
3232 	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3233 		DP_VERBOSE(p_hwfn,
3234 			   QED_MSG_IOV,
3235 			   "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
3236 			   abs_vfid);
3237 		return NULL;
3238 	}
3239 
3240 	return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
3241 }
3242 
3243 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
3244 			      u16 abs_vfid, struct regpair *vf_msg)
3245 {
3246 	struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
3247 			   abs_vfid);
3248 
3249 	if (!p_vf)
3250 		return 0;
3251 
3252 	/* Record the physical address of the request so that the handler
3253 	 * can later copy the message from it.
3254 	 */
3255 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3256 
3257 	/* Mark the event and schedule the workqueue */
3258 	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
3259 	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
3260 
3261 	return 0;
3262 }
3263 
3264 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
3265 				     struct malicious_vf_eqe_data *p_data)
3266 {
3267 	struct qed_vf_info *p_vf;
3268 
3269 	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
3270 
3271 	if (!p_vf)
3272 		return;
3273 
3274 	DP_INFO(p_hwfn,
3275 		"VF [%d] - Malicious behavior [%02x]\n",
3276 		p_vf->abs_vf_id, p_data->err_id);
3277 
3278 	p_vf->b_malicious = true;
3279 }
3280 
3281 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
3282 			u8 opcode, __le16 echo, union event_ring_data *data)
3283 {
3284 	switch (opcode) {
3285 	case COMMON_EVENT_VF_PF_CHANNEL:
3286 		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
3287 					  &data->vf_pf_channel.msg_addr);
3288 	case COMMON_EVENT_MALICIOUS_VF:
3289 		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3290 		return 0;
3291 	default:
3292 		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
3293 			opcode);
3294 		return -EINVAL;
3295 	}
3296 }
3297 
3298 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3299 {
3300 	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
3301 	u16 i;
3302 
3303 	if (!p_iov)
3304 		goto out;
3305 
3306 	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
3307 		if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
3308 			return i;
3309 
3310 out:
3311 	return MAX_NUM_VFS;
3312 }
3313 
3314 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
3315 			       int vfid)
3316 {
3317 	struct qed_dmae_params params;
3318 	struct qed_vf_info *vf_info;
3319 
3320 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3321 	if (!vf_info)
3322 		return -EINVAL;
3323 
3324 	memset(&params, 0, sizeof(struct qed_dmae_params));
3325 	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
3326 	params.src_vfid = vf_info->abs_vf_id;
3327 
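	/* Annotation: DMA the VF's request from the address it posted into
	 * the PF-side mailbox buffer; the DMAE length parameter is in
	 * dwords, hence the division by 4.
	 */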
3328 	if (qed_dmae_host2host(p_hwfn, ptt,
3329 			       vf_info->vf_mbx.pending_req,
3330 			       vf_info->vf_mbx.req_phys,
3331 			       sizeof(union vfpf_tlvs) / 4, &params)) {
3332 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3333 			   "Failed to copy message from VF 0x%02x\n", vfid);
3334 
3335 		return -EIO;
3336 	}
3337 
3338 	return 0;
3339 }
3340 
3341 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
3342 					    u8 *mac, int vfid)
3343 {
3344 	struct qed_vf_info *vf_info;
3345 	u64 feature;
3346 
3347 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3348 	if (!vf_info) {
3349 		DP_NOTICE(p_hwfn->cdev,
3350 			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3351 		return;
3352 	}
3353 
3354 	if (vf_info->b_malicious) {
3355 		DP_NOTICE(p_hwfn->cdev,
3356 			  "Can't set forced MAC to malicious VF [%d]\n", vfid);
3357 		return;
3358 	}
3359 
3360 	feature = 1 << MAC_ADDR_FORCED;
3361 	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3362 
3363 	vf_info->bulletin.p_virt->valid_bitmap |= feature;
3364 	/* Forced MAC will disable MAC_ADDR */
3365 	vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
3366 
3367 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3368 }
3369 
3370 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
3371 					     u16 pvid, int vfid)
3372 {
3373 	struct qed_vf_info *vf_info;
3374 	u64 feature;
3375 
3376 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3377 	if (!vf_info) {
3378 		DP_NOTICE(p_hwfn->cdev,
3379 			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3380 		return;
3381 	}
3382 
3383 	if (vf_info->b_malicious) {
3384 		DP_NOTICE(p_hwfn->cdev,
3385 			  "Can't set forced vlan to malicious VF [%d]\n", vfid);
3386 		return;
3387 	}
3388 
3389 	feature = 1 << VLAN_ADDR_FORCED;
3390 	vf_info->bulletin.p_virt->pvid = pvid;
3391 	if (pvid)
3392 		vf_info->bulletin.p_virt->valid_bitmap |= feature;
3393 	else
3394 		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3395 
3396 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3397 }
3398 
3399 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
3400 {
3401 	struct qed_vf_info *p_vf_info;
3402 
3403 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3404 	if (!p_vf_info)
3405 		return false;
3406 
3407 	return !!p_vf_info->vport_instance;
3408 }
3409 
3410 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
3411 {
3412 	struct qed_vf_info *p_vf_info;
3413 
3414 	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3415 	if (!p_vf_info)
3416 		return true;
3417 
3418 	return p_vf_info->state == VF_STOPPED;
3419 }
3420 
3421 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
3422 {
3423 	struct qed_vf_info *vf_info;
3424 
3425 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3426 	if (!vf_info)
3427 		return false;
3428 
3429 	return vf_info->spoof_chk;
3430 }
3431 
3432 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
3433 {
3434 	struct qed_vf_info *vf;
3435 	int rc = -EINVAL;
3436 
3437 	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3438 		DP_NOTICE(p_hwfn,
3439 			  "SR-IOV sanity check failed, can't set spoofchk\n");
3440 		goto out;
3441 	}
3442 
3443 	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3444 	if (!vf)
3445 		goto out;
3446 
3447 	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
3448 		/* After VF VPORT start PF will configure spoof check */
3449 		vf->req_spoofchk_val = val;
3450 		rc = 0;
3451 		goto out;
3452 	}
3453 
3454 	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
3455 
3456 out:
3457 	return rc;
3458 }
3459 
3460 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
3461 					   u16 rel_vf_id)
3462 {
3463 	struct qed_vf_info *p_vf;
3464 
3465 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3466 	if (!p_vf || !p_vf->bulletin.p_virt)
3467 		return NULL;
3468 
3469 	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
3470 		return NULL;
3471 
3472 	return p_vf->bulletin.p_virt->mac;
3473 }
3474 
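/* Returns the forced PVID from the VF's bulletin board, or 0 if none */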
3475 static u16
3476 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3477 {
3478 	struct qed_vf_info *p_vf;
3479 
3480 	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3481 	if (!p_vf || !p_vf->bulletin.p_virt)
3482 		return 0;
3483 
3484 	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
3485 		return 0;
3486 
3487 	return p_vf->bulletin.p_virt->pvid;
3488 }
3489 
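/* Apply a maximum TX rate limit on the VF's vport (same Mb/s units as
 * the link.speed value used by the caller).
 */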
3490 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
3491 				     struct qed_ptt *p_ptt, int vfid, int val)
3492 {
3493 	struct qed_vf_info *vf;
3494 	u8 abs_vp_id = 0;
3495 	int rc;
3496 
3497 	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3498 	if (!vf)
3499 		return -EINVAL;
3500 
3501 	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
3502 	if (rc)
3503 		return rc;
3504 
3505 	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
3506 }
3507 
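/* Configure a minimum TX rate for the VF's vport. The rate is applied
 * through the common vport-WFQ configuration after validating the VF
 * on every hw-function.
 */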
3508 static int
3509 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
3510 {
3511 	struct qed_vf_info *vf;
3512 	u8 vport_id;
3513 	int i;
3514 
3515 	for_each_hwfn(cdev, i) {
3516 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3517 
3518 		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3519 			DP_NOTICE(p_hwfn,
3520 				  "SR-IOV sanity check failed, can't set min rate\n");
3521 			return -EINVAL;
3522 		}
3523 	}
3524 
3525 	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	if (!vf) {
		DP_NOTICE(cdev, "Cannot configure min rate, VF is unknown\n");
		return -EINVAL;
	}

3526 	vport_id = vf->vport_id;
3527 
3528 	return qed_configure_vport_wfq(cdev, vport_id, rate);
3529 }
3530 
3531 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
3532 {
3533 	struct qed_wfq_data *vf_vp_wfq;
3534 	struct qed_vf_info *vf_info;
3535 
3536 	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3537 	if (!vf_info)
3538 		return 0;
3539 
3540 	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
3541 
3542 	if (vf_vp_wfq->configured)
3543 		return vf_vp_wfq->min_speed;
3544 	else
3545 		return 0;
3546 }
3547 
3548 /**
3549  * qed_schedule_iov - schedules IOV task for VF and PF
3550  * @hwfn: hardware function pointer
3551  * @flag: which IOV work item to raise (enum qed_iov_wq_flag)
3552  */
3553 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
3554 {
3555 	smp_mb__before_atomic();
3556 	set_bit(flag, &hwfn->iov_task_flags);
3557 	smp_mb__after_atomic();
3558 	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3559 	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
3560 }
3561 
3562 void qed_vf_start_iov_wq(struct qed_dev *cdev)
3563 {
3564 	int i;
3565 
3566 	for_each_hwfn(cdev, i)
3567 	    queue_delayed_work(cdev->hwfns[i].iov_wq,
3568 			       &cdev->hwfns[i].iov_task, 0);
3569 }
3570 
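/* Tear down SR-IOV: flush pending IOV work, mark VFs as disabled,
 * optionally disable the PCI SR-IOV capability, and release the per-VF
 * HW resources once each VF has stopped (i.e. its FLR has completed).
 */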
3571 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
3572 {
3573 	int i, j;
3574 
3575 	for_each_hwfn(cdev, i)
3576 	    if (cdev->hwfns[i].iov_wq)
3577 		flush_workqueue(cdev->hwfns[i].iov_wq);
3578 
3579 	/* Mark VFs for disablement */
3580 	qed_iov_set_vfs_to_disable(cdev, true);
3581 
3582 	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
3583 		pci_disable_sriov(cdev->pdev);
3584 
3585 	for_each_hwfn(cdev, i) {
3586 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3587 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3588 
3589 		/* Failure to acquire the ptt in 100G mode creates an odd error
3590 		 * where the first engine has already released IOV.
3591 		 */
3592 		if (!ptt) {
3593 			DP_ERR(hwfn, "Failed to acquire ptt\n");
3594 			return -EBUSY;
3595 		}
3596 
3597 		/* Clean WFQ db and configure equal weight for all vports */
3598 		qed_clean_wfq_db(hwfn, ptt);
3599 
3600 		qed_for_each_vf(hwfn, j) {
3601 			int k;
3602 
3603 			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
3604 				continue;
3605 
3606 			/* Wait until VF is disabled before releasing */
3607 			for (k = 0; k < 100; k++) {
3608 				if (!qed_iov_is_vf_stopped(hwfn, j))
3609 					msleep(20);
3610 				else
3611 					break;
3612 			}
3613 
3614 			if (k < 100)
3615 				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
3616 							  ptt, j);
3617 			else
3618 				DP_ERR(hwfn,
3619 				       "Timeout waiting for VF's FLR to end\n");
3620 		}
3621 
3622 		qed_ptt_release(hwfn, ptt);
3623 	}
3624 
3625 	qed_iov_set_vfs_to_disable(cdev, false);
3626 
3627 	return 0;
3628 }
3629 
3630 static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
3631 					u16 vfid,
3632 					struct qed_iov_vf_init_params *params)
3633 {
3634 	u16 base, i;
3635 
3636 	/* Since we have an equal resource distribution per-VF, and we assume
3637 	 * the PF has acquired the first QED_PF_L2_QUE queues, we start setting
3638 	 * sequentially from there.
3639 	 */
3640 	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
3641 
3642 	params->rel_vf_id = vfid;
3643 	for (i = 0; i < params->num_queues; i++) {
3644 		params->req_rx_queue[i] = base + i;
3645 		params->req_tx_queue[i] = base + i;
3646 	}
3647 }
3648 
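/* Enable 'num' VFs: distribute queue IDs, initialize the HW for each VF
 * on every hw-function, and finally enable the PCIe SR-IOV capability.
 * On any failure the partial configuration is rolled back.
 */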
3649 static int qed_sriov_enable(struct qed_dev *cdev, int num)
3650 {
3651 	struct qed_iov_vf_init_params params;
3652 	int i, j, rc;
3653 
3654 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
3655 		DP_NOTICE(cdev, "Can start at most %d VFs\n",
3656 			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
3657 		return -EINVAL;
3658 	}
3659 
3660 	memset(&params, 0, sizeof(params));
3661 
3662 	/* Initialize HW for VF access */
3663 	for_each_hwfn(cdev, j) {
3664 		struct qed_hwfn *hwfn = &cdev->hwfns[j];
3665 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3666 
3667 		/* Make sure not to use more than 16 queues per VF */
3668 		params.num_queues = min_t(int,
3669 					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
3670 					  16);
3671 
3672 		if (!ptt) {
3673 			DP_ERR(hwfn, "Failed to acquire ptt\n");
3674 			rc = -EBUSY;
3675 			goto err;
3676 		}
3677 
3678 		for (i = 0; i < num; i++) {
3679 			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
3680 				continue;
3681 
3682 			qed_sriov_enable_qid_config(hwfn, i, &params);
3683 			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
3684 			if (rc) {
3685 				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
3686 				qed_ptt_release(hwfn, ptt);
3687 				goto err;
3688 			}
3689 		}
3690 
3691 		qed_ptt_release(hwfn, ptt);
3692 	}
3693 
3694 	/* Enable SRIOV PCIe functions */
3695 	rc = pci_enable_sriov(cdev->pdev, num);
3696 	if (rc) {
3697 		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
3698 		goto err;
3699 	}
3700 
3701 	return num;
3702 
3703 err:
3704 	qed_sriov_disable(cdev, false);
3705 	return rc;
3706 }
3707 
3708 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
3709 {
3710 	if (!IS_QED_SRIOV(cdev)) {
3711 		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
3712 		return -EOPNOTSUPP;
3713 	}
3714 
3715 	if (num_vfs_param)
3716 		return qed_sriov_enable(cdev, num_vfs_param);
3717 	else
3718 		return qed_sriov_disable(cdev, true);
3719 }
3720 
3721 static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
3722 {
3723 	int i;
3724 
3725 	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3726 		DP_VERBOSE(cdev, QED_MSG_IOV,
3727 			   "Cannot set a VF MAC; Sriov is not enabled\n");
3728 		return -EINVAL;
3729 	}
3730 
3731 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
3732 		DP_VERBOSE(cdev, QED_MSG_IOV,
3733 			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
3734 		return -EINVAL;
3735 	}
3736 
3737 	for_each_hwfn(cdev, i) {
3738 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3739 		struct qed_public_vf_info *vf_info;
3740 
3741 		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3742 		if (!vf_info)
3743 			continue;
3744 
3745 		/* Set the forced MAC, and schedule the IOV task */
3746 		ether_addr_copy(vf_info->forced_mac, mac);
3747 		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3748 	}
3749 
3750 	return 0;
3751 }
3752 
3753 static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
3754 {
3755 	int i;
3756 
3757 	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
3758 		DP_VERBOSE(cdev, QED_MSG_IOV,
3759 			   "Cannot set a VF VLAN; Sriov is not enabled\n");
3760 		return -EINVAL;
3761 	}
3762 
3763 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
3764 		DP_VERBOSE(cdev, QED_MSG_IOV,
3765 			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
3766 		return -EINVAL;
3767 	}
3768 
3769 	for_each_hwfn(cdev, i) {
3770 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3771 		struct qed_public_vf_info *vf_info;
3772 
3773 		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3774 		if (!vf_info)
3775 			continue;
3776 
3777 		/* Set the forced vlan, and schedule the IOV task */
3778 		vf_info->forced_vlan = vid;
3779 		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
3780 	}
3781 
3782 	return 0;
3783 }
3784 
3785 static int qed_get_vf_config(struct qed_dev *cdev,
3786 			     int vf_id, struct ifla_vf_info *ivi)
3787 {
3788 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
3789 	struct qed_public_vf_info *vf_info;
3790 	struct qed_mcp_link_state link;
3791 	u32 tx_rate;
3792 
3793 	/* Sanitize request */
3794 	if (IS_VF(cdev))
3795 		return -EINVAL;
3796 
3797 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
3798 		DP_VERBOSE(cdev, QED_MSG_IOV,
3799 			   "VF index [%d] isn't active\n", vf_id);
3800 		return -EINVAL;
3801 	}
3802 
3803 	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3804 
3805 	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
3806 
3807 	/* Fill information about VF */
3808 	ivi->vf = vf_id;
3809 
3810 	if (is_valid_ether_addr(vf_info->forced_mac))
3811 		ether_addr_copy(ivi->mac, vf_info->forced_mac);
3812 	else
3813 		ether_addr_copy(ivi->mac, vf_info->mac);
3814 
3815 	ivi->vlan = vf_info->forced_vlan;
3816 	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
3817 	ivi->linkstate = vf_info->link_state;
3818 	tx_rate = vf_info->tx_rate;
3819 	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
3820 	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
3821 
3822 	return 0;
3823 }
3824 
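/* Propagate the PF's current link state to the bulletin boards of all
 * possible VFs, honoring each VF's administrative link setting and TX
 * rate limit, then schedule a bulletin update.
 */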
3825 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
3826 {
3827 	struct qed_mcp_link_capabilities caps;
3828 	struct qed_mcp_link_params params;
3829 	struct qed_mcp_link_state link;
3830 	int i;
3831 
3832 	if (!hwfn->pf_iov_info)
3833 		return;
3834 
3835 	/* Update bulletin of all (even future) VFs with the link configuration */
3836 	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
3837 		struct qed_public_vf_info *vf_info;
3838 
3839 		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
3840 		if (!vf_info)
3841 			continue;
3842 
3843 		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
3844 		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
3845 		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
3846 		       sizeof(caps));
3847 
3848 		/* Modify link according to the VF's configured link state */
3849 		switch (vf_info->link_state) {
3850 		case IFLA_VF_LINK_STATE_DISABLE:
3851 			link.link_up = false;
3852 			break;
3853 		case IFLA_VF_LINK_STATE_ENABLE:
3854 			link.link_up = true;
3855 			/* Set speed according to the maximum supported by HW -
3856 			 * that is 40G for regular devices and 100G for CMT
3857 			 * mode devices.
3858 			 */
3859 			link.speed = (hwfn->cdev->num_hwfns > 1) ?
3860 				     100000 : 40000;
			break;
3861 		default:
3862 			/* In auto mode pass PF link image to VF */
3863 			break;
3864 		}
3865 
3866 		if (link.link_up && vf_info->tx_rate) {
3867 			struct qed_ptt *ptt;
3868 			int rate;
3869 
3870 			rate = min_t(int, vf_info->tx_rate, link.speed);
3871 
3872 			ptt = qed_ptt_acquire(hwfn);
3873 			if (!ptt) {
3874 				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
3875 				return;
3876 			}
3877 
3878 			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
3879 				vf_info->tx_rate = rate;
3880 				link.speed = rate;
3881 			}
3882 
3883 			qed_ptt_release(hwfn, ptt);
3884 		}
3885 
3886 		qed_iov_set_link(hwfn, i, &params, &link, &caps);
3887 	}
3888 
3889 	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3890 }
3891 
3892 static int qed_set_vf_link_state(struct qed_dev *cdev,
3893 				 int vf_id, int link_state)
3894 {
3895 	int i;
3896 
3897 	/* Sanitize request */
3898 	if (IS_VF(cdev))
3899 		return -EINVAL;
3900 
3901 	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
3902 		DP_VERBOSE(cdev, QED_MSG_IOV,
3903 			   "VF index [%d] isn't active\n", vf_id);
3904 		return -EINVAL;
3905 	}
3906 
3907 	/* Handle configuration of link state */
3908 	for_each_hwfn(cdev, i) {
3909 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3910 		struct qed_public_vf_info *vf;
3911 
3912 		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
3913 		if (!vf)
3914 			continue;
3915 
3916 		if (vf->link_state == link_state)
3917 			continue;
3918 
3919 		vf->link_state = link_state;
3920 		qed_inform_vf_link_state(&cdev->hwfns[i]);
3921 	}
3922 
3923 	return 0;
3924 }
3925 
3926 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
3927 {
3928 	int i, rc = -EINVAL;
3929 
3930 	for_each_hwfn(cdev, i) {
3931 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3932 
3933 		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
3934 		if (rc)
3935 			break;
3936 	}
3937 
3938 	return rc;
3939 }
3940 
3941 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
3942 {
3943 	int i;
3944 
3945 	for_each_hwfn(cdev, i) {
3946 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3947 		struct qed_public_vf_info *vf;
3948 
3949 		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3950 			DP_NOTICE(p_hwfn,
3951 				  "SR-IOV sanity check failed, can't set tx rate\n");
3952 			return -EINVAL;
3953 		}
3954 
3955 		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
3956 
3957 		vf->tx_rate = rate;
3958 
3959 		qed_inform_vf_link_state(p_hwfn);
3960 	}
3961 
3962 	return 0;
3963 }
3964 
3965 static int qed_set_vf_rate(struct qed_dev *cdev,
3966 			   int vfid, u32 min_rate, u32 max_rate)
3967 {
3968 	int rc_min = 0, rc_max = 0;
3969 
3970 	if (max_rate)
3971 		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
3972 
3973 	if (min_rate)
3974 		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
3975 
3976 	if (rc_max | rc_min)
3977 		return -EINVAL;
3978 
3979 	return 0;
3980 }
3981 
3982 static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
3983 {
3984 	int i;
3985 
3986 	for_each_hwfn(cdev, i) {
3987 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
3988 		struct qed_public_vf_info *vf;
3989 
3990 		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
3991 			DP_NOTICE(hwfn,
3992 				  "SR-IOV sanity check failed, can't set trust\n");
3993 			return -EINVAL;
3994 		}
3995 
3996 		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3997 
3998 		if (vf->is_trusted_request == trust)
3999 			return 0;
4000 		vf->is_trusted_request = trust;
4001 
4002 		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4003 	}
4004 
4005 	return 0;
4006 }
4007 
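/* Drain pending VF->PF mailbox requests: copy each VF's message into
 * the PF-side buffer and process it.
 */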
4008 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4009 {
4010 	u64 events[QED_VF_ARRAY_LENGTH];
4011 	struct qed_ptt *ptt;
4012 	int i;
4013 
4014 	ptt = qed_ptt_acquire(hwfn);
4015 	if (!ptt) {
4016 		DP_VERBOSE(hwfn, QED_MSG_IOV,
4017 			   "Can't acquire PTT; re-scheduling\n");
4018 		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4019 		return;
4020 	}
4021 
4022 	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
4023 
4024 	DP_VERBOSE(hwfn, QED_MSG_IOV,
4025 		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4026 		   events[0], events[1], events[2]);
4027 
4028 	qed_for_each_vf(hwfn, i) {
4029 		/* Skip VFs with no pending messages */
4030 		if (!(events[i / 64] & (1ULL << (i % 64))))
4031 			continue;
4032 
4033 		DP_VERBOSE(hwfn, QED_MSG_IOV,
4034 			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4035 			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4036 
4037 		/* Copy VF's message to PF's request buffer for that VF */
4038 		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4039 			continue;
4040 
4041 		qed_iov_process_mbx_req(hwfn, ptt, i);
4042 	}
4043 
4044 	qed_ptt_release(hwfn, ptt);
4045 }
4046 
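/* Sync PF-forced MAC/VLAN settings into the VFs' bulletin boards and
 * schedule a bulletin post if anything changed.
 */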
4047 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4048 {
4049 	int i;
4050 
4051 	qed_for_each_vf(hwfn, i) {
4052 		struct qed_public_vf_info *info;
4053 		bool update = false;
4054 		u8 *mac;
4055 
4056 		info = qed_iov_get_public_vf_info(hwfn, i, true);
4057 		if (!info)
4058 			continue;
4059 
4060 		/* Update data on bulletin board */
4061 		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4062 		if (is_valid_ether_addr(info->forced_mac) &&
4063 		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
4064 			DP_VERBOSE(hwfn,
4065 				   QED_MSG_IOV,
4066 				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
4067 				   i,
4068 				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4069 
4070 			/* Update bulletin board with forced MAC */
4071 			qed_iov_bulletin_set_forced_mac(hwfn,
4072 							info->forced_mac, i);
4073 			update = true;
4074 		}
4075 
4076 		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
4077 		    info->forced_vlan) {
4078 			DP_VERBOSE(hwfn,
4079 				   QED_MSG_IOV,
4080 				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
4081 				   info->forced_vlan,
4082 				   i,
4083 				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4084 			qed_iov_bulletin_set_forced_vlan(hwfn,
4085 							 info->forced_vlan, i);
4086 			update = true;
4087 		}
4088 
4089 		if (update)
4090 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4091 	}
4092 }
4093 
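/* Post each VF's bulletin board so the VF can read the updated copy */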
4094 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
4095 {
4096 	struct qed_ptt *ptt;
4097 	int i;
4098 
4099 	ptt = qed_ptt_acquire(hwfn);
4100 	if (!ptt) {
4101 		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
4102 		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4103 		return;
4104 	}
4105 
4106 	qed_for_each_vf(hwfn, i)
4107 	    qed_iov_post_vf_bulletin(hwfn, i, ptt);
4108 
4109 	qed_ptt_release(hwfn, ptt);
4110 }
4111 
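/* Apply pending 'trust' changes: trusted VFs are allowed the unmatched
 * unicast/multicast accept modes, while untrusted VFs have those modes
 * stripped from their vport configuration.
 */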
4112 static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
4113 {
4114 	struct qed_sp_vport_update_params params;
4115 	struct qed_filter_accept_flags *flags;
4116 	struct qed_public_vf_info *vf_info;
4117 	struct qed_vf_info *vf;
4118 	u8 mask;
4119 	int i;
4120 
4121 	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
4122 	flags = &params.accept_flags;
4123 
4124 	qed_for_each_vf(hwfn, i) {
4125 		/* Make sure the currently requested configuration didn't
4126 		 * flip meanwhile, so that we don't end up configuring
4127 		 * something that's no longer needed.
4128 		 */
4129 		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
4130 		if (vf_info->is_trusted_configured ==
4131 		    vf_info->is_trusted_request)
4132 			continue;
4133 		vf_info->is_trusted_configured = vf_info->is_trusted_request;
4134 
4135 		/* Validate that the VF has a configured vport */
4136 		vf = qed_iov_get_vf_info(hwfn, i, true);
4137 		if (!vf->vport_instance)
4138 			continue;
4139 
4140 		memset(&params, 0, sizeof(params));
4141 		params.opaque_fid = vf->opaque_fid;
4142 		params.vport_id = vf->vport_id;
4143 
4144 		if (vf_info->rx_accept_mode & mask) {
4145 			flags->update_rx_mode_config = 1;
4146 			flags->rx_accept_filter = vf_info->rx_accept_mode;
4147 		}
4148 
4149 		if (vf_info->tx_accept_mode & mask) {
4150 			flags->update_tx_mode_config = 1;
4151 			flags->tx_accept_filter = vf_info->tx_accept_mode;
4152 		}
4153 
4154 		/* Strip the unmatched-accept modes when the VF isn't trusted */
4155 		if (!vf_info->is_trusted_configured) {
4156 			flags->rx_accept_filter &= ~mask;
4157 			flags->tx_accept_filter &= ~mask;
4158 		}
4159 
4160 		if (flags->update_rx_mode_config ||
4161 		    flags->update_tx_mode_config)
4162 			qed_sp_vport_update(hwfn, &params,
4163 					    QED_SPQ_MODE_EBLOCK, NULL);
4164 	}
4165 }
4166 
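/* Deferred worker for the PF's IOV workqueue; services whichever flags
 * were raised via qed_schedule_iov() - FLR cleanup, VF mailbox messages,
 * unicast filter updates, bulletin posts and trust reconfiguration.
 */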
4167 static void qed_iov_pf_task(struct work_struct *work)
4169 {
4170 	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
4171 					     iov_task.work);
4172 	int rc;
4173 
4174 	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
4175 		return;
4176 
4177 	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
4178 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4179 
4180 		if (!ptt) {
4181 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
4182 			return;
4183 		}
4184 
4185 		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
4186 		if (rc)
4187 			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
4188 
4189 		qed_ptt_release(hwfn, ptt);
4190 	}
4191 
4192 	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
4193 		qed_handle_vf_msg(hwfn);
4194 
4195 	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
4196 			       &hwfn->iov_task_flags))
4197 		qed_handle_pf_set_vf_unicast(hwfn);
4198 
4199 	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
4200 			       &hwfn->iov_task_flags))
4201 		qed_handle_bulletin_post(hwfn);
4202 
4203 	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
4204 		qed_iov_handle_trust_change(hwfn);
4205 }
4206 
4207 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
4208 {
4209 	int i;
4210 
4211 	for_each_hwfn(cdev, i) {
4212 		if (!cdev->hwfns[i].iov_wq)
4213 			continue;
4214 
4215 		if (schedule_first) {
4216 			qed_schedule_iov(&cdev->hwfns[i],
4217 					 QED_IOV_WQ_STOP_WQ_FLAG);
4218 			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
4219 		}
4220 
4221 		flush_workqueue(cdev->hwfns[i].iov_wq);
4222 		destroy_workqueue(cdev->hwfns[i].iov_wq);
4223 	}
4224 }
4225 
4226 int qed_iov_wq_start(struct qed_dev *cdev)
4227 {
4228 	char name[NAME_SIZE];
4229 	int i;
4230 
4231 	for_each_hwfn(cdev, i) {
4232 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4233 
4234 		/* PFs need a dedicated workqueue only if they support IOV;
4235 		 * VFs always require one.
4236 		 */
4237 		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
4238 			continue;
4239 
4240 		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
4241 			 cdev->pdev->bus->number,
4242 			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
4243 
4244 		p_hwfn->iov_wq = create_singlethread_workqueue(name);
4245 		if (!p_hwfn->iov_wq) {
4246 			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
4247 			return -ENOMEM;
4248 		}
4249 
4250 		if (IS_PF(cdev))
4251 			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
4252 		else
4253 			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
4254 	}
4255 
4256 	return 0;
4257 }
4258 
4259 const struct qed_iov_hv_ops qed_iov_ops_pass = {
4260 	.configure = &qed_sriov_configure,
4261 	.set_mac = &qed_sriov_pf_set_mac,
4262 	.set_vlan = &qed_sriov_pf_set_vlan,
4263 	.get_config = &qed_get_vf_config,
4264 	.set_link_state = &qed_set_vf_link_state,
4265 	.set_spoof = &qed_spoof_configure,
4266 	.set_rate = &qed_set_vf_rate,
4267 	.set_trust = &qed_set_vf_trust,
4268 };
4269