1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2012 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include "bnx2x_sriov.h"
24 
25 /* General service functions */
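/* Record the VF's parent PF function id at the VF's slot in the
 * VF-to-PF tables of all four storm RAMs (X, C, T and U).
 */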
26 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
27 					 u16 pf_id)
28 {
29 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
30 		pf_id);
31 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
32 		pf_id);
33 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
34 		pf_id);
35 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
36 		pf_id);
37 }
38 
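/* Set the function-enable flag for the given function id in all four
 * storm RAMs.
 */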
39 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
40 					u8 enable)
41 {
42 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
43 		enable);
44 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
45 		enable);
46 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
47 		enable);
48 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
49 		enable);
50 }
51 
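/* Return the index of the VF whose absolute FID equals abs_vfid. If no VF
 * matches, the returned index falls outside the valid VF range; callers
 * such as bnx2x_vf_by_abs_fid() below must check for that.
 */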
52 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
53 {
54 	int idx;
55 
56 	for_each_vf(bp, idx)
57 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
58 			break;
59 	return idx;
60 }
61 
62 static
63 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
64 {
65 	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
66 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
67 }
68 
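/* SR-IOV requires ARI (Alternative Routing-ID Interpretation) to be enabled
 * on the upstream bridge so that the VFs' routing IDs can be addressed.
 */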
69 static int bnx2x_ari_enabled(struct pci_dev *dev)
70 {
71 	return dev->bus->self && dev->bus->self->ari_enabled;
72 }
73 
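/* Register an IGU status block with its owning VF: the first SB seen
 * becomes the VF's igu_base_id, and the VF's SB count is incremented.
 */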
74 static void
75 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
76 {
77 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
78 	if (vf) {
79 		if (!vf_sb_count(vf))
80 			vf->igu_base_id = igu_sb_id;
81 		++vf_sb_count(vf);
82 	}
83 }
84 
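/* Scan the IGU CAM (the IGU is assumed to be in normal mode) and record
 * every valid entry that belongs to a VF in that VF's igu info.
 */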
85 static void
86 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
87 {
88 	int sb_id;
89 	u32 val;
90 	u8 fid;
91 
92 	/* IGU in normal mode - read CAM */
93 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
94 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
95 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
96 			continue;
97 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
98 		if (!(fid & IGU_FID_ENCODE_IS_PF))
99 			bnx2x_vf_set_igu_info(bp, sb_id,
100 					      (fid & IGU_FID_VF_NUM_MASK));
101 
102 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
103 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
104 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
105 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
106 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
107 	}
108 }
109 
110 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
111 {
112 	if (bp->vfdb) {
113 		kfree(bp->vfdb->vfqs);
114 		kfree(bp->vfdb->vfs);
115 		kfree(bp->vfdb);
116 	}
117 	bp->vfdb = NULL;
118 }
119 
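/* Read the SR-IOV extended capability of the PF from PCI config space and
 * cache the relevant fields in the driver's sriov structure.
 */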
120 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
121 {
122 	int pos;
123 	struct pci_dev *dev = bp->pdev;
124 
125 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
126 	if (!pos) {
127 		BNX2X_ERR("failed to find SRIOV capability in device\n");
128 		return -ENODEV;
129 	}
130 
131 	iov->pos = pos;
132 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
133 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
134 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
135 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
136 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
137 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
138 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
139 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
140 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
141 
142 	return 0;
143 }
144 
145 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
146 {
147 	u32 val;
148 
149 	/* read the SRIOV capability structure
150 	 * The fields can be read via configuration read or
151 	 * directly from the device (starting at offset PCICFG_OFFSET)
152 	 */
153 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
154 		return -ENODEV;
155 
156 	/* get the number of SRIOV bars */
157 	iov->nres = 0;
158 
159 	/* read the first_vfid */
160 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
161 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
162 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
163 
164 	DP(BNX2X_MSG_IOV,
165 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
166 	   BP_FUNC(bp),
167 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
168 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
169 
170 	return 0;
171 }
172 
173 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
174 {
175 	int i;
176 	u8 queue_count = 0;
177 
178 	if (IS_SRIOV(bp))
179 		for_each_vf(bp, i)
180 			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
181 
182 	return queue_count;
183 }
184 
185 /* must be called after PF bars are mapped */
186 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
187 				 int num_vfs_param)
188 {
189 	int err, i, qcount;
190 	struct bnx2x_sriov *iov;
191 	struct pci_dev *dev = bp->pdev;
192 
193 	bp->vfdb = NULL;
194 
195 	/* verify sriov capability is present in configuration space */
196 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
197 		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
198 		return 0;
199 	}
200 
201 	/* verify this is a PF */
202 	if (IS_VF(bp))
203 		return 0;
204 
205 	/* verify chip revision */
206 	if (CHIP_IS_E1x(bp))
207 		return 0;
208 
209 	/* check if SRIOV support is turned off */
210 	if (!num_vfs_param)
211 		return 0;
212 
213 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
214 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
215 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
216 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
217 		return 0;
218 	}
219 
220 	/* SRIOV can be enabled only with MSIX */
221 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
222 	    int_mode_param == BNX2X_INT_MODE_INTX) {
223 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
224 		return 0;
225 	}
226 
227 	/* verify ari is enabled */
228 	if (!bnx2x_ari_enabled(bp->pdev)) {
229 		BNX2X_ERR("ARI not supported, SRIOV cannot be enabled\n");
230 		return 0;
231 	}
232 
233 	/* verify igu is in normal mode */
234 	if (CHIP_INT_MODE_IS_BC(bp)) {
235 		BNX2X_ERR("IGU is not in normal mode, SRIOV cannot be enabled\n");
236 		return 0;
237 	}
238 
239 	/* allocate the vfs database */
240 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
241 	if (!bp->vfdb) {
242 		BNX2X_ERR("failed to allocate vf database\n");
243 		err = -ENOMEM;
244 		goto failed;
245 	}
246 
247 	/* get the sriov info - Linux already collected all the pertinent
248 	 * information, but the sriov structure is for the private use of
249 	 * the pci module. Also, we want this information regardless of
250 	 * the hypervisor.
251 	 */
252 	iov = &(bp->vfdb->sriov);
253 	err = bnx2x_sriov_info(bp, iov);
254 	if (err)
255 		goto failed;
256 
257 	/* SR-IOV capability was enabled but there are no VFs */
258 	if (iov->total == 0)
259 		goto failed;
260 
261 	/* calculate the actual number of VFs */
262 	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
263 
264 	/* allocate the vf array */
265 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
266 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
267 	if (!bp->vfdb->vfs) {
268 		BNX2X_ERR("failed to allocate vf array\n");
269 		err = -ENOMEM;
270 		goto failed;
271 	}
272 
273 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
274 	for_each_vf(bp, i) {
275 		bnx2x_vf(bp, i, index) = i;
276 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
277 		bnx2x_vf(bp, i, state) = VF_FREE;
278 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
279 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
280 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
281 	}
282 
283 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
284 	bnx2x_get_vf_igu_cam_info(bp);
285 
286 	/* get the total queue count and allocate the global queue arrays */
287 	qcount = bnx2x_iov_get_max_queue_count(bp);
288 
289 	/* allocate the queue arrays for all VFs */
290 	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
291 				 GFP_KERNEL);
292 	if (!bp->vfdb->vfqs) {
293 		BNX2X_ERR("failed to allocate vf queue array\n");
294 		err = -ENOMEM;
295 		goto failed;
296 	}
297 
298 	return 0;
299 failed:
300 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
301 	__bnx2x_iov_free_vfdb(bp);
302 	return err;
303 }
304 /* VF enable primitives
305  * When pretend is required, the caller is responsible
306  * for calling pretend prior to calling these routines
307  */
308 
309 /* called only on E1H or E2.
310  * When pretending to be PF, the pretend value is the function number 0...7
311  * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
312  * combination
313  */
314 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
315 {
316 	u32 pretend_reg;
317 
318 	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
319 		return -1;
320 
321 	/* get my own pretend register */
322 	pretend_reg = bnx2x_get_pretend_reg(bp);
323 	REG_WR(bp, pretend_reg, pretend_func_val);
324 	REG_RD(bp, pretend_reg);
325 	return 0;
326 }
327 
328 /* internal vf enable - until the vf is enabled internally all transactions
329  * are blocked. This routine should always be called last, while pretending.
330  */
331 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
332 {
333 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
334 }
335 
336 /* clears vf error in all semi blocks */
337 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
338 {
339 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
340 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
341 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
342 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
343 }
344 
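/* Clear the PGLUE-B 'was error' indication for the VF. The per-VF bits are
 * spread over four 32-bit clear registers, selected from the path and the
 * VF's absolute id.
 */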
345 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
346 {
347 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
348 	u32 was_err_reg = 0;
349 
350 	switch (was_err_group) {
351 	case 0:
352 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
353 	    break;
354 	case 1:
355 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
356 	    break;
357 	case 2:
358 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
359 	    break;
360 	case 3:
361 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
362 	    break;
363 	}
364 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
365 }
366 
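/* Grant the VF access to the device: associate it with its PF in the FW,
 * clear any stale error state, and - while pretending to be the VF - open
 * the internal VF enable in PGLUE-B.
 */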
367 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
368 {
369 	/* set the VF-PF association in the FW */
370 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
371 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
372 
373 	/* clear vf errors*/
374 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
375 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
376 
377 	/* internal vf-enable - pretend */
378 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
379 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
380 	bnx2x_vf_enable_internal(bp, true);
381 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
382 }
383 
384 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
385 {
386 	struct pci_dev *dev;
387 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
388 
389 	if (!vf)
390 		goto unknown_dev;
391 
392 	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
393 	if (dev)
394 		return bnx2x_is_pcie_pending(dev);
395 
396 unknown_dev:
397 	BNX2X_ERR("Unknown device\n");
398 	return false;
399 }
400 
401 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
402 {
403 	/* Wait 100ms */
404 	msleep(100);
405 
406 	/* Verify no pending pci transactions */
407 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
408 		BNX2X_ERR("PCIE Transactions still pending\n");
409 
410 	return 0;
411 }
412 
413 /* must be called after the number of PF queues and the number of VFs are
414  * both known
415  */
416 static void
417 bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
418 {
419 	u16 vlan_count = 0;
420 
421 	/* will be set only during VF-ACQUIRE */
422 	resc->num_rxqs = 0;
423 	resc->num_txqs = 0;
424 
425 	/* no credit calculations for macs (just yet) */
426 	resc->num_mac_filters = 1;
427 
428 	/* divvy up vlan rules */
429 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
430 	vlan_count = 1 << ilog2(vlan_count);
431 	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
432 
433 	/* no real limitation */
434 	resc->num_mc_filters = 0;
435 
436 	/* num_sbs already set */
437 }
438 
439 /* IOV global initialization routines  */
440 void bnx2x_iov_init_dq(struct bnx2x *bp)
441 {
442 	if (!IS_SRIOV(bp))
443 		return;
444 
445 	/* Set the DQ such that the CID reflects the abs_vfid */
446 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
447 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
448 
449 	/* Set the VF starting CID. If it is > 0, the preceding CIDs belong
450 	 * to the PF L2 queues
451 	 */
452 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
453 
454 	/* The VF window size is the log2 of the max number of CIDs per VF */
455 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
456 
457 	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
458 	 * the PF doorbell size although the two are independent.
459 	 */
460 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
461 	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
462 
463 	/* No security checks for now -
464 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
465 	 * CID range 0 - 0x1ffff
466 	 */
467 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
468 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
469 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
470 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
471 
472 	/* set the number of VF allowed doorbells to the full DQ range */
473 	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
474 
475 	/* set the VF doorbell threshold */
476 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
477 }
478 
479 void bnx2x_iov_init_dmae(struct bnx2x *bp)
480 {
481 	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
482 	if (!IS_SRIOV(bp))
483 		return;
484 
485 	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
486 }
487 
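/* The routing ID (bus/devfn) of a VF is derived from the PF's devfn plus
 * the SR-IOV first-VF offset and per-VF stride, as defined by the SR-IOV
 * routing-ID arithmetic.
 */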
488 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
489 {
490 	struct pci_dev *dev = bp->pdev;
491 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
492 
493 	return dev->bus->number + ((dev->devfn + iov->offset +
494 				    iov->stride * vfid) >> 8);
495 }
496 
497 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
498 {
499 	struct pci_dev *dev = bp->pdev;
500 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
501 
502 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
503 }
504 
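/* Carve the PF's SR-IOV BAR resources into equal per-VF slices and record
 * each VF's BAR base and size. The resources are walked in pairs since
 * each VF BAR is 64-bit and therefore spans two resource entries.
 */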
505 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
506 {
507 	int i, n;
508 	struct pci_dev *dev = bp->pdev;
509 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
510 
511 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
512 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
513 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
514 
515 		do_div(size, iov->total);
516 		vf->bars[n].bar = start + size * vf->abs_vfid;
517 		vf->bars[n].size = size;
518 	}
519 }
520 
521 void bnx2x_iov_remove_one(struct bnx2x *bp)
522 {
523 	/* if SRIOV is not enabled there's nothing to do */
524 	if (!IS_SRIOV(bp))
525 		return;
526 
527 	/* free vf database */
528 	__bnx2x_iov_free_vfdb(bp);
529 }
530 
531 void bnx2x_iov_free_mem(struct bnx2x *bp)
532 {
533 	int i;
534 
535 	if (!IS_SRIOV(bp))
536 		return;
537 
538 	/* free vfs hw contexts */
539 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
540 		struct hw_dma *cxt = &bp->vfdb->context[i];
541 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
542 	}
543 
544 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
545 		       BP_VFDB(bp)->sp_dma.mapping,
546 		       BP_VFDB(bp)->sp_dma.size);
547 
548 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
549 		       BP_VF_MBX_DMA(bp)->mapping,
550 		       BP_VF_MBX_DMA(bp)->size);
551 }
552 
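/* Allocate the DMA memory the PF uses on behalf of its VFs: the ILT-backed
 * HW context pages for the VF CIDs, the per-VF slow-path ramrod data and
 * the PF-VF mailbox area.
 */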
553 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
554 {
555 	size_t tot_size;
556 	int i, rc = 0;
557 
558 	if (!IS_SRIOV(bp))
559 		return rc;
560 
561 	/* allocate vfs hw contexts */
562 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
563 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
564 
565 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
566 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
567 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
568 
569 		if (cxt->size) {
570 			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
571 		} else {
572 			cxt->addr = NULL;
573 			cxt->mapping = 0;
574 		}
575 		tot_size -= cxt->size;
576 	}
577 
578 	/* allocate vfs ramrods dma memory - client_init and set_mac */
579 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
580 	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
581 			tot_size);
582 	BP_VFDB(bp)->sp_dma.size = tot_size;
583 
584 	/* allocate mailboxes */
585 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
586 	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
587 			tot_size);
588 	BP_VF_MBX_DMA(bp)->size = tot_size;
589 
590 	return 0;
591 
592 alloc_mem_err:
593 	return -ENOMEM;
594 }
595 
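/* Initialize a VF queue's state object. For the leading queue, also
 * initialize the queue's MAC and VLAN objects and the VF's multicast
 * object; the MAC and VLAN objects draw credits from the PF's pools.
 */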
596 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
597 			   struct bnx2x_vf_queue *q)
598 {
599 	u8 cl_id = vfq_cl_id(vf, q);
600 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
601 	unsigned long q_type = 0;
602 
603 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
604 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
605 
606 	/* Queue State object */
607 	bnx2x_init_queue_obj(bp, &q->sp_obj,
608 			     cl_id, &q->cid, 1, func_id,
609 			     bnx2x_vf_sp(bp, vf, q_data),
610 			     bnx2x_vf_sp_map(bp, vf, q_data),
611 			     q_type);
612 
613 	DP(BNX2X_MSG_IOV,
614 	   "initialized vf %d's queue object. func id set to %d\n",
615 	   vf->abs_vfid, q->sp_obj.func_id);
616 
617 	/* mac/vlan objects are per queue, but only those
618 	 * that belong to the leading queue are initialized
619 	 */
620 	if (vfq_is_leading(q)) {
621 		/* mac */
622 		bnx2x_init_mac_obj(bp, &q->mac_obj,
623 				   cl_id, q->cid, func_id,
624 				   bnx2x_vf_sp(bp, vf, mac_rdata),
625 				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
626 				   BNX2X_FILTER_MAC_PENDING,
627 				   &vf->filter_state,
628 				   BNX2X_OBJ_TYPE_RX_TX,
629 				   &bp->macs_pool);
630 		/* vlan */
631 		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
632 				    cl_id, q->cid, func_id,
633 				    bnx2x_vf_sp(bp, vf, vlan_rdata),
634 				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
635 				    BNX2X_FILTER_VLAN_PENDING,
636 				    &vf->filter_state,
637 				    BNX2X_OBJ_TYPE_RX_TX,
638 				    &bp->vlans_pool);
639 
640 		/* mcast */
641 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
642 				     q->cid, func_id, func_id,
643 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
644 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
645 				     BNX2X_FILTER_MCAST_PENDING,
646 				     &vf->filter_state,
647 				     BNX2X_OBJ_TYPE_RX_TX);
648 
649 		vf->leading_rss = cl_id;
650 	}
651 }
652 
653 /* called by bnx2x_nic_load */
654 int bnx2x_iov_nic_init(struct bnx2x *bp)
655 {
656 	int vfid, qcount, i;
657 
658 	if (!IS_SRIOV(bp)) {
659 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
660 		return 0;
661 	}
662 
663 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
664 
665 	/* initialize vf database */
666 	for_each_vf(bp, vfid) {
667 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
668 
669 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
670 			BNX2X_CIDS_PER_VF;
671 
672 		union cdu_context *base_cxt = (union cdu_context *)
673 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
674 			(base_vf_cid & (ILT_PAGE_CIDS-1));
675 
676 		DP(BNX2X_MSG_IOV,
677 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
678 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
679 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
680 
681 		/* init statically provisioned resources */
682 		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
683 
684 		/* queues are initialized during VF-ACQUIRE */
685 
686 		/* reserve the vf vlan credit */
687 		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
688 
689 		vf->filter_state = 0;
690 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
691 
692 		/*  init mcast object - This object will be re-initialized
693 		 *  during VF-ACQUIRE with the proper cl_id and cid.
694 		 *  It needs to be initialized here so that it can be safely
695 		 *  handled by a subsequent FLR flow.
696 		 */
697 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
698 				     0xFF, 0xFF, 0xFF,
699 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
700 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
701 				     BNX2X_FILTER_MCAST_PENDING,
702 				     &vf->filter_state,
703 				     BNX2X_OBJ_TYPE_RX_TX);
704 
705 		/* set the mailbox message addresses */
706 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
707 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
708 			MBX_MSG_ALIGNED_SIZE);
709 
710 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
711 			vfid * MBX_MSG_ALIGNED_SIZE;
712 
713 		/* Enable vf mailbox */
714 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
715 	}
716 
717 	/* Final VF init */
718 	qcount = 0;
719 	for_each_vf(bp, i) {
720 		struct bnx2x_virtf *vf = BP_VF(bp, i);
721 
722 		/* fill in the BDF and bars */
723 		vf->bus = bnx2x_vf_bus(bp, i);
724 		vf->devfn = bnx2x_vf_devfn(bp, i);
725 		bnx2x_vf_set_bars(bp, vf);
726 
727 		DP(BNX2X_MSG_IOV,
728 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
729 		   vf->abs_vfid, vf->bus, vf->devfn,
730 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
731 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
732 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
733 
734 		/* set local queue arrays */
735 		vf->vfqs = &bp->vfdb->vfqs[qcount];
736 		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
737 	}
738 
739 	return 0;
740 }
741 
742 /* called by bnx2x_init_hw_func, returns the next ilt line */
743 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
744 {
745 	int i;
746 	struct bnx2x_ilt *ilt = BP_ILT(bp);
747 
748 	if (!IS_SRIOV(bp))
749 		return line;
750 
751 	/* set vfs ilt lines */
752 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
753 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
754 
755 		ilt->lines[line+i].page = hw_cxt->addr;
756 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
757 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
758 	}
759 	return line + i;
760 }
761 
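/* true iff the cid lies within the contiguous CID range reserved for VFs */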
762 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
763 {
764 	return ((cid >= BNX2X_FIRST_VF_CID) &&
765 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
766 }
767 
768 static
769 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
770 					struct bnx2x_vf_queue *vfq,
771 					union event_ring_elem *elem)
772 {
773 	unsigned long ramrod_flags = 0;
774 	int rc = 0;
775 
776 	/* Always push next commands out, don't wait here */
777 	set_bit(RAMROD_CONT, &ramrod_flags);
778 
779 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
780 	case BNX2X_FILTER_MAC_PENDING:
781 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
782 					   &ramrod_flags);
783 		break;
784 	case BNX2X_FILTER_VLAN_PENDING:
785 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
786 					    &ramrod_flags);
787 		break;
788 	default:
789 		BNX2X_ERR("Unsupported classification command: %d\n",
790 			  elem->message.data.eth_event.echo);
791 		return;
792 	}
793 	if (rc < 0)
794 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
795 	else if (rc > 0)
796 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
797 }
798 
799 static
800 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
801 			       struct bnx2x_virtf *vf)
802 {
803 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
804 	int rc;
805 
806 	rparam.mcast_obj = &vf->mcast_obj;
807 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
808 
809 	/* If there are pending mcast commands - send them */
810 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
811 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
812 		if (rc < 0)
813 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
814 				  rc);
815 	}
816 }
817 
818 static
819 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
820 				 struct bnx2x_virtf *vf)
821 {
822 	smp_mb__before_clear_bit();
823 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
824 	smp_mb__after_clear_bit();
825 }
826 
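/* Process an event-queue element on behalf of the VFs. Returns 1 if the
 * element is not VF related (so the caller may process it), 0 if it was
 * handled here.
 */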
827 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
828 {
829 	struct bnx2x_virtf *vf;
830 	int qidx = 0, abs_vfid;
831 	u8 opcode;
832 	u16 cid = 0xffff;
833 
834 	if (!IS_SRIOV(bp))
835 		return 1;
836 
837 	/* first get the cid - the only events we handle here are cfc-delete
838 	 * and set-mac completion
839 	 */
840 	opcode = elem->message.opcode;
841 
842 	switch (opcode) {
843 	case EVENT_RING_OPCODE_CFC_DEL:
844 		cid = SW_CID((__force __le32)
845 			     elem->message.data.cfc_del_event.cid);
846 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
847 		break;
848 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
849 	case EVENT_RING_OPCODE_MULTICAST_RULES:
850 	case EVENT_RING_OPCODE_FILTERS_RULES:
851 		cid = (elem->message.data.eth_event.echo &
852 		       BNX2X_SWCID_MASK);
853 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
854 		break;
855 	case EVENT_RING_OPCODE_VF_FLR:
856 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
857 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
858 		   abs_vfid);
859 		goto get_vf;
860 	case EVENT_RING_OPCODE_MALICIOUS_VF:
861 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
862 		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
863 		   abs_vfid);
864 		goto get_vf;
865 	default:
866 		return 1;
867 	}
868 
869 	/* check if the cid is in the VF range */
870 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
871 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
872 		return 1;
873 	}
874 
875 	/* extract vf and rxq index from vf_cid - relies on the following:
876 	 * 1. vfid on cid reflects the true abs_vfid
877 	 * 2. the max number of VFs (per path) is 64
878 	 */
879 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
880 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
881 get_vf:
882 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
883 
884 	if (!vf) {
885 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
886 			  cid, abs_vfid);
887 		return 0;
888 	}
889 
890 	switch (opcode) {
891 	case EVENT_RING_OPCODE_CFC_DEL:
892 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
893 		   vf->abs_vfid, qidx);
894 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
895 						       &vfq_get(vf,
896 								qidx)->sp_obj,
897 						       BNX2X_Q_CMD_CFC_DEL);
898 		break;
899 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
900 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
901 		   vf->abs_vfid, qidx);
902 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
903 		break;
904 	case EVENT_RING_OPCODE_MULTICAST_RULES:
905 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
906 		   vf->abs_vfid, qidx);
907 		bnx2x_vf_handle_mcast_eqe(bp, vf);
908 		break;
909 	case EVENT_RING_OPCODE_FILTERS_RULES:
910 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
911 		   vf->abs_vfid, qidx);
912 		bnx2x_vf_handle_filters_eqe(bp, vf);
913 		break;
914 	case EVENT_RING_OPCODE_VF_FLR:
915 		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
916 		   vf->abs_vfid);
917 		/* Do nothing for now */
918 		break;
919 	case EVENT_RING_OPCODE_MALICIOUS_VF:
920 		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
921 		   vf->abs_vfid);
922 		/* Do nothing for now */
923 		break;
924 	}
925 	/* SRIOV: reschedule any 'in_progress' operations */
926 	bnx2x_iov_sp_event(bp, cid, false);
927 
928 	return 0;
929 }
930 
931 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
932 {
933 	/* extract the vf from vf_cid - relies on the following:
934 	 * 1. vfid on cid reflects the true abs_vfid
935 	 * 2. the max number of VFs (per path) is 64
936 	 */
937 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
938 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
939 }
940 
941 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
942 				struct bnx2x_queue_sp_obj **q_obj)
943 {
944 	struct bnx2x_virtf *vf;
945 
946 	if (!IS_SRIOV(bp))
947 		return;
948 
949 	vf = bnx2x_vf_by_cid(bp, vf_cid);
950 
951 	if (vf) {
952 		/* extract queue index from vf_cid - relies on the following:
953 		 * 1. vfid on cid reflects the true abs_vfid
954 		 * 2. the max number of VFs (per path) is 64
955 		 */
956 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
957 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
958 	} else {
959 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
960 	}
961 }
962 
963 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
964 {
965 	struct bnx2x_virtf *vf;
966 
967 	/* check if the cid is in the VF range */
968 	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
969 		return;
970 
971 	vf = bnx2x_vf_by_cid(bp, vf_cid);
972 	if (vf) {
973 		/* set in_progress flag */
974 		atomic_set(&vf->op_in_progress, 1);
975 		if (queue_work)
976 			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
977 	}
978 }
979 
980 void bnx2x_iov_sp_task(struct bnx2x *bp)
981 {
982 	int i;
983 
984 	if (!IS_SRIOV(bp))
985 		return;
986 	/* Iterate over all VFs and invoke state transition for VFs with
987 	 * 'in-progress' slow-path operations
988 	 */
989 	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
990 	for_each_vf(bp, i) {
991 		struct bnx2x_virtf *vf = BP_VF(bp, i);
992 
993 		if (!list_empty(&vf->op_list_head) &&
994 		    atomic_read(&vf->op_in_progress)) {
995 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
996 			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
997 		}
998 	}
999 }
1000 
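/* The number of queues a VF may use is bounded by its SB count, by the
 * CIDs reserved per VF and by the global per-VF queue limit.
 */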
1001 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
1002 {
1003 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
1004 		     BNX2X_VF_MAX_QUEUES);
1005 }
1006 
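/* Check that a resource request fits within what was provisioned for the
 * VF; for rx/tx queues that have not been provisioned yet (count of zero),
 * compare against the maximal per-VF queue count instead.
 */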
1007 static
1008 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1009 			    struct vf_pf_resc_request *req_resc)
1010 {
1011 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1012 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1013 
1014 	return ((req_resc->num_rxqs <= rxq_cnt) &&
1015 		(req_resc->num_txqs <= txq_cnt) &&
1016 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
1017 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
1018 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
1019 }
1020 
1021 /* CORE VF API */
1022 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1023 		     struct vf_pf_resc_request *resc)
1024 {
1025 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
1026 		BNX2X_CIDS_PER_VF;
1027 
1028 	union cdu_context *base_cxt = (union cdu_context *)
1029 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1030 		(base_vf_cid & (ILT_PAGE_CIDS-1));
1031 	int i;
1032 
1033 	/* if the state is 'acquired' the VF was not released or FLR'd; in
1034 	 * this case the returned resources match the already acquired
1035 	 * resources. Verify that the requested numbers do not exceed
1036 	 * the already acquired numbers.
1037 	 */
1038 	if (vf->state == VF_ACQUIRED) {
1039 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
1040 		   vf->abs_vfid);
1041 
1042 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
1043 			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
1044 				  vf->abs_vfid);
1045 			return -EINVAL;
1046 		}
1047 		return 0;
1048 	}
1049 
1050 	/* Otherwise vf state must be 'free' or 'reset' */
1051 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
1052 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
1053 			  vf->abs_vfid, vf->state);
1054 		return -EINVAL;
1055 	}
1056 
1057 	/* static allocation:
1058 	 * the global maximum numbers are fixed per VF. Fail the request if
1059 	 * the requested numbers exceed these globals
1060 	 */
1061 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
1062 		DP(BNX2X_MSG_IOV,
1063 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
1064 		/* set the max resource in the vf */
1065 		return -ENOMEM;
1066 	}
1067 
1068 	/* Set resources counters - 0 request means max available */
1069 	vf_sb_count(vf) = resc->num_sbs;
1070 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
1071 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
1072 	if (resc->num_mac_filters)
1073 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
1074 	if (resc->num_vlan_filters)
1075 		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
1076 
1077 	DP(BNX2X_MSG_IOV,
1078 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
1079 	   vf_sb_count(vf), vf_rxq_count(vf),
1080 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
1081 	   vf_vlan_rules_cnt(vf));
1082 
1083 	/* Initialize the queues */
1084 	if (!vf->vfqs) {
1085 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
1086 		return -EINVAL;
1087 	}
1088 
1089 	for_each_vfq(vf, i) {
1090 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
1091 
1092 		if (!q) {
1093 			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
1094 			return -EINVAL;
1095 		}
1096 
1097 		q->index = i;
1098 		q->cxt = &((base_cxt + i)->eth);
1099 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
1100 
1101 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
1102 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
1103 
1104 		/* init SP objects */
1105 		bnx2x_vfq_init(bp, vf, q);
1106 	}
1107 	vf->state = VF_ACQUIRED;
1108 	return 0;
1109 }
1110 
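/* The VF-PF channel is serialized per VF: op_mutex allows a single
 * request/response exchange at a time, and op_current records which TLV
 * currently holds the lock (verified on unlock).
 */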
1111 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
1112 			      enum channel_tlvs tlv)
1113 {
1114 	/* lock the channel */
1115 	mutex_lock(&vf->op_mutex);
1116 
1117 	/* record the locking op */
1118 	vf->op_current = tlv;
1119 
1120 	/* log the lock */
1121 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
1122 	   vf->abs_vfid, tlv);
1123 }
1124 
1125 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
1126 				enum channel_tlvs expected_tlv)
1127 {
1128 	WARN(expected_tlv != vf->op_current,
1129 	     "lock mismatch: expected %d found %d", expected_tlv,
1130 	     vf->op_current);
1131 
1132 	/* unlock the channel */
1133 	mutex_unlock(&vf->op_mutex);
1134 
1135 	/* log the unlock */
1136 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
1137 	   vf->abs_vfid, vf->op_current);
1138 
1139 	/* clear the locking op */
1140 	vf->op_current = CHANNEL_TLV_NONE;
1141 }
1142