1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2012 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include "bnx2x_sriov.h"
24 
25 /* General service functions */
26 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
27 					 u16 pf_id)
28 {
29 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
30 		pf_id);
31 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
32 		pf_id);
33 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
34 		pf_id);
35 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
36 		pf_id);
37 }
38 
39 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
40 					u8 enable)
41 {
42 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
43 		enable);
44 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
45 		enable);
46 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
47 		enable);
48 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
49 		enable);
50 }
51 
52 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
53 {
54 	int idx;
55 
56 	for_each_vf(bp, idx)
57 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
58 			break;
59 	return idx;
60 }
61 
62 static
63 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
64 {
65 	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
66 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
67 }
68 
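/* ack a status block of a VF on its behalf: the "regular" IGU command is
 * built here and issued through the PF's GRC window, with the VF's fid
 * encoded in the control register
 */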
69 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
70 				u8 igu_sb_id, u8 segment, u16 index, u8 op,
71 				u8 update)
72 {
73 	/* acking a VF sb through the PF - use the GRC */
74 	u32 ctl;
75 	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
76 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
77 	u32 func_encode = vf->abs_vfid;
78 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
79 	struct igu_regular cmd_data = {0};
80 
81 	cmd_data.sb_id_and_flags =
82 			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
83 			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
84 			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
85 			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
86 
87 	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
88 	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
89 	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
90 
91 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
92 	   cmd_data.sb_id_and_flags, igu_addr_data);
93 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
94 	mmiowb();
95 	barrier();
96 
97 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
98 	   ctl, igu_addr_ctl);
99 	REG_WR(bp, igu_addr_ctl, ctl);
100 	mmiowb();
101 	barrier();
102 }
103 
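/* SR-IOV relies on ARI so that VF routing IDs beyond the traditional eight
 * functions per device can be addressed; here we only check that the
 * upstream bridge has ARI forwarding enabled
 */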
104 static int bnx2x_ari_enabled(struct pci_dev *dev)
105 {
106 	return dev->bus->self && dev->bus->self->ari_enabled;
107 }
108 
109 static void
110 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
111 {
112 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
113 	if (vf) {
114 		if (!vf_sb_count(vf))
115 			vf->igu_base_id = igu_sb_id;
116 		++vf_sb_count(vf);
117 	}
118 }
119 
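/* walk the IGU CAM and, for every valid entry that belongs to a VF, record
 * the VF's base IGU sb id and increment its sb count
 */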
120 static void
121 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
122 {
123 	int sb_id;
124 	u32 val;
125 	u8 fid;
126 
127 	/* IGU in normal mode - read CAM */
128 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
129 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
130 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
131 			continue;
132 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
133 		if (!(fid & IGU_FID_ENCODE_IS_PF))
134 			bnx2x_vf_set_igu_info(bp, sb_id,
135 					      (fid & IGU_FID_VF_NUM_MASK));
136 
137 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
138 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
139 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
140 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
141 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
142 	}
143 }
144 
145 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
146 {
147 	if (bp->vfdb) {
148 		kfree(bp->vfdb->vfqs);
149 		kfree(bp->vfdb->vfs);
150 		kfree(bp->vfdb);
151 	}
152 	bp->vfdb = NULL;
153 }
154 
155 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
156 {
157 	int pos;
158 	struct pci_dev *dev = bp->pdev;
159 
160 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
161 	if (!pos) {
162 		BNX2X_ERR("failed to find SRIOV capability in device\n");
163 		return -ENODEV;
164 	}
165 
166 	iov->pos = pos;
167 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
168 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
169 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
170 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
171 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
172 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
173 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
174 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
175 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
176 
177 	return 0;
178 }
179 
180 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
181 {
182 	u32 val;
183 
184 	/* read the SRIOV capability structure
185 	 * The fields can be read via configuration read or
186 	 * directly from the device (starting at offset PCICFG_OFFSET)
187 	 */
188 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
189 		return -ENODEV;
190 
191 	/* get the number of SRIOV bars */
192 	iov->nres = 0;
193 
194 	/* read the first_vfid */
195 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
196 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
197 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
198 
199 	DP(BNX2X_MSG_IOV,
200 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
201 	   BP_FUNC(bp),
202 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
203 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
204 
205 	return 0;
206 }
207 
208 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
209 {
210 	int i;
211 	u8 queue_count = 0;
212 
213 	if (IS_SRIOV(bp))
214 		for_each_vf(bp, i)
215 			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
216 
217 	return queue_count;
218 }
219 
220 /* must be called after PF bars are mapped */
221 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
222 				 int num_vfs_param)
223 {
224 	int err, i, qcount;
225 	struct bnx2x_sriov *iov;
226 	struct pci_dev *dev = bp->pdev;
227 
228 	bp->vfdb = NULL;
229 
230 	/* verify sriov capability is present in configuration space */
231 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
232 		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
233 		return 0;
234 	}
235 
236 	/* verify is pf */
237 	if (IS_VF(bp))
238 		return 0;
239 
240 	/* verify chip revision */
241 	if (CHIP_IS_E1x(bp))
242 		return 0;
243 
244 	/* check if SRIOV support is turned off */
245 	if (!num_vfs_param)
246 		return 0;
247 
248 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
249 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
250 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
251 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
252 		return 0;
253 	}
254 
255 	/* SRIOV can be enabled only with MSIX */
256 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
257 	    int_mode_param == BNX2X_INT_MODE_INTX) {
258 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
259 		return 0;
260 	}
261 
262 	/* verify ari is enabled */
263 	if (!bnx2x_ari_enabled(bp->pdev)) {
264 		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
265 		return 0;
266 	}
267 
268 	/* verify igu is in normal mode */
269 	if (CHIP_INT_MODE_IS_BC(bp)) {
270 		BNX2X_ERR("IGU not in normal mode, SRIOV can not be enabled\n");
271 		return 0;
272 	}
273 
274 	/* allocate the vfs database */
275 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
276 	if (!bp->vfdb) {
277 		BNX2X_ERR("failed to allocate vf database\n");
278 		err = -ENOMEM;
279 		goto failed;
280 	}
281 
282 	/* get the sriov info - Linux already collected all the pertinent
283 	 * information; however, the sriov structure is for the private use
284 	 * of the pci module. Also, we want this information regardless
285 	 * of the hypervisor.
286 	 */
287 	iov = &(bp->vfdb->sriov);
288 	err = bnx2x_sriov_info(bp, iov);
289 	if (err)
290 		goto failed;
291 
292 	/* SR-IOV capability was enabled but there are no VFs */
293 	if (iov->total == 0)
294 		goto failed;
295 
296 	/* calculate the actual number of VFs */
297 	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
298 
299 	/* allocate the vf array */
300 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
301 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
302 	if (!bp->vfdb->vfs) {
303 		BNX2X_ERR("failed to allocate vf array\n");
304 		err = -ENOMEM;
305 		goto failed;
306 	}
307 
308 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
309 	for_each_vf(bp, i) {
310 		bnx2x_vf(bp, i, index) = i;
311 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
312 		bnx2x_vf(bp, i, state) = VF_FREE;
313 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
314 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
315 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
316 	}
317 
318 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
319 	bnx2x_get_vf_igu_cam_info(bp);
320 
321 	/* get the total queue count and allocate the global queue arrays */
322 	qcount = bnx2x_iov_get_max_queue_count(bp);
323 
324 	/* allocate the queue arrays for all VFs */
325 	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
326 				 GFP_KERNEL);
327 	if (!bp->vfdb->vfqs) {
328 		BNX2X_ERR("failed to allocate vf queue array\n");
329 		err = -ENOMEM;
330 		goto failed;
331 	}
332 
333 	return 0;
334 failed:
335 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
336 	__bnx2x_iov_free_vfdb(bp);
337 	return err;
338 }

339 /* VF enable primitives
340  * when pretending is required, the caller is responsible for calling
341  * bnx2x_pretend_func() prior to calling these routines
342  */
343 
344 /* called only on E1H or E2.
345  * When pretending to be PF, the pretend value is the function number 0...7
346  * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
347  * combination
348  */
349 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
350 {
351 	u32 pretend_reg;
352 
353 	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
354 		return -1;
355 
356 	/* get my own pretend register */
357 	pretend_reg = bnx2x_get_pretend_reg(bp);
358 	REG_WR(bp, pretend_reg, pretend_func_val);
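	/* read back so that the new pretend value is guaranteed to have taken
	 * effect before the caller issues further GRC accesses
	 */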
359 	REG_RD(bp, pretend_reg);
360 	return 0;
361 }
362 
363 /* internal vf enable - until vf is enabled internally all transactions
364  * are blocked. this routine should always be called last with pretend.
365  */
366 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
367 {
368 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
369 }
370 
371 /* clears vf error in all semi blocks */
372 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
373 {
374 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
375 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
376 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
377 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
378 }
379 
380 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
381 {
382 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
383 	u32 was_err_reg = 0;
384 
385 	switch (was_err_group) {
386 	case 0:
387 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
388 	    break;
389 	case 1:
390 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
391 	    break;
392 	case 2:
393 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
394 	    break;
395 	case 3:
396 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
397 	    break;
398 	}
399 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
400 }
401 
402 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
403 {
404 	int i;
405 	u32 val;
406 
407 	/* Set VF masks and configuration - pretend */
408 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
409 
410 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
411 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
412 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
413 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
414 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
415 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
416 
417 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
418 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
419 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
420 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
421 	val &= ~IGU_VF_CONF_PARENT_MASK;
422 	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
423 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
424 
425 	DP(BNX2X_MSG_IOV,
426 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
427 	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
428 
429 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
430 
431 	/* iterate over all queues, clear sb consumer */
432 	for (i = 0; i < vf_sb_count(vf); i++) {
433 		u8 igu_sb_id = vf_igu_sb(vf, i);
434 
435 		/* zero prod memory */
436 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
437 
438 		/* clear sb state machine */
439 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
440 				       false /* VF */);
441 
442 		/* disable + update */
443 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
444 				    IGU_INT_DISABLE, 1);
445 	}
446 }
447 
448 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
449 {
450 	/* set the VF-PF association in the FW */
451 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
452 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
453 
454 	/* clear vf errors */
455 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
456 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
457 
458 	/* internal vf-enable - pretend */
459 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
460 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
461 	bnx2x_vf_enable_internal(bp, true);
462 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
463 }
464 
465 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
466 {
467 	/* Reset vf in IGU - interrupts are still disabled */
468 	bnx2x_vf_igu_reset(bp, vf);
469 
470 	/* pretend to enable the vf with the PBF */
471 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
472 	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
473 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
474 }
475 
476 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
477 {
478 	struct pci_dev *dev;
479 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
480 
481 	if (!vf)
482 		goto unknown_dev;
483 
484 	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
485 	if (dev)
486 		return bnx2x_is_pcie_pending(dev);
487 
488 unknown_dev:
489 	BNX2X_ERR("Unknown device\n");
490 	return false;
491 }
492 
493 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
494 {
495 	/* Wait 100ms */
496 	msleep(100);
497 
498 	/* Verify no pending pci transactions */
499 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
500 		BNX2X_ERR("PCIE Transactions still pending\n");
501 
502 	return 0;
503 }
504 
505 /* must be called after the number of PF queues and the number of VFs are
506  * both known
507  */
508 static void
509 bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
510 {
511 	u16 vlan_count = 0;
512 
513 	/* will be set only during VF-ACQUIRE */
514 	resc->num_rxqs = 0;
515 	resc->num_txqs = 0;
516 
517 	/* no credit calculations for macs (just yet) */
518 	resc->num_mac_filters = 1;
519 
520 	/* divvy up vlan rules */
521 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
522 	vlan_count = 1 << ilog2(vlan_count);
523 	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
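	/* e.g. a pool of 200 vlan credits would first be rounded down to the
	 * nearest power of two (128) and, with 64 VFs, leave each VF with
	 * 2 vlan filters
	 */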
524 
525 	/* no real limitation */
526 	resc->num_mc_filters = 0;
527 
528 	/* num_sbs already set */
529 }
530 
531 /* IOV global initialization routines  */
532 void bnx2x_iov_init_dq(struct bnx2x *bp)
533 {
534 	if (!IS_SRIOV(bp))
535 		return;
536 
537 	/* Set the DQ such that the CID reflects the abs_vfid */
538 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
539 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
540 
541 	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
542 	 * to the PF L2 queues
543 	 */
544 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
545 
546 	/* The VF window size is the log2 of the max number of CIDs per VF */
547 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
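	/* e.g. a window size of 4 would give each VF a window of 16
	 * consecutive CIDs
	 */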
548 
549 	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
550 	 * the PF doorbell size although the two are independent.
551 	 */
552 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
553 	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
554 
555 	/* No security checks for now -
556 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
557 	 * CID range 0 - 0x1ffff
558 	 */
559 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
560 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
561 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
562 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
563 
564 	/* set the number of VF allowed doorbells to the full DQ range */
565 	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
566 
567 	/* set the VF doorbell threshold */
568 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
569 }
570 
571 void bnx2x_iov_init_dmae(struct bnx2x *bp)
572 {
573 	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
574 	if (!IS_SRIOV(bp))
575 		return;
576 
577 	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
578 }
579 
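/* the routing ID of a VF is derived, per the PCI SR-IOV spec, from the PF's
 * devfn plus the first-VF offset plus vfid times the VF stride; the bits
 * above the devfn select the bus
 */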
580 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
581 {
582 	struct pci_dev *dev = bp->pdev;
583 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
584 
585 	return dev->bus->number + ((dev->devfn + iov->offset +
586 				    iov->stride * vfid) >> 8);
587 }
588 
589 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
590 {
591 	struct pci_dev *dev = bp->pdev;
592 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
593 
594 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
595 }
596 
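/* each SR-IOV BAR covers the same resource for all VFs; carve out this VF's
 * slice by dividing the BAR evenly between the total number of VFs
 */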
597 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
598 {
599 	int i, n;
600 	struct pci_dev *dev = bp->pdev;
601 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
602 
603 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
604 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
605 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
606 
607 		do_div(size, iov->total);
608 		vf->bars[n].bar = start + size * vf->abs_vfid;
609 		vf->bars[n].size = size;
610 	}
611 }
612 
613 void bnx2x_iov_remove_one(struct bnx2x *bp)
614 {
615 	/* if SRIOV is not enabled there's nothing to do */
616 	if (!IS_SRIOV(bp))
617 		return;
618 
619 	/* free vf database */
620 	__bnx2x_iov_free_vfdb(bp);
621 }
622 
623 void bnx2x_iov_free_mem(struct bnx2x *bp)
624 {
625 	int i;
626 
627 	if (!IS_SRIOV(bp))
628 		return;
629 
630 	/* free vfs hw contexts */
631 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
632 		struct hw_dma *cxt = &bp->vfdb->context[i];
633 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
634 	}
635 
636 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
637 		       BP_VFDB(bp)->sp_dma.mapping,
638 		       BP_VFDB(bp)->sp_dma.size);
639 
640 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
641 		       BP_VF_MBX_DMA(bp)->mapping,
642 		       BP_VF_MBX_DMA(bp)->size);
643 }
644 
645 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
646 {
647 	size_t tot_size;
648 	int i, rc = 0;
649 
650 	if (!IS_SRIOV(bp))
651 		return rc;
652 
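	/* note: BNX2X_PCI_ALLOC() bails out to the alloc_mem_err label below
	 * if a DMA allocation fails
	 */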
653 	/* allocate vfs hw contexts */
654 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
655 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
656 
657 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
658 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
659 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
660 
661 		if (cxt->size) {
662 			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
663 		} else {
664 			cxt->addr = NULL;
665 			cxt->mapping = 0;
666 		}
667 		tot_size -= cxt->size;
668 	}
669 
670 	/* allocate vfs ramrods dma memory - client_init and set_mac */
671 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
672 	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
673 			tot_size);
674 	BP_VFDB(bp)->sp_dma.size = tot_size;
675 
676 	/* allocate mailboxes */
677 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
678 	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
679 			tot_size);
680 	BP_VF_MBX_DMA(bp)->size = tot_size;
681 
682 	return 0;
683 
684 alloc_mem_err:
685 	return -ENOMEM;
686 }
687 
688 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
689 			   struct bnx2x_vf_queue *q)
690 {
691 	u8 cl_id = vfq_cl_id(vf, q);
692 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
693 	unsigned long q_type = 0;
694 
695 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
696 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
697 
698 	/* Queue State object */
699 	bnx2x_init_queue_obj(bp, &q->sp_obj,
700 			     cl_id, &q->cid, 1, func_id,
701 			     bnx2x_vf_sp(bp, vf, q_data),
702 			     bnx2x_vf_sp_map(bp, vf, q_data),
703 			     q_type);
704 
705 	DP(BNX2X_MSG_IOV,
706 	   "initialized vf %d's queue object. func id set to %d\n",
707 	   vf->abs_vfid, q->sp_obj.func_id);
708 
709 	/* mac/vlan objects are per queue, but only those
710 	 * that belong to the leading queue are initialized
711 	 */
712 	if (vfq_is_leading(q)) {
713 		/* mac */
714 		bnx2x_init_mac_obj(bp, &q->mac_obj,
715 				   cl_id, q->cid, func_id,
716 				   bnx2x_vf_sp(bp, vf, mac_rdata),
717 				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
718 				   BNX2X_FILTER_MAC_PENDING,
719 				   &vf->filter_state,
720 				   BNX2X_OBJ_TYPE_RX_TX,
721 				   &bp->macs_pool);
722 		/* vlan */
723 		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
724 				    cl_id, q->cid, func_id,
725 				    bnx2x_vf_sp(bp, vf, vlan_rdata),
726 				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
727 				    BNX2X_FILTER_VLAN_PENDING,
728 				    &vf->filter_state,
729 				    BNX2X_OBJ_TYPE_RX_TX,
730 				    &bp->vlans_pool);
731 
732 		/* mcast */
733 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
734 				     q->cid, func_id, func_id,
735 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
736 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
737 				     BNX2X_FILTER_MCAST_PENDING,
738 				     &vf->filter_state,
739 				     BNX2X_OBJ_TYPE_RX_TX);
740 
741 		vf->leading_rss = cl_id;
742 	}
743 }
744 
745 /* called by bnx2x_nic_load */
746 int bnx2x_iov_nic_init(struct bnx2x *bp)
747 {
748 	int vfid, qcount, i;
749 
750 	if (!IS_SRIOV(bp)) {
751 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
752 		return 0;
753 	}
754 
755 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
756 
757 	/* initialize vf database */
758 	for_each_vf(bp, vfid) {
759 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
760 
761 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
762 			BNX2X_CIDS_PER_VF;
763 
764 		union cdu_context *base_cxt = (union cdu_context *)
765 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
766 			(base_vf_cid & (ILT_PAGE_CIDS-1));
767 
768 		DP(BNX2X_MSG_IOV,
769 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
770 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
771 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
772 
773 		/* init statically provisioned resources */
774 		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
775 
776 		/* queues are initialized during VF-ACQUIRE */
777 
778 		/* reserve the vf vlan credit */
779 		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
780 
781 		vf->filter_state = 0;
782 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
783 
784 		/*  init mcast object - This object will be re-initialized
785 		 *  during VF-ACQUIRE with the proper cl_id and cid.
786 		 *  It needs to be initialized here so that it can be safely
787 		 *  handled by a subsequent FLR flow.
788 		 */
789 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
790 				     0xFF, 0xFF, 0xFF,
791 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
792 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
793 				     BNX2X_FILTER_MCAST_PENDING,
794 				     &vf->filter_state,
795 				     BNX2X_OBJ_TYPE_RX_TX);
796 
797 		/* set the mailbox message addresses */
798 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
799 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
800 			MBX_MSG_ALIGNED_SIZE);
801 
802 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
803 			vfid * MBX_MSG_ALIGNED_SIZE;
804 
805 		/* Enable vf mailbox */
806 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
807 	}
808 
809 	/* Final VF init */
810 	qcount = 0;
811 	for_each_vf(bp, i) {
812 		struct bnx2x_virtf *vf = BP_VF(bp, i);
813 
814 		/* fill in the BDF and bars */
815 		vf->bus = bnx2x_vf_bus(bp, i);
816 		vf->devfn = bnx2x_vf_devfn(bp, i);
817 		bnx2x_vf_set_bars(bp, vf);
818 
819 		DP(BNX2X_MSG_IOV,
820 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
821 		   vf->abs_vfid, vf->bus, vf->devfn,
822 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
823 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
824 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
825 
826 		/* set local queue arrays */
827 		vf->vfqs = &bp->vfdb->vfqs[qcount];
828 		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
829 	}
830 
831 	return 0;
832 }
833 
834 /* called by bnx2x_init_hw_func, returns the next ilt line */
835 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
836 {
837 	int i;
838 	struct bnx2x_ilt *ilt = BP_ILT(bp);
839 
840 	if (!IS_SRIOV(bp))
841 		return line;
842 
843 	/* set vfs ilt lines */
844 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
845 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
846 
847 		ilt->lines[line+i].page = hw_cxt->addr;
848 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
849 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
850 	}
851 	return line + i;
852 }
853 
854 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
855 {
856 	return ((cid >= BNX2X_FIRST_VF_CID) &&
857 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
858 }
859 
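/* the type of the completed classification command (mac or vlan) is carried
 * in the upper bits of the echo field of the completion element
 */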
860 static
861 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
862 					struct bnx2x_vf_queue *vfq,
863 					union event_ring_elem *elem)
864 {
865 	unsigned long ramrod_flags = 0;
866 	int rc = 0;
867 
868 	/* Always push next commands out, don't wait here */
869 	set_bit(RAMROD_CONT, &ramrod_flags);
870 
871 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
872 	case BNX2X_FILTER_MAC_PENDING:
873 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
874 					   &ramrod_flags);
875 		break;
876 	case BNX2X_FILTER_VLAN_PENDING:
877 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
878 					    &ramrod_flags);
879 		break;
880 	default:
881 		BNX2X_ERR("Unsupported classification command: %d\n",
882 			  elem->message.data.eth_event.echo);
883 		return;
884 	}
885 	if (rc < 0)
886 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
887 	else if (rc > 0)
888 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
889 }
890 
891 static
892 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
893 			       struct bnx2x_virtf *vf)
894 {
895 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
896 	int rc;
897 
898 	rparam.mcast_obj = &vf->mcast_obj;
899 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
900 
901 	/* If there are pending mcast commands - send them */
902 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
903 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
904 		if (rc < 0)
905 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
906 				  rc);
907 	}
908 }
909 
910 static
911 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
912 				 struct bnx2x_virtf *vf)
913 {
914 	smp_mb__before_clear_bit();
915 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
916 	smp_mb__after_clear_bit();
917 }
918 
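/* handle EQ completions that belong to VFs. Returns 1 when the element is
 * not VF related (so the caller may process it), 0 once it has been handled
 * here.
 */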
919 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
920 {
921 	struct bnx2x_virtf *vf;
922 	int qidx = 0, abs_vfid;
923 	u8 opcode;
924 	u16 cid = 0xffff;
925 
926 	if (!IS_SRIOV(bp))
927 		return 1;
928 
929 	/* first get the cid - the only events that need a cid here are
930 	 * cfc-delete and the classification/mcast/filter rule completions
931 	 */
932 	opcode = elem->message.opcode;
933 
934 	switch (opcode) {
935 	case EVENT_RING_OPCODE_CFC_DEL:
936 		cid = SW_CID((__force __le32)
937 			     elem->message.data.cfc_del_event.cid);
938 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
939 		break;
940 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
941 	case EVENT_RING_OPCODE_MULTICAST_RULES:
942 	case EVENT_RING_OPCODE_FILTERS_RULES:
943 		cid = (elem->message.data.eth_event.echo &
944 		       BNX2X_SWCID_MASK);
945 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
946 		break;
947 	case EVENT_RING_OPCODE_VF_FLR:
948 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
949 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
950 		   abs_vfid);
951 		goto get_vf;
952 	case EVENT_RING_OPCODE_MALICIOUS_VF:
953 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
954 		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
955 		   abs_vfid);
956 		goto get_vf;
957 	default:
958 		return 1;
959 	}
960 
961 	/* check if the cid is in the VF range */
962 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
963 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
964 		return 1;
965 	}
966 
967 	/* extract vf and rxq index from vf_cid - relies on the following:
968 	 * 1. vfid on cid reflects the true abs_vfid
969 	 * 2. the max number of VFs (per path) is 64
970 	 */
971 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
972 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
973 get_vf:
974 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
975 
976 	if (!vf) {
977 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
978 			  cid, abs_vfid);
979 		return 0;
980 	}
981 
982 	switch (opcode) {
983 	case EVENT_RING_OPCODE_CFC_DEL:
984 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
985 		   vf->abs_vfid, qidx);
986 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
987 						       &vfq_get(vf,
988 								qidx)->sp_obj,
989 						       BNX2X_Q_CMD_CFC_DEL);
990 		break;
991 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
992 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
993 		   vf->abs_vfid, qidx);
994 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
995 		break;
996 	case EVENT_RING_OPCODE_MULTICAST_RULES:
997 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
998 		   vf->abs_vfid, qidx);
999 		bnx2x_vf_handle_mcast_eqe(bp, vf);
1000 		break;
1001 	case EVENT_RING_OPCODE_FILTERS_RULES:
1002 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1003 		   vf->abs_vfid, qidx);
1004 		bnx2x_vf_handle_filters_eqe(bp, vf);
1005 		break;
1006 	case EVENT_RING_OPCODE_VF_FLR:
1007 		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
1008 		   vf->abs_vfid);
1009 		/* Do nothing for now */
1010 		break;
1011 	case EVENT_RING_OPCODE_MALICIOUS_VF:
1012 		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
1013 		   vf->abs_vfid);
1014 		/* Do nothing for now */
1015 		break;
1016 	}
1017 	/* SRIOV: reschedule any 'in_progress' operations */
1018 	bnx2x_iov_sp_event(bp, cid, false);
1019 
1020 	return 0;
1021 }
1022 
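/* in a VF cid the low BNX2X_VF_CID_WND bits hold the queue index and the
 * bits above them hold the abs_vfid (see also the extraction in
 * bnx2x_iov_eq_sp_event())
 */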
1023 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1024 {
1025 	/* extract the vf from vf_cid - relies on the following:
1026 	 * 1. vfid on cid reflects the true abs_vfid
1027 	 * 2. the max number of VFs (per path) is 64
1028 	 */
1029 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1030 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1031 }
1032 
1033 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1034 				struct bnx2x_queue_sp_obj **q_obj)
1035 {
1036 	struct bnx2x_virtf *vf;
1037 
1038 	if (!IS_SRIOV(bp))
1039 		return;
1040 
1041 	vf = bnx2x_vf_by_cid(bp, vf_cid);
1042 
1043 	if (vf) {
1044 		/* extract queue index from vf_cid - relies on the following:
1045 		 * 1. vfid on cid reflects the true abs_vfid
1046 		 * 2. the max number of VFs (per path) is 64
1047 		 */
1048 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1049 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1050 	} else {
1051 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1052 	}
1053 }
1054 
1055 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
1056 {
1057 	struct bnx2x_virtf *vf;
1058 
1059 	/* check if the cid is in the VF range */
1060 	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
1061 		return;
1062 
1063 	vf = bnx2x_vf_by_cid(bp, vf_cid);
1064 	if (vf) {
1065 		/* set in_progress flag */
1066 		atomic_set(&vf->op_in_progress, 1);
1067 		if (queue_work)
1068 			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1069 	}
1070 }
1071 
1072 void bnx2x_iov_sp_task(struct bnx2x *bp)
1073 {
1074 	int i;
1075 
1076 	if (!IS_SRIOV(bp))
1077 		return;
1078 	/* Iterate over all VFs and invoke state transition for VFs with
1079 	 * 'in-progress' slow-path operations
1080 	 */
1081 	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
1082 	for_each_vf(bp, i) {
1083 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1084 
1085 		if (!list_empty(&vf->op_list_head) &&
1086 		    atomic_read(&vf->op_in_progress)) {
1087 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
1088 			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
1089 		}
1090 	}
1091 }
1092 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
1093 				u8 enable)
1094 {
1095 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1096 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
1097 
1098 	REG_WR(bp, reg, val);
1099 }
1100 
1101 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
1102 {
1103 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
1104 		     BNX2X_VF_MAX_QUEUES);
1105 }
1106 
1107 static
1108 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1109 			    struct vf_pf_resc_request *req_resc)
1110 {
1111 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1112 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1113 
1114 	return ((req_resc->num_rxqs <= rxq_cnt) &&
1115 		(req_resc->num_txqs <= txq_cnt) &&
1116 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
1117 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
1118 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
1119 }
1120 
1121 /* CORE VF API */
1122 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1123 		     struct vf_pf_resc_request *resc)
1124 {
1125 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
1126 		BNX2X_CIDS_PER_VF;
1127 
1128 	union cdu_context *base_cxt = (union cdu_context *)
1129 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1130 		(base_vf_cid & (ILT_PAGE_CIDS-1));
1131 	int i;
1132 
1133 	/* if the state is 'acquired' the VF was not released or FLR'd; in
1134 	 * this case the returned resources match the already acquired
1135 	 * resources. Verify that the requested numbers do
1136 	 * not exceed the already acquired numbers.
1137 	 */
1138 	if (vf->state == VF_ACQUIRED) {
1139 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
1140 		   vf->abs_vfid);
1141 
1142 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
1143 			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
1144 				  vf->abs_vfid);
1145 			return -EINVAL;
1146 		}
1147 		return 0;
1148 	}
1149 
1150 	/* Otherwise vf state must be 'free' or 'reset' */
1151 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
1152 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
1153 			  vf->abs_vfid, vf->state);
1154 		return -EINVAL;
1155 	}
1156 
1157 	/* static allocation:
1158 	 * the global maximum numbers are fixed per VF. Fail the request if
1159 	 * the requested numbers exceed these globals
1160 	 */
1161 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
1162 		DP(BNX2X_MSG_IOV,
1163 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
1164 		/* set the max resource in the vf */
1165 		return -ENOMEM;
1166 	}
1167 
1168 	/* Set resources counters - 0 request means max available */
1169 	vf_sb_count(vf) = resc->num_sbs;
1170 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
1171 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
1172 	if (resc->num_mac_filters)
1173 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
1174 	if (resc->num_vlan_filters)
1175 		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
1176 
1177 	DP(BNX2X_MSG_IOV,
1178 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
1179 	   vf_sb_count(vf), vf_rxq_count(vf),
1180 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
1181 	   vf_vlan_rules_cnt(vf));
1182 
1183 	/* Initialize the queues */
1184 	if (!vf->vfqs) {
1185 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
1186 		return -EINVAL;
1187 	}
1188 
1189 	for_each_vfq(vf, i) {
1190 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
1191 
1192 		if (!q) {
1193 			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
1194 			return -EINVAL;
1195 		}
1196 
1197 		q->index = i;
1198 		q->cxt = &((base_cxt + i)->eth);
1199 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
1200 
1201 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
1202 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
1203 
1204 		/* init SP objects */
1205 		bnx2x_vfq_init(bp, vf, q);
1206 	}
1207 	vf->state = VF_ACQUIRED;
1208 	return 0;
1209 }
1210 
1211 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
1212 {
1213 	struct bnx2x_func_init_params func_init = {0};
1214 	u16 flags = 0;
1215 	int i;
1216 
1217 	/* the sb resources are initialized at this point, do the
1218 	 * FW/HW initializations
1219 	 */
1220 	for_each_vf_sb(vf, i)
1221 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
1222 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
1223 
1224 	/* Sanity checks */
1225 	if (vf->state != VF_ACQUIRED) {
1226 		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
1227 		   vf->abs_vfid, vf->state);
1228 		return -EINVAL;
1229 	}
1230 	/* FLR cleanup epilogue */
1231 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
1232 		return -EBUSY;
1233 
1234 	/* reset IGU VF statistics: MSIX */
1235 	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
1236 
1237 	/* vf init */
1238 	if (vf->cfg_flags & VF_CFG_STATS)
1239 		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
1240 
1241 	if (vf->cfg_flags & VF_CFG_TPA)
1242 		flags |= FUNC_FLG_TPA;
1243 
1244 	if (is_vf_multi(vf))
1245 		flags |= FUNC_FLG_RSS;
1246 
1247 	/* function setup */
1248 	func_init.func_flgs = flags;
1249 	func_init.pf_id = BP_FUNC(bp);
1250 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
1251 	func_init.fw_stat_map = vf->fw_stat_map;
1252 	func_init.spq_map = vf->spq_map;
1253 	func_init.spq_prod = 0;
1254 	bnx2x_func_init(bp, &func_init);
1255 
1256 	/* Enable the vf */
1257 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
1258 	bnx2x_vf_enable_traffic(bp, vf);
1259 
1260 	/* queue protection table */
1261 	for_each_vfq(vf, i)
1262 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
1263 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
1264 
1265 	vf->state = VF_ENABLED;
1266 
1267 	return 0;
1268 }
1269 
1270 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
1271 			      enum channel_tlvs tlv)
1272 {
1273 	/* lock the channel */
1274 	mutex_lock(&vf->op_mutex);
1275 
1276 	/* record the locking op */
1277 	vf->op_current = tlv;
1278 
1279 	/* log the lock */
1280 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
1281 	   vf->abs_vfid, tlv);
1282 }
1283 
1284 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
1285 				enum channel_tlvs expected_tlv)
1286 {
1287 	WARN(expected_tlv != vf->op_current,
1288 	     "lock mismatch: expected %d found %d", expected_tlv,
1289 	     vf->op_current);
1290 
1291 	/* unlock the channel */
1292 	mutex_unlock(&vf->op_mutex);
1293 
1294 	/* log the unlock */
1295 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
1296 	   vf->abs_vfid, vf->op_current);
1297 
1298 	/* clear the locking op */
1299 	vf->op_current = CHANNEL_TLV_NONE;
1300 }
1301