/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

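/* Return the index (under the PF) of the VF whose absolute VF id matches
 * abs_vfid. If no VF matches, the for_each_vf() iterator leaves idx at
 * BNX2X_NR_VIRTFN(bp), so callers must range-check the result, as
 * bnx2x_vf_by_abs_fid() below does.
 */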
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

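/* Scan the IGU CAM (the IGU must be in normal, non-BC mode) and, for every
 * valid entry that belongs to a VF, count the status block towards that VF,
 * recording the first one as the VF's igu_base_id.
 */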
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

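/* Locate the SR-IOV extended capability in the PF's config space and cache
 * its fields (ctrl, total/initial VFs, offset, stride, page size, cap, link)
 * in the driver's private sriov structure.
 */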
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
				 int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
		return 0;
	}

	/* verify this function is a PF */
	if (IS_VF(bp))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV cannot be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not in normal mode, SRIOV cannot be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs - treat this
	 * as SR-IOV disabled rather than returning an uninitialized err
	 */
	if (iov->total == 0) {
		err = 0;
		goto failed;
	}

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

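/* Clear the "was error" indication for this VF in PGLUE_B. Each
 * WAS_ERROR_VF_*_CLR register covers a window of 32 VFs, so first select the
 * register holding this VF's bit and then write that bit to clear it.
 */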
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);

unknown_dev:
	BNX2X_ERR("Unknown device\n");
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Wait 100ms */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules - round the pool down to a power of two first */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
	 * to the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

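/* When SR-IOV is active, clear DMAE_REG_BACKWARD_COMP_EN so the DMAE block
 * runs without its backward-compatibility mode.
 */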
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

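/* A VF's routing ID is derived from the PF's devfn plus the SR-IOV
 * "first VF offset" and "VF stride": routing_id(vfid) = pf_devfn + offset +
 * stride * vfid. The bus number is the PF's bus plus any carry out of the
 * 8-bit devfn; the devfn is the low 8 bits.
 */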
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

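/* Each SR-IOV BAR of the PF is divided equally between the VFs it exposes;
 * a VF's slice starts at abs_vfid times the per-VF size. The loop steps by 2
 * over the PF's IOV resources, presumably because the VF BARs are 64-bit and
 * occupy two BAR slots each.
 */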
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		do_div(size, iov->total);
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

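/* true iff cid lies in the VF CID range
 * [BNX2X_FIRST_VF_CID, BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
 */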
static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

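/* Complete a pending MAC or VLAN classification ramrod on the VF queue's
 * corresponding object. RAMROD_CONT tells the object to immediately push
 * out whatever follow-up commands are still pending.
 */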
static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

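/* Handle an event-queue element on behalf of a VF. Returns 1 when the
 * element is not VF related and should be processed by the regular PF path,
 * and 0 when it has been consumed here.
 */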
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is in the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}

static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

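/* Look up the queue slow-path state object that owns vf_cid so the generic
 * slow-path completion code can drive the VF queue's state machine.
 */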
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. the max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

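/* Mark the VF that owns vf_cid as having a slow-path operation in progress.
 * When queue_work is true, the PF's sp_task is scheduled so the pending VF
 * operation is advanced from there (see bnx2x_iov_sp_task() below).
 */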
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is in the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}