1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16  * Written by: Shmulik Ravid
17  *	       Ariel Elior <ariel.elior@qlogic.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include "bnx2x_sp.h"
24 #include <linux/crc32.h>
25 #include <linux/if_vlan.h>
26 
27 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
28 			    struct bnx2x_virtf **vf,
29 			    struct pf_vf_bulletin_content **bulletin,
30 			    bool test_queue);
31 
32 /* General service functions */
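/* Program the VF-to-PF mapping for a function in the internal memory of
 * each of the four storm processors (XSTORM, CSTORM, TSTORM, USTORM).
 */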
33 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
34 					 u16 pf_id)
35 {
36 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
37 		pf_id);
38 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
39 		pf_id);
40 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
41 		pf_id);
42 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
43 		pf_id);
44 }
45 
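/* Set or clear the function-enable flag for a function in the internal
 * memory of each of the four storm processors.
 */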
46 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
47 					u8 enable)
48 {
49 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
50 		enable);
51 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
52 		enable);
53 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
54 		enable);
55 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
56 		enable);
57 }
58 
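/* Return the index of the VF whose absolute FID matches abs_vfid, or
 * BNX2X_NR_VIRTFN(bp) if no such VF exists.
 */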
59 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
60 {
61 	int idx;
62 
63 	for_each_vf(bp, idx)
64 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
65 			break;
66 	return idx;
67 }
68 
69 static
70 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
71 {
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);

	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
74 }
75 
76 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
77 				u8 igu_sb_id, u8 segment, u16 index, u8 op,
78 				u8 update)
79 {
80 	/* acking a VF sb through the PF - use the GRC */
81 	u32 ctl;
82 	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
83 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
84 	u32 func_encode = vf->abs_vfid;
85 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
86 	struct igu_regular cmd_data = {0};
87 
88 	cmd_data.sb_id_and_flags =
89 			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
90 			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
91 			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
92 			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
93 
94 	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
95 	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
96 	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
97 
98 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
99 	   cmd_data.sb_id_and_flags, igu_addr_data);
100 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
101 	mmiowb();
102 	barrier();
103 
104 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
105 	   ctl, igu_addr_ctl);
106 	REG_WR(bp, igu_addr_ctl, ctl);
107 	mmiowb();
108 	barrier();
109 }
110 
111 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
112 				       struct bnx2x_virtf *vf,
113 				       bool print_err)
114 {
115 	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
116 		if (print_err)
117 			BNX2X_ERR("Slowpath objects not yet initialized!\n");
118 		else
119 			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
120 		return false;
121 	}
122 	return true;
123 }
124 
125 /* VFOP operations states */
126 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
127 			      struct bnx2x_queue_init_params *init_params,
128 			      struct bnx2x_queue_setup_params *setup_params,
129 			      u16 q_idx, u16 sb_idx)
130 {
131 	DP(BNX2X_MSG_IOV,
132 	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
133 	   vf->abs_vfid,
134 	   q_idx,
135 	   sb_idx,
136 	   init_params->tx.sb_cq_index,
137 	   init_params->tx.hc_rate,
138 	   setup_params->flags,
139 	   setup_params->txq_params.traffic_type);
140 }
141 
142 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
143 			    struct bnx2x_queue_init_params *init_params,
144 			    struct bnx2x_queue_setup_params *setup_params,
145 			    u16 q_idx, u16 sb_idx)
146 {
147 	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
148 
149 	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
150 	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
151 	   vf->abs_vfid,
152 	   q_idx,
153 	   sb_idx,
154 	   init_params->rx.sb_cq_index,
155 	   init_params->rx.hc_rate,
156 	   setup_params->gen_params.mtu,
157 	   rxq_params->buf_sz,
158 	   rxq_params->sge_buf_sz,
159 	   rxq_params->max_sges_pkt,
160 	   rxq_params->tpa_agg_sz,
161 	   setup_params->flags,
162 	   rxq_params->drop_flags,
163 	   rxq_params->cache_line_log);
164 }
165 
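/* Prepare both the INIT-transition and the SETUP-transition ramrod
 * parameters of a VF queue. The ramrods themselves are sent later, e.g.
 * by bnx2x_vf_queue_create().
 */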
166 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
167 			   struct bnx2x_virtf *vf,
168 			   struct bnx2x_vf_queue *q,
169 			   struct bnx2x_vf_queue_construct_params *p,
170 			   unsigned long q_type)
171 {
172 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
173 	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
174 
175 	/* INIT */
176 
177 	/* Enable host coalescing in the transition to INIT state */
178 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
179 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
180 
181 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
182 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
183 
184 	/* FW SB ID */
185 	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
186 	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
187 
188 	/* context */
189 	init_p->cxts[0] = q->cxt;
190 
191 	/* SETUP */
192 
193 	/* Setup-op general parameters */
194 	setup_p->gen_params.spcl_id = vf->sp_cl_id;
195 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
196 
197 	/* Setup-op pause params:
198 	 * Nothing to do, the pause thresholds are set by default to 0 which
199 	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF).
201 	 */
202 	if (vf->cfg_flags & VF_CFG_FW_FC)
203 		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
204 			  vf->abs_vfid);
205 	/* Setup-op flags:
206 	 * collect statistics, zero statistics, local-switching, security,
207 	 * OV for Flex10, RSS and MCAST for leading
208 	 */
209 	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
210 		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
211 
212 	/* for VFs, enable tx switching, bd coherency, and mac address
213 	 * anti-spoofing
214 	 */
215 	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
216 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
217 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
218 
219 	/* Setup-op rx parameters */
220 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
221 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
222 
223 		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
224 		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
225 		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
226 
227 		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
228 			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
229 	}
230 
231 	/* Setup-op tx parameters */
232 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
233 		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
234 		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
235 	}
236 }
237 
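/* Construct a VF queue: send the INIT and SETUP ramrods and then enable
 * the interrupt on the queue's status block.
 */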
238 static int bnx2x_vf_queue_create(struct bnx2x *bp,
239 				 struct bnx2x_virtf *vf, int qid,
240 				 struct bnx2x_vf_queue_construct_params *qctor)
241 {
242 	struct bnx2x_queue_state_params *q_params;
243 	int rc = 0;
244 
245 	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
246 
247 	/* Prepare ramrod information */
248 	q_params = &qctor->qstate;
249 	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
250 	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
251 
252 	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
253 	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
254 		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
255 		goto out;
256 	}
257 
258 	/* Run Queue 'construction' ramrods */
259 	q_params->cmd = BNX2X_Q_CMD_INIT;
260 	rc = bnx2x_queue_state_change(bp, q_params);
261 	if (rc)
262 		goto out;
263 
264 	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
265 	       sizeof(struct bnx2x_queue_setup_params));
266 	q_params->cmd = BNX2X_Q_CMD_SETUP;
267 	rc = bnx2x_queue_state_change(bp, q_params);
268 	if (rc)
269 		goto out;
270 
271 	/* enable interrupts */
272 	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
273 			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
274 out:
275 	return rc;
276 }
277 
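/* Destroy a VF queue by walking it through the HALT, TERMINATE and
 * CFC_DEL states, then scrub its CDU context.
 */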
278 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
279 				  int qid)
280 {
281 	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
282 				       BNX2X_Q_CMD_TERMINATE,
283 				       BNX2X_Q_CMD_CFC_DEL};
284 	struct bnx2x_queue_state_params q_params;
285 	int rc, i;
286 
287 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
288 
289 	/* Prepare ramrod information */
290 	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
291 	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
292 	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
293 
294 	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
295 	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
296 		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
297 		goto out;
298 	}
299 
300 	/* Run Queue 'destruction' ramrods */
301 	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
302 		q_params.cmd = cmds[i];
303 		rc = bnx2x_queue_state_change(bp, &q_params);
304 		if (rc) {
305 			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
306 			return rc;
307 		}
308 	}
309 out:
310 	/* Clean Context */
311 	if (bnx2x_vfq(vf, qid, cxt)) {
312 		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
313 		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
314 	}
315 
316 	return 0;
317 }
318 
319 static void
320 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
321 {
322 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
323 	if (vf) {
324 		/* the first igu entry belonging to VFs of this PF */
325 		if (!BP_VFDB(bp)->first_vf_igu_entry)
326 			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
327 
328 		/* the first igu entry belonging to this VF */
329 		if (!vf_sb_count(vf))
330 			vf->igu_base_id = igu_sb_id;
331 
332 		++vf_sb_count(vf);
333 		++vf->sb_count;
334 	}
335 	BP_VFDB(bp)->vf_sbs_pool++;
336 }
337 
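/* Count the classification rules currently listed on the vlan-mac object
 * and record the result in *counter.
 */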
338 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
339 					struct bnx2x_vlan_mac_obj *obj,
340 					atomic_t *counter)
341 {
342 	struct list_head *pos;
343 	int read_lock;
344 	int cnt = 0;
345 
346 	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
347 	if (read_lock)
		DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");
349 
350 	list_for_each(pos, &obj->head)
351 		cnt++;
352 
353 	if (!read_lock)
354 		bnx2x_vlan_mac_h_read_unlock(bp, obj);
355 
356 	atomic_set(counter, cnt);
357 }
358 
359 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
360 				   int qid, bool drv_only, bool mac)
361 {
362 	struct bnx2x_vlan_mac_ramrod_params ramrod;
363 	int rc;
364 
365 	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
366 	   mac ? "MACs" : "VLANs");
367 
368 	/* Prepare ramrod params */
369 	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
370 	if (mac) {
371 		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
372 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
373 	} else {
374 		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
375 			&ramrod.user_req.vlan_mac_flags);
376 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
377 	}
378 	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
379 
380 	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
381 	if (drv_only)
382 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
383 	else
384 		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
385 
386 	/* Start deleting */
387 	rc = ramrod.vlan_mac_obj->delete_all(bp,
388 					     ramrod.vlan_mac_obj,
389 					     &ramrod.user_req.vlan_mac_flags,
390 					     &ramrod.ramrod_flags);
391 	if (rc) {
392 		BNX2X_ERR("Failed to delete all %s\n",
393 			  mac ? "MACs" : "VLANs");
394 		return rc;
395 	}
396 
397 	/* Clear the vlan counters */
398 	if (!mac)
399 		atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
400 
401 	return 0;
402 }
403 
404 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
405 				    struct bnx2x_virtf *vf, int qid,
406 				    struct bnx2x_vf_mac_vlan_filter *filter,
407 				    bool drv_only)
408 {
409 	struct bnx2x_vlan_mac_ramrod_params ramrod;
410 	int rc;
411 
412 	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
413 	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
414 	   filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
415 
416 	/* Prepare ramrod params */
417 	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
418 	if (filter->type == BNX2X_VF_FILTER_VLAN) {
419 		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
420 			&ramrod.user_req.vlan_mac_flags);
421 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
422 		ramrod.user_req.u.vlan.vlan = filter->vid;
423 	} else {
424 		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
425 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
426 		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
427 	}
428 	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
429 					    BNX2X_VLAN_MAC_DEL;
430 
431 	/* Verify there are available vlan credits */
432 	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
433 	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
434 	     vf_vlan_rules_cnt(vf))) {
435 		BNX2X_ERR("No credits for vlan [%d >= %d]\n",
436 			  atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
437 			  vf_vlan_rules_cnt(vf));
438 		return -ENOMEM;
439 	}
440 
441 	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
442 	if (drv_only)
443 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
444 	else
445 		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
446 
447 	/* Add/Remove the filter */
448 	rc = bnx2x_config_vlan_mac(bp, &ramrod);
449 	if (rc && rc != -EEXIST) {
450 		BNX2X_ERR("Failed to %s %s\n",
451 			  filter->add ? "add" : "delete",
452 			  filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
453 								"VLAN");
454 		return rc;
455 	}
456 
457 	/* Update the vlan counters */
458 	if (filter->type == BNX2X_VF_FILTER_VLAN)
459 		bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
460 				     &bnx2x_vfq(vf, qid, vlan_count));
461 
462 	return 0;
463 }
464 
465 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
466 				  struct bnx2x_vf_mac_vlan_filters *filters,
467 				  int qid, bool drv_only)
468 {
469 	int rc = 0, i;
470 
471 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
472 
473 	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
474 		return -EINVAL;
475 
476 	/* Prepare ramrod params */
477 	for (i = 0; i < filters->count; i++) {
478 		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
479 					      &filters->filters[i], drv_only);
480 		if (rc)
481 			break;
482 	}
483 
484 	/* Rollback if needed */
485 	if (i != filters->count) {
486 		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
487 			  i, filters->count + 1);
488 		while (--i >= 0) {
489 			filters->filters[i].add = !filters->filters[i].add;
490 			bnx2x_vf_mac_vlan_config(bp, vf, qid,
491 						 &filters->filters[i],
492 						 drv_only);
493 		}
494 	}
495 
496 	/* It's our responsibility to free the filters */
497 	kfree(filters);
498 
499 	return rc;
500 }
501 
502 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
503 			 struct bnx2x_vf_queue_construct_params *qctor)
504 {
505 	int rc;
506 
507 	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
508 
509 	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
510 	if (rc)
511 		goto op_err;
512 
513 	/* Configure vlan0 for leading queue */
514 	if (!qid) {
515 		struct bnx2x_vf_mac_vlan_filter filter;
516 
517 		memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
518 		filter.type = BNX2X_VF_FILTER_VLAN;
519 		filter.add = true;
520 		filter.vid = 0;
521 		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
522 		if (rc)
523 			goto op_err;
524 	}
525 
526 	/* Schedule the configuration of any pending vlan filters */
527 	vf->cfg_flags |= VF_CFG_VLAN;
528 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
529 			       BNX2X_MSG_IOV);
530 	return 0;
531 op_err:
532 	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
533 	return rc;
534 }
535 
536 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
537 			       int qid)
538 {
539 	int rc;
540 
541 	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
542 
543 	/* If needed, clean the filtering data base */
544 	if ((qid == LEADING_IDX) &&
545 	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
546 		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
547 		if (rc)
548 			goto op_err;
549 		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
550 		if (rc)
551 			goto op_err;
552 	}
553 
554 	/* Terminate queue */
555 	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
556 		struct bnx2x_queue_state_params qstate;
557 
558 		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
559 		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
560 		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
561 		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
562 		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
563 		rc = bnx2x_queue_state_change(bp, &qstate);
564 		if (rc)
565 			goto op_err;
566 	}
567 
568 	return 0;
569 op_err:
570 	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
571 	return rc;
572 }
573 
574 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
575 		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
576 {
577 	struct bnx2x_mcast_list_elem *mc = NULL;
578 	struct bnx2x_mcast_ramrod_params mcast;
579 	int rc, i;
580 
581 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
582 
583 	/* Prepare Multicast command */
584 	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
585 	mcast.mcast_obj = &vf->mcast_obj;
586 	if (drv_only)
587 		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
588 	else
589 		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
590 	if (mc_num) {
		mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
593 		if (!mc) {
594 			BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
595 			return -ENOMEM;
596 		}
597 	}
598 
599 	/* clear existing mcasts */
600 	mcast.mcast_list_len = vf->mcast_list_len;
601 	vf->mcast_list_len = mc_num;
602 	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
603 	if (rc) {
604 		BNX2X_ERR("Failed to remove multicasts\n");
605 		kfree(mc);
606 		return rc;
607 	}
608 
609 	/* update mcast list on the ramrod params */
610 	if (mc_num) {
611 		INIT_LIST_HEAD(&mcast.mcast_list);
612 		for (i = 0; i < mc_num; i++) {
613 			mc[i].mac = mcasts[i];
614 			list_add_tail(&mc[i].link,
615 				      &mcast.mcast_list);
616 		}
617 
618 		/* add new mcasts */
619 		mcast.mcast_list_len = mc_num;
620 		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
621 		if (rc)
622 			BNX2X_ERR("Faled to add multicasts\n");
623 		kfree(mc);
624 	}
625 
626 	return rc;
627 }
628 
629 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
630 				  struct bnx2x_rx_mode_ramrod_params *ramrod,
631 				  struct bnx2x_virtf *vf,
632 				  unsigned long accept_flags)
633 {
634 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
635 
636 	memset(ramrod, 0, sizeof(*ramrod));
637 	ramrod->cid = vfq->cid;
638 	ramrod->cl_id = vfq_cl_id(vf, vfq);
639 	ramrod->rx_mode_obj = &bp->rx_mode_obj;
640 	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
641 	ramrod->rx_accept_flags = accept_flags;
642 	ramrod->tx_accept_flags = accept_flags;
643 	ramrod->pstate = &vf->filter_state;
644 	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
645 
646 	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
647 	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
648 	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
649 
650 	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
651 	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
652 }
653 
654 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
655 		    int qid, unsigned long accept_flags)
656 {
657 	struct bnx2x_rx_mode_ramrod_params ramrod;
658 
659 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
660 
661 	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
662 	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
663 	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
664 	return bnx2x_config_rx_mode(bp, &ramrod);
665 }
666 
667 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
668 {
669 	int rc;
670 
671 	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
672 
673 	/* Remove all classification configuration for leading queue */
674 	if (qid == LEADING_IDX) {
675 		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
676 		if (rc)
677 			goto op_err;
678 
679 		/* Remove filtering if feasible */
680 		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
681 			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
682 						     false, false);
683 			if (rc)
684 				goto op_err;
685 			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
686 						     false, true);
687 			if (rc)
688 				goto op_err;
689 			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
690 			if (rc)
691 				goto op_err;
692 		}
693 	}
694 
695 	/* Destroy queue */
696 	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
697 	if (rc)
698 		goto op_err;
699 	return rc;
700 op_err:
701 	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
702 		  vf->abs_vfid, qid, rc);
703 	return rc;
704 }
705 
/* VF enable primitives
 * When pretend is required, the caller is responsible for calling
 * bnx2x_pretend_func() prior to calling these routines.
 */

/* internal vf enable - until the vf is enabled internally, all its
 * transactions are blocked. This routine should always be called last,
 * while still pretending to be the VF.
 */
714 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
715 {
716 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
717 }
718 
719 /* clears vf error in all semi blocks */
720 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
721 {
722 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
723 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
724 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
725 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
726 }
727 
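/* Clear the VF's 'was error' indication in PGLUE_B. The per-path VF bits
 * are spread over four 32-bit registers, 32 VFs per register.
 */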
728 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
729 {
730 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
731 	u32 was_err_reg = 0;
732 
733 	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
746 	}
747 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
748 }
749 
750 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
751 {
752 	int i;
753 	u32 val;
754 
755 	/* Set VF masks and configuration - pretend */
756 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
757 
758 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
759 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
760 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
761 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
762 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
763 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
764 
765 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
766 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
767 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
768 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
769 	val &= ~IGU_VF_CONF_PARENT_MASK;
770 	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
771 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
772 
773 	DP(BNX2X_MSG_IOV,
774 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
775 	   vf->abs_vfid, val);
776 
777 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
778 
779 	/* iterate over all queues, clear sb consumer */
780 	for (i = 0; i < vf_sb_count(vf); i++) {
781 		u8 igu_sb_id = vf_igu_sb(vf, i);
782 
783 		/* zero prod memory */
784 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
785 
786 		/* clear sb state machine */
787 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
788 				       false /* VF */);
789 
790 		/* disable + update */
791 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
792 				    IGU_INT_DISABLE, 1);
793 	}
794 }
795 
796 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
797 {
798 	/* set the VF-PF association in the FW */
799 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
800 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
801 
	/* clear vf errors */
803 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
804 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
805 
806 	/* internal vf-enable - pretend */
807 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
808 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
809 	bnx2x_vf_enable_internal(bp, true);
810 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
811 }
812 
813 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
814 {
	/* Reset the VF in the IGU - interrupts are still disabled at this point */
816 	bnx2x_vf_igu_reset(bp, vf);
817 
818 	/* pretend to enable the vf with the PBF */
819 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
820 	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
821 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
822 }
823 
824 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
825 {
826 	struct pci_dev *dev;
827 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
828 
829 	if (!vf)
830 		return false;
831 
	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev) {
		bool pending = bnx2x_is_pcie_pending(dev);

		/* drop the reference taken by pci_get_bus_and_slot() */
		pci_dev_put(dev);
		return pending;
	}
	return false;
836 }
837 
838 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
839 {
840 	/* Verify no pending pci transactions */
841 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
842 		BNX2X_ERR("PCIE Transactions still pending\n");
843 
844 	return 0;
845 }
846 
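/* Resize the VF's vlan filter credit by moving the difference between the
 * new and the current count to/from the PF's global vlan pool.
 */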
847 static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
848 					  struct bnx2x_virtf *vf,
849 					  int new)
850 {
851 	int num = vf_vlan_rules_cnt(vf);
852 	int diff = new - num;
853 	bool rc = true;
854 
855 	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
856 	   vf->abs_vfid, new, num);
857 
858 	if (diff > 0)
859 		rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
860 	else if (diff < 0)
861 		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
862 
863 	if (rc)
864 		vf_vlan_rules_cnt(vf) = new;
865 	else
866 		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
867 		   vf->abs_vfid);
868 }
869 
870 /* must be called after the number of PF queues and the number of VFs are
871  * both known
872  */
873 static void
874 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
875 {
876 	struct vf_pf_resc_request *resc = &vf->alloc_resc;
877 	u16 vlan_count = 0;
878 
879 	/* will be set only during VF-ACQUIRE */
880 	resc->num_rxqs = 0;
881 	resc->num_txqs = 0;
882 
883 	/* no credit calculations for macs (just yet) */
884 	resc->num_mac_filters = 1;
885 
886 	/* divvy up vlan rules */
887 	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
888 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
889 	vlan_count = 1 << ilog2(vlan_count);
890 	bnx2x_iov_re_set_vlan_filters(bp, vf,
891 				      vlan_count / BNX2X_NR_VIRTFN(bp));
892 
893 	/* no real limitation */
894 	resc->num_mc_filters = 0;
895 
896 	/* num_sbs already set */
897 	resc->num_sbs = vf->sb_count;
898 }
899 
900 /* FLR routines: */
901 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
902 {
903 	/* reset the state variables */
904 	bnx2x_iov_static_resc(bp, vf);
905 	vf->state = VF_FREE;
906 }
907 
908 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
909 {
910 	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
911 
912 	/* DQ usage counter */
913 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
914 	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
915 					"DQ VF usage counter timed out",
916 					poll_cnt);
917 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
918 
919 	/* FW cleanup command - poll for the results */
920 	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
921 				   poll_cnt))
922 		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
923 
924 	/* verify TX hw is flushed */
925 	bnx2x_tx_hw_flushed(bp, poll_cnt);
926 }
927 
928 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
929 {
930 	int rc, i;
931 
932 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
933 
934 	/* the cleanup operations are valid if and only if the VF
935 	 * was first acquired.
936 	 */
937 	for (i = 0; i < vf_rxq_count(vf); i++) {
938 		rc = bnx2x_vf_queue_flr(bp, vf, i);
939 		if (rc)
940 			goto out;
941 	}
942 
943 	/* remove multicasts */
944 	bnx2x_vf_mcast(bp, vf, NULL, 0, true);
945 
946 	/* dispatch final cleanup and wait for HW queues to flush */
947 	bnx2x_vf_flr_clnup_hw(bp, vf);
948 
949 	/* release VF resources */
950 	bnx2x_vf_free_resc(bp, vf);
951 
952 	/* re-open the mailbox */
953 	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
954 	return;
955 out:
956 	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
957 		  vf->abs_vfid, i, rc);
958 }
959 
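/* Run the FLR cleanup flow for every VF marked for it, then acknowledge
 * the handled VFs towards the MCP.
 */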
960 static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
961 {
962 	struct bnx2x_virtf *vf;
963 	int i;
964 
965 	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
966 		/* VF should be RESET & in FLR cleanup states */
967 		if (bnx2x_vf(bp, i, state) != VF_RESET ||
968 		    !bnx2x_vf(bp, i, flr_clnup_stage))
969 			continue;
970 
971 		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
972 		   i, BNX2X_NR_VIRTFN(bp));
973 
974 		vf = BP_VF(bp, i);
975 
976 		/* lock the vf pf channel */
977 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
978 
979 		/* invoke the VF FLR SM */
980 		bnx2x_vf_flr(bp, vf);
981 
982 		/* mark the VF to be ACKED and continue */
983 		vf->flr_clnup_stage = false;
984 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
985 	}
986 
	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the MCP will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM, where an 'all ones' FLR
	 * request is sometimes given by the hypervisor.
	 */
994 	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
995 	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
996 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
997 		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
998 			  bp->vfdb->flrd_vfs[i]);
999 
1000 	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
1001 
1002 	/* clear the acked bits - better yet if the MCP implemented
1003 	 * write to clear semantics
1004 	 */
1005 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1006 		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
1007 }
1008 
1009 void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1010 {
1011 	int i;
1012 
1013 	/* Read FLR'd VFs */
1014 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1015 		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
1016 
1017 	DP(BNX2X_MSG_MCP,
1018 	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
1019 	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1020 
1021 	for_each_vf(bp, i) {
1022 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1023 		u32 reset = 0;
1024 
1025 		if (vf->abs_vfid < 32)
1026 			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
1027 		else
1028 			reset = bp->vfdb->flrd_vfs[1] &
1029 				(1 << (vf->abs_vfid - 32));
1030 
1031 		if (reset) {
1032 			/* set as reset and ready for cleanup */
1033 			vf->state = VF_RESET;
1034 			vf->flr_clnup_stage = true;
1035 
1036 			DP(BNX2X_MSG_IOV,
1037 			   "Initiating Final cleanup for VF %d\n",
1038 			   vf->abs_vfid);
1039 		}
1040 	}
1041 
	/* do the FLR cleanup for all marked VFs */
1043 	bnx2x_vf_flr_clnup(bp);
1044 }
1045 
1046 /* IOV global initialization routines  */
1047 void bnx2x_iov_init_dq(struct bnx2x *bp)
1048 {
1049 	if (!IS_SRIOV(bp))
1050 		return;
1051 
	/* Set the DQ so that the CID reflects the abs_vfid */
1053 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1054 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1055 
	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
	 * to the PF L2 queues.
	 */
1059 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1060 
1061 	/* The VF window size is the log2 of the max number of CIDs per VF */
1062 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1063 
	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
1067 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1068 
1069 	/* No security checks for now -
1070 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
1071 	 * CID range 0 - 0x1ffff
1072 	 */
1073 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1074 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1075 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1076 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1077 
	/* set the VF doorbell threshold. This threshold represents the number
	 * of doorbells allowed in the main DORQ fifo for a specific VF.
	 */
1081 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
1082 }
1083 
1084 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1085 {
1086 	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1087 		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1088 }
1089 
1090 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1091 {
1092 	struct pci_dev *dev = bp->pdev;
1093 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1094 
1095 	return dev->bus->number + ((dev->devfn + iov->offset +
1096 				    iov->stride * vfid) >> 8);
1097 }
1098 
1099 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1100 {
1101 	struct pci_dev *dev = bp->pdev;
1102 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1103 
1104 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1105 }
1106 
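/* Record the VF's share of each SR-IOV BAR: every BAR pair is split
 * equally among the total number of VFs the device exposes.
 */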
1107 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1108 {
1109 	int i, n;
1110 	struct pci_dev *dev = bp->pdev;
1111 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1112 
1113 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1114 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1115 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1116 
1117 		size /= iov->total;
1118 		vf->bars[n].bar = start + size * vf->abs_vfid;
1119 		vf->bars[n].size = size;
1120 	}
1121 }
1122 
1123 static int bnx2x_ari_enabled(struct pci_dev *dev)
1124 {
1125 	return dev->bus->self && dev->bus->self->ari_enabled;
1126 }
1127 
1128 static void
1129 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1130 {
1131 	int sb_id;
1132 	u32 val;
1133 	u8 fid, current_pf = 0;
1134 
1135 	/* IGU in normal mode - read CAM */
1136 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1137 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1138 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1139 			continue;
1140 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1141 		if (fid & IGU_FID_ENCODE_IS_PF)
1142 			current_pf = fid & IGU_FID_PF_NUM_MASK;
1143 		else if (current_pf == BP_FUNC(bp))
1144 			bnx2x_vf_set_igu_info(bp, sb_id,
1145 					      (fid & IGU_FID_VF_NUM_MASK));
1146 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1147 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1148 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1149 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1150 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1151 	}
1152 	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1153 }
1154 
1155 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1156 {
1157 	if (bp->vfdb) {
1158 		kfree(bp->vfdb->vfqs);
1159 		kfree(bp->vfdb->vfs);
1160 		kfree(bp->vfdb);
1161 	}
1162 	bp->vfdb = NULL;
1163 }
1164 
1165 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1166 {
1167 	int pos;
1168 	struct pci_dev *dev = bp->pdev;
1169 
1170 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1171 	if (!pos) {
1172 		BNX2X_ERR("failed to find SRIOV capability in device\n");
1173 		return -ENODEV;
1174 	}
1175 
1176 	iov->pos = pos;
1177 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1178 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1179 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1180 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1181 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1182 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1183 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1184 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1185 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1186 
1187 	return 0;
1188 }
1189 
1190 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1191 {
1192 	u32 val;
1193 
1194 	/* read the SRIOV capability structure
1195 	 * The fields can be read via configuration read or
1196 	 * directly from the device (starting at offset PCICFG_OFFSET)
1197 	 */
1198 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
1199 		return -ENODEV;
1200 
1201 	/* get the number of SRIOV bars */
1202 	iov->nres = 0;
1203 
1204 	/* read the first_vfid */
1205 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1206 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1207 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1208 
1209 	DP(BNX2X_MSG_IOV,
1210 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1211 	   BP_FUNC(bp),
1212 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1213 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1214 
1215 	return 0;
1216 }
1217 
1218 /* must be called after PF bars are mapped */
1219 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1220 		       int num_vfs_param)
1221 {
1222 	int err, i;
1223 	struct bnx2x_sriov *iov;
1224 	struct pci_dev *dev = bp->pdev;
1225 
1226 	bp->vfdb = NULL;
1227 
	/* verify this is a PF */
1229 	if (IS_VF(bp))
1230 		return 0;
1231 
1232 	/* verify sriov capability is present in configuration space */
1233 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1234 		return 0;
1235 
1236 	/* verify chip revision */
1237 	if (CHIP_IS_E1x(bp))
1238 		return 0;
1239 
1240 	/* check if SRIOV support is turned off */
1241 	if (!num_vfs_param)
1242 		return 0;
1243 
1244 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1245 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1246 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1247 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1248 		return 0;
1249 	}
1250 
1251 	/* SRIOV can be enabled only with MSIX */
1252 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
1253 	    int_mode_param == BNX2X_INT_MODE_INTX) {
1254 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1255 		return 0;
1256 	}
1257 
1258 	err = -EIO;
1259 	/* verify ari is enabled */
1260 	if (!bnx2x_ari_enabled(bp->pdev)) {
1261 		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
1262 		return 0;
1263 	}
1264 
1265 	/* verify igu is in normal mode */
1266 	if (CHIP_INT_MODE_IS_BC(bp)) {
1267 		BNX2X_ERR("IGU not normal mode,  SRIOV can not be enabled\n");
1268 		return 0;
1269 	}
1270 
1271 	/* allocate the vfs database */
1272 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1273 	if (!bp->vfdb) {
1274 		BNX2X_ERR("failed to allocate vf database\n");
1275 		err = -ENOMEM;
1276 		goto failed;
1277 	}
1278 
1279 	/* get the sriov info - Linux already collected all the pertinent
1280 	 * information, however the sriov structure is for the private use
1281 	 * of the pci module. Also we want this information regardless
1282 	 * of the hyper-visor.
1283 	 */
1284 	iov = &(bp->vfdb->sriov);
1285 	err = bnx2x_sriov_info(bp, iov);
1286 	if (err)
1287 		goto failed;
1288 
	/* SR-IOV capability was enabled but there are no VFs */
1290 	if (iov->total == 0)
1291 		goto failed;
1292 
1293 	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
1294 
1295 	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
1296 	   num_vfs_param, iov->nr_virtfn);
1297 
1298 	/* allocate the vf array */
1299 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
1300 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
1301 	if (!bp->vfdb->vfs) {
1302 		BNX2X_ERR("failed to allocate vf array\n");
1303 		err = -ENOMEM;
1304 		goto failed;
1305 	}
1306 
1307 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1308 	for_each_vf(bp, i) {
1309 		bnx2x_vf(bp, i, index) = i;
1310 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1311 		bnx2x_vf(bp, i, state) = VF_FREE;
1312 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
1313 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1314 	}
1315 
1316 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1317 	bnx2x_get_vf_igu_cam_info(bp);
1318 
1319 	/* allocate the queue arrays for all VFs */
1320 	bp->vfdb->vfqs = kzalloc(
1321 		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
1322 		GFP_KERNEL);
1323 
1324 	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
1325 
1326 	if (!bp->vfdb->vfqs) {
1327 		BNX2X_ERR("failed to allocate vf queue array\n");
1328 		err = -ENOMEM;
1329 		goto failed;
1330 	}
1331 
1332 	/* Prepare the VFs event synchronization mechanism */
1333 	mutex_init(&bp->vfdb->event_mutex);
1334 
1335 	mutex_init(&bp->vfdb->bulletin_mutex);
1336 
1337 	return 0;
1338 failed:
1339 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
1340 	__bnx2x_iov_free_vfdb(bp);
1341 	return err;
1342 }
1343 
1344 void bnx2x_iov_remove_one(struct bnx2x *bp)
1345 {
1346 	int vf_idx;
1347 
1348 	/* if SRIOV is not enabled there's nothing to do */
1349 	if (!IS_SRIOV(bp))
1350 		return;
1351 
1352 	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
1353 	pci_disable_sriov(bp->pdev);
1354 	DP(BNX2X_MSG_IOV, "sriov disabled\n");
1355 
1356 	/* disable access to all VFs */
1357 	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
1358 		bnx2x_pretend_func(bp,
1359 				   HW_VF_HANDLE(bp,
1360 						bp->vfdb->sriov.first_vf_in_pf +
1361 						vf_idx));
1362 		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
1363 		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
1364 		bnx2x_vf_enable_internal(bp, 0);
1365 		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1366 	}
1367 
1368 	/* free vf database */
1369 	__bnx2x_iov_free_vfdb(bp);
1370 }
1371 
1372 void bnx2x_iov_free_mem(struct bnx2x *bp)
1373 {
1374 	int i;
1375 
1376 	if (!IS_SRIOV(bp))
1377 		return;
1378 
1379 	/* free vfs hw contexts */
1380 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1381 		struct hw_dma *cxt = &bp->vfdb->context[i];
1382 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
1383 	}
1384 
1385 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
1386 		       BP_VFDB(bp)->sp_dma.mapping,
1387 		       BP_VFDB(bp)->sp_dma.size);
1388 
1389 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
1390 		       BP_VF_MBX_DMA(bp)->mapping,
1391 		       BP_VF_MBX_DMA(bp)->size);
1392 
1393 	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
1394 		       BP_VF_BULLETIN_DMA(bp)->mapping,
1395 		       BP_VF_BULLETIN_DMA(bp)->size);
1396 }
1397 
1398 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
1399 {
1400 	size_t tot_size;
1401 	int i, rc = 0;
1402 
1403 	if (!IS_SRIOV(bp))
1404 		return rc;
1405 
1406 	/* allocate vfs hw contexts */
1407 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
1408 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
1409 
1410 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1411 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
1412 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
1413 
1414 		if (cxt->size) {
1415 			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
1416 			if (!cxt->addr)
1417 				goto alloc_mem_err;
1418 		} else {
1419 			cxt->addr = NULL;
1420 			cxt->mapping = 0;
1421 		}
1422 		tot_size -= cxt->size;
1423 	}
1424 
1425 	/* allocate vfs ramrods dma memory - client_init and set_mac */
1426 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
1427 	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
1428 						   tot_size);
1429 	if (!BP_VFDB(bp)->sp_dma.addr)
1430 		goto alloc_mem_err;
1431 	BP_VFDB(bp)->sp_dma.size = tot_size;
1432 
1433 	/* allocate mailboxes */
1434 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
1435 	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
1436 						  tot_size);
1437 	if (!BP_VF_MBX_DMA(bp)->addr)
1438 		goto alloc_mem_err;
1439 
1440 	BP_VF_MBX_DMA(bp)->size = tot_size;
1441 
1442 	/* allocate local bulletin boards */
1443 	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
1444 	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
1445 						       tot_size);
1446 	if (!BP_VF_BULLETIN_DMA(bp)->addr)
1447 		goto alloc_mem_err;
1448 
1449 	BP_VF_BULLETIN_DMA(bp)->size = tot_size;
1450 
1451 	return 0;
1452 
1453 alloc_mem_err:
1454 	return -ENOMEM;
1455 }
1456 
1457 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
1458 			   struct bnx2x_vf_queue *q)
1459 {
1460 	u8 cl_id = vfq_cl_id(vf, q);
1461 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
1462 	unsigned long q_type = 0;
1463 
1464 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1465 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1466 
1467 	/* Queue State object */
1468 	bnx2x_init_queue_obj(bp, &q->sp_obj,
1469 			     cl_id, &q->cid, 1, func_id,
1470 			     bnx2x_vf_sp(bp, vf, q_data),
1471 			     bnx2x_vf_sp_map(bp, vf, q_data),
1472 			     q_type);
1473 
1474 	/* sp indication is set only when vlan/mac/etc. are initialized */
1475 	q->sp_initialized = false;
1476 
1477 	DP(BNX2X_MSG_IOV,
1478 	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
1479 	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
1480 }
1481 
1482 static int bnx2x_max_speed_cap(struct bnx2x *bp)
1483 {
1484 	u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
1485 
1486 	if (supported &
1487 	    (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
1488 		return 20000;
1489 
1490 	return 10000; /* assume lowest supported speed is 10G */
1491 }
1492 
1493 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
1494 {
1495 	struct bnx2x_link_report_data *state = &bp->last_reported_link;
1496 	struct pf_vf_bulletin_content *bulletin;
1497 	struct bnx2x_virtf *vf;
1498 	bool update = true;
1499 	int rc = 0;
1500 
1501 	/* sanity and init */
1502 	rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
1503 	if (rc)
1504 		return rc;
1505 
1506 	mutex_lock(&bp->vfdb->bulletin_mutex);
1507 
1508 	if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
1509 		bulletin->valid_bitmap |= 1 << LINK_VALID;
1510 
1511 		bulletin->link_speed = state->line_speed;
1512 		bulletin->link_flags = 0;
1513 		if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1514 			     &state->link_report_flags))
1515 			bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1516 		if (test_bit(BNX2X_LINK_REPORT_FD,
1517 			     &state->link_report_flags))
1518 			bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
1519 		if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1520 			     &state->link_report_flags))
1521 			bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
1522 		if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1523 			     &state->link_report_flags))
1524 			bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
1525 	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
1526 		   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1527 		bulletin->valid_bitmap |= 1 << LINK_VALID;
1528 		bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1529 	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
1530 		   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1531 		bulletin->valid_bitmap |= 1 << LINK_VALID;
1532 		bulletin->link_speed = bnx2x_max_speed_cap(bp);
1533 		bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
1534 	} else {
1535 		update = false;
1536 	}
1537 
1538 	if (update) {
1539 		DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
1540 		   "vf %d mode %u speed %d flags %x\n", idx,
1541 		   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
1542 
1543 		/* Post update on VF's bulletin board */
1544 		rc = bnx2x_post_vf_bulletin(bp, idx);
1545 		if (rc) {
1546 			BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
1547 			goto out;
1548 		}
1549 	}
1550 
1551 out:
1552 	mutex_unlock(&bp->vfdb->bulletin_mutex);
1553 	return rc;
1554 }
1555 
1556 int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
1557 {
1558 	struct bnx2x *bp = netdev_priv(dev);
1559 	struct bnx2x_virtf *vf = BP_VF(bp, idx);
1560 
1561 	if (!vf)
1562 		return -EINVAL;
1563 
1564 	if (vf->link_cfg == link_state)
		return 0; /* nothing to do */
1566 
1567 	vf->link_cfg = link_state;
1568 
1569 	return bnx2x_iov_link_update_vf(bp, idx);
1570 }
1571 
1572 void bnx2x_iov_link_update(struct bnx2x *bp)
1573 {
1574 	int vfid;
1575 
1576 	if (!IS_SRIOV(bp))
1577 		return;
1578 
1579 	for_each_vf(bp, vfid)
1580 		bnx2x_iov_link_update_vf(bp, vfid);
1581 }
1582 
1583 /* called by bnx2x_nic_load */
1584 int bnx2x_iov_nic_init(struct bnx2x *bp)
1585 {
1586 	int vfid;
1587 
1588 	if (!IS_SRIOV(bp)) {
1589 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1590 		return 0;
1591 	}
1592 
1593 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1594 
1595 	/* let FLR complete ... */
1596 	msleep(100);
1597 
1598 	/* initialize vf database */
1599 	for_each_vf(bp, vfid) {
1600 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1601 
1602 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1603 			BNX2X_CIDS_PER_VF;
1604 
1605 		union cdu_context *base_cxt = (union cdu_context *)
1606 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1607 			(base_vf_cid & (ILT_PAGE_CIDS-1));
1608 
1609 		DP(BNX2X_MSG_IOV,
1610 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1611 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1612 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1613 
1614 		/* init statically provisioned resources */
1615 		bnx2x_iov_static_resc(bp, vf);
1616 
1617 		/* queues are initialized during VF-ACQUIRE */
1618 		vf->filter_state = 0;
1619 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1620 
1621 		/*  init mcast object - This object will be re-initialized
1622 		 *  during VF-ACQUIRE with the proper cl_id and cid.
1623 		 *  It needs to be initialized here so that it can be safely
1624 		 *  handled by a subsequent FLR flow.
1625 		 */
1626 		vf->mcast_list_len = 0;
1627 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1628 				     0xFF, 0xFF, 0xFF,
1629 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
1630 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1631 				     BNX2X_FILTER_MCAST_PENDING,
1632 				     &vf->filter_state,
1633 				     BNX2X_OBJ_TYPE_RX_TX);
1634 
1635 		/* set the mailbox message addresses */
1636 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1637 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1638 			MBX_MSG_ALIGNED_SIZE);
1639 
1640 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1641 			vfid * MBX_MSG_ALIGNED_SIZE;
1642 
1643 		/* Enable vf mailbox */
1644 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1645 	}
1646 
1647 	/* Final VF init */
1648 	for_each_vf(bp, vfid) {
1649 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1650 
1651 		/* fill in the BDF and bars */
1652 		vf->bus = bnx2x_vf_bus(bp, vfid);
1653 		vf->devfn = bnx2x_vf_devfn(bp, vfid);
1654 		bnx2x_vf_set_bars(bp, vf);
1655 
1656 		DP(BNX2X_MSG_IOV,
1657 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1658 		   vf->abs_vfid, vf->bus, vf->devfn,
1659 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1660 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1661 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1662 	}
1663 
1664 	return 0;
1665 }
1666 
1667 /* called by bnx2x_chip_cleanup */
1668 int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1669 {
1670 	int i;
1671 
1672 	if (!IS_SRIOV(bp))
1673 		return 0;
1674 
1675 	/* release all the VFs */
1676 	for_each_vf(bp, i)
1677 		bnx2x_vf_release(bp, BP_VF(bp, i));
1678 
1679 	return 0;
1680 }
1681 
1682 /* called by bnx2x_init_hw_func, returns the next ilt line */
1683 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1684 {
1685 	int i;
1686 	struct bnx2x_ilt *ilt = BP_ILT(bp);
1687 
1688 	if (!IS_SRIOV(bp))
1689 		return line;
1690 
1691 	/* set vfs ilt lines */
1692 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1693 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1694 
1695 		ilt->lines[line+i].page = hw_cxt->addr;
1696 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1697 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1698 	}
1699 	return line + i;
1700 }
1701 
1702 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1703 {
1704 	return ((cid >= BNX2X_FIRST_VF_CID) &&
1705 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1706 }
1707 
1708 static
1709 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1710 					struct bnx2x_vf_queue *vfq,
1711 					union event_ring_elem *elem)
1712 {
1713 	unsigned long ramrod_flags = 0;
1714 	int rc = 0;
1715 
1716 	/* Always push next commands out, don't wait here */
1717 	set_bit(RAMROD_CONT, &ramrod_flags);
1718 
1719 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
1720 	case BNX2X_FILTER_MAC_PENDING:
1721 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1722 					   &ramrod_flags);
1723 		break;
1724 	case BNX2X_FILTER_VLAN_PENDING:
1725 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1726 					    &ramrod_flags);
1727 		break;
1728 	default:
1729 		BNX2X_ERR("Unsupported classification command: %d\n",
1730 			  elem->message.data.eth_event.echo);
1731 		return;
1732 	}
1733 	if (rc < 0)
1734 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1735 	else if (rc > 0)
1736 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1737 }
1738 
1739 static
1740 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1741 			       struct bnx2x_virtf *vf)
1742 {
1743 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
1744 	int rc;
1745 
1746 	rparam.mcast_obj = &vf->mcast_obj;
1747 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1748 
1749 	/* If there are pending mcast commands - send them */
1750 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1751 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1752 		if (rc < 0)
1753 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1754 				  rc);
1755 	}
1756 }
1757 
1758 static
1759 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1760 				 struct bnx2x_virtf *vf)
1761 {
1762 	smp_mb__before_atomic();
1763 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1764 	smp_mb__after_atomic();
1765 }
1766 
1767 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1768 					   struct bnx2x_virtf *vf)
1769 {
1770 	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1771 }
1772 
1773 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1774 {
1775 	struct bnx2x_virtf *vf;
1776 	int qidx = 0, abs_vfid;
1777 	u8 opcode;
1778 	u16 cid = 0xffff;
1779 
1780 	if (!IS_SRIOV(bp))
1781 		return 1;
1782 
	/* first get the cid - the events handled here either carry a cid
	 * (cfc-delete and the classification/filtering completions) or a
	 * vf id (FLR and malicious-VF notifications)
	 */
1786 	opcode = elem->message.opcode;
1787 
1788 	switch (opcode) {
1789 	case EVENT_RING_OPCODE_CFC_DEL:
1790 		cid = SW_CID((__force __le32)
1791 			     elem->message.data.cfc_del_event.cid);
1792 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1793 		break;
1794 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1795 	case EVENT_RING_OPCODE_MULTICAST_RULES:
1796 	case EVENT_RING_OPCODE_FILTERS_RULES:
1797 	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1798 		cid = (elem->message.data.eth_event.echo &
1799 		       BNX2X_SWCID_MASK);
1800 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1801 		break;
1802 	case EVENT_RING_OPCODE_VF_FLR:
1803 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
1804 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1805 		   abs_vfid);
1806 		goto get_vf;
1807 	case EVENT_RING_OPCODE_MALICIOUS_VF:
1808 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
1809 		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1810 			  abs_vfid,
1811 			  elem->message.data.malicious_vf_event.err_id);
1812 		goto get_vf;
1813 	default:
1814 		return 1;
1815 	}
1816 
1817 	/* check if the cid is the VF range */
1818 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1819 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1820 		return 1;
1821 	}
1822 
1823 	/* extract vf and rxq index from vf_cid - relies on the following:
1824 	 * 1. vfid on cid reflects the true abs_vfid
1825 	 * 2. The max number of VFs (per path) is 64
1826 	 */
1827 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1828 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1829 get_vf:
1830 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1831 
1832 	if (!vf) {
1833 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1834 			  cid, abs_vfid);
1835 		return 0;
1836 	}
1837 
1838 	switch (opcode) {
1839 	case EVENT_RING_OPCODE_CFC_DEL:
1840 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1841 		   vf->abs_vfid, qidx);
1842 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1843 						       &vfq_get(vf,
1844 								qidx)->sp_obj,
1845 						       BNX2X_Q_CMD_CFC_DEL);
1846 		break;
1847 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1848 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1849 		   vf->abs_vfid, qidx);
1850 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1851 		break;
1852 	case EVENT_RING_OPCODE_MULTICAST_RULES:
1853 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1854 		   vf->abs_vfid, qidx);
1855 		bnx2x_vf_handle_mcast_eqe(bp, vf);
1856 		break;
1857 	case EVENT_RING_OPCODE_FILTERS_RULES:
1858 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1859 		   vf->abs_vfid, qidx);
1860 		bnx2x_vf_handle_filters_eqe(bp, vf);
1861 		break;
1862 	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1863 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1864 		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_rss_update_eqe(bp, vf);
		break;
1866 	case EVENT_RING_OPCODE_VF_FLR:
1867 	case EVENT_RING_OPCODE_MALICIOUS_VF:
1868 		/* Do nothing for now */
1869 		return 0;
1870 	}
1871 
1872 	return 0;
1873 }
1874 
1875 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1876 {
1877 	/* extract the vf from vf_cid - relies on the following:
1878 	 * 1. vfid on cid reflects the true abs_vfid
1879 	 * 2. The max number of VFs (per path) is 64
1880 	 */
1881 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1882 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1883 }
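
/* Worked example of the cid decoding used above and in
 * bnx2x_iov_eq_sp_event() (illustrative only; a window of
 * BNX2X_VF_CID_WND == 2 bits is assumed purely for the arithmetic):
 *
 *	cid = 0x1b:	qidx     = 0x1b & 0x3         = 3
 *			abs_vfid = (0x1b >> 2) & 0x3f = 6
 *
 * i.e. cid 0x1b addresses queue 3 of the VF whose absolute id is 6.
 */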
1884 
1885 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1886 				struct bnx2x_queue_sp_obj **q_obj)
1887 {
1888 	struct bnx2x_virtf *vf;
1889 
1890 	if (!IS_SRIOV(bp))
1891 		return;
1892 
1893 	vf = bnx2x_vf_by_cid(bp, vf_cid);
1894 
1895 	if (vf) {
1896 		/* extract queue index from vf_cid - relies on the following:
1897 		 * 1. vfid on cid reflects the true abs_vfid
1898 		 * 2. The max number of VFs (per path) is 64
1899 		 */
1900 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1901 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1902 	} else {
1903 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1904 	}
1905 }
1906 
1907 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1908 {
1909 	int i;
1910 	int first_queue_query_index, num_queues_req;
1911 	dma_addr_t cur_data_offset;
1912 	struct stats_query_entry *cur_query_entry;
1913 	u8 stats_count = 0;
1914 	bool is_fcoe = false;
1915 
1916 	if (!IS_SRIOV(bp))
1917 		return;
1918 
1919 	if (!NO_FCOE(bp))
1920 		is_fcoe = true;
1921 
1922 	/* fcoe adds one global request and one queue request */
1923 	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1924 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1925 		(is_fcoe ? 0 : 1);
1926 
1927 	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1928 	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
1929 	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1930 	       first_queue_query_index + num_queues_req);
1931 
1932 	cur_data_offset = bp->fw_stats_data_mapping +
1933 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
1934 		num_queues_req * sizeof(struct per_queue_stats);
1935 
1936 	cur_query_entry = &bp->fw_stats_req->
1937 		query[first_queue_query_index + num_queues_req];
1938 
1939 	for_each_vf(bp, i) {
1940 		int j;
1941 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1942 
1943 		if (vf->state != VF_ENABLED) {
1944 			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1945 			       "vf %d not enabled so no stats for it\n",
1946 			       vf->abs_vfid);
1947 			continue;
1948 		}
1949 
1950 		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
1951 		for_each_vfq(vf, j) {
1952 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1953 
1954 			dma_addr_t q_stats_addr =
1955 				vf->fw_stat_map + j * vf->stats_stride;
1956 
			/* collect stats from active queues only */
1958 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1959 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
1960 				continue;
1961 
1962 			/* create stats query entry for this queue */
1963 			cur_query_entry->kind = STATS_TYPE_QUEUE;
1964 			cur_query_entry->index = vfq_stat_id(vf, rxq);
1965 			cur_query_entry->funcID =
1966 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1967 			cur_query_entry->address.hi =
1968 				cpu_to_le32(U64_HI(q_stats_addr));
1969 			cur_query_entry->address.lo =
1970 				cpu_to_le32(U64_LO(q_stats_addr));
1971 			DP(BNX2X_MSG_IOV,
1972 			   "added address %x %x for vf %d queue %d client %d\n",
1973 			   cur_query_entry->address.hi,
1974 			   cur_query_entry->address.lo, cur_query_entry->funcID,
1975 			   j, cur_query_entry->index);
1976 			cur_query_entry++;
1977 			cur_data_offset += sizeof(struct per_queue_stats);
1978 			stats_count++;
1979 
1980 			/* all stats are coalesced to the leading queue */
1981 			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1982 				break;
1983 		}
1984 	}
1985 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1986 }
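
/* Worked example for the per-queue stats address computed above
 * (values illustrative): with vf->fw_stat_map == 0x1000 and
 * vf->stats_stride == 0x100, queue 2 reports into
 *
 *	q_stats_addr = 0x1000 + 2 * 0x100 = 0x1200
 *
 * When VF_CFG_STATS_COALESCE is set only the leading queue gets a query
 * entry, so every counter lands at fw_stat_map itself.
 */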
1987 
1988 static inline
1989 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
1990 {
1991 	int i;
1992 	struct bnx2x_virtf *vf = NULL;
1993 
1994 	for_each_vf(bp, i) {
1995 		vf = BP_VF(bp, i);
1996 		if (stat_id >= vf->igu_base_id &&
1997 		    stat_id < vf->igu_base_id + vf_sb_count(vf))
1998 			break;
1999 	}
2000 	return vf;
2001 }
2002 
2003 /* VF API helpers */
2004 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2005 				u8 enable)
2006 {
2007 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2008 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2009 
2010 	REG_WR(bp, reg, val);
2011 }
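
/* Sketch of the permission table entry written above; the meaning of
 * bit 6 as a "valid/enable" flag is inferred from the encoding, not
 * restated from documentation:
 *
 *	enable:	 val = abs_vfid | (1 << 6);	// e.g. vf 5 -> 0x45
 *	disable: val = 0;			// entry cleared
 *
 * Each queue zone qid owns one 32-bit entry at
 * PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4.
 */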
2012 
2013 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2014 {
2015 	int i;
2016 
2017 	for_each_vfq(vf, i)
2018 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2019 				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
2020 }
2021 
2022 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2023 {
2024 	u32 val;
2025 
	/* clear the VF configuration - pretend to be the VF for the access */
2027 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2028 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2029 	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2030 		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2031 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2032 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2033 }
2034 
2035 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2036 {
2037 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2038 		     BNX2X_VF_MAX_QUEUES);
2039 }
2040 
2041 static
2042 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2043 			    struct vf_pf_resc_request *req_resc)
2044 {
2045 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2046 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2047 
2048 	/* Save a vlan filter for the Hypervisor */
2049 	return ((req_resc->num_rxqs <= rxq_cnt) &&
2050 		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
2052 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2053 		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
2054 }
2055 
2056 /* CORE VF API */
2057 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2058 		     struct vf_pf_resc_request *resc)
2059 {
2060 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2061 		BNX2X_CIDS_PER_VF;
2062 
2063 	union cdu_context *base_cxt = (union cdu_context *)
2064 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2065 		(base_vf_cid & (ILT_PAGE_CIDS-1));
2066 	int i;
2067 
	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed
	 * the already acquired numbers.
	 */
2073 	if (vf->state == VF_ACQUIRED) {
2074 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2075 		   vf->abs_vfid);
2076 
2077 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must not exceed the previously acquired numbers\n",
2079 				  vf->abs_vfid);
2080 			return -EINVAL;
2081 		}
2082 		return 0;
2083 	}
2084 
2085 	/* Otherwise vf state must be 'free' or 'reset' */
2086 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
2087 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2088 			  vf->abs_vfid, vf->state);
2089 		return -EINVAL;
2090 	}
2091 
	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
2096 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2097 		DP(BNX2X_MSG_IOV,
2098 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* the mailbox response will carry the maximal available values */
2100 		return -ENOMEM;
2101 	}
2102 
2103 	/* Set resources counters - 0 request means max available */
2104 	vf_sb_count(vf) = resc->num_sbs;
2105 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2106 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2107 	if (resc->num_mac_filters)
2108 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2109 	/* Add an additional vlan filter credit for the hypervisor */
2110 	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
2111 
2112 	DP(BNX2X_MSG_IOV,
2113 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2114 	   vf_sb_count(vf), vf_rxq_count(vf),
2115 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
2116 	   vf_vlan_rules_visible_cnt(vf));
2117 
2118 	/* Initialize the queues */
2119 	if (!vf->vfqs) {
2120 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2121 		return -EINVAL;
2122 	}
2123 
2124 	for_each_vfq(vf, i) {
2125 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
2126 
2127 		if (!q) {
2128 			BNX2X_ERR("q number %d was not allocated\n", i);
2129 			return -EINVAL;
2130 		}
2131 
2132 		q->index = i;
2133 		q->cxt = &((base_cxt + i)->eth);
2134 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2135 
2136 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2137 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2138 
2139 		/* init SP objects */
2140 		bnx2x_vfq_init(bp, vf, q);
2141 	}
2142 	vf->state = VF_ACQUIRED;
2143 	return 0;
2144 }
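
/* Example of the "0 request means max available" convention applied in
 * bnx2x_vf_acquire() above (illustrative request, not driver code): a
 * VF asking for
 *
 *	resc->num_sbs = 2, resc->num_rxqs = 0, resc->num_txqs = 0
 *
 * ends up with vf_sb_count(vf) == 2 while the rx/tx queue counts default
 * to bnx2x_vf_max_queue_cnt(bp, vf), and one extra vlan filter credit is
 * reserved for the hypervisor on top of resc->num_vlan_filters.
 */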
2145 
2146 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2147 {
2148 	struct bnx2x_func_init_params func_init = {0};
2149 	u16 flags = 0;
2150 	int i;
2151 
2152 	/* the sb resources are initialized at this point, do the
2153 	 * FW/HW initializations
2154 	 */
2155 	for_each_vf_sb(vf, i)
2156 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2157 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2158 
2159 	/* Sanity checks */
2160 	if (vf->state != VF_ACQUIRED) {
2161 		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2162 		   vf->abs_vfid, vf->state);
2163 		return -EINVAL;
2164 	}
2165 
2166 	/* let FLR complete ... */
2167 	msleep(100);
2168 
2169 	/* FLR cleanup epilogue */
2170 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2171 		return -EBUSY;
2172 
2173 	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2175 
2176 	/* vf init */
2177 	if (vf->cfg_flags & VF_CFG_STATS)
2178 		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2179 
2180 	if (vf->cfg_flags & VF_CFG_TPA)
2181 		flags |= FUNC_FLG_TPA;
2182 
2183 	if (is_vf_multi(vf))
2184 		flags |= FUNC_FLG_RSS;
2185 
2186 	/* function setup */
2187 	func_init.func_flgs = flags;
2188 	func_init.pf_id = BP_FUNC(bp);
2189 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2190 	func_init.fw_stat_map = vf->fw_stat_map;
2191 	func_init.spq_map = vf->spq_map;
2192 	func_init.spq_prod = 0;
2193 	bnx2x_func_init(bp, &func_init);
2194 
2195 	/* Enable the vf */
2196 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
2197 	bnx2x_vf_enable_traffic(bp, vf);
2198 
2199 	/* queue protection table */
2200 	for_each_vfq(vf, i)
2201 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2202 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2203 
2204 	vf->state = VF_ENABLED;
2205 
2206 	/* update vf bulletin board */
2207 	bnx2x_post_vf_bulletin(bp, vf->index);
2208 
2209 	return 0;
2210 }
2211 
2212 struct set_vf_state_cookie {
2213 	struct bnx2x_virtf *vf;
2214 	u8 state;
2215 };
2216 
2217 static void bnx2x_set_vf_state(void *cookie)
2218 {
2219 	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2220 
2221 	p->vf->state = p->state;
2222 }
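
/* The cookie/callback pair above exists so that bnx2x_vf_close() can
 * flip the VF state without racing the statistics machinery:
 * bnx2x_stats_safe_exec() runs the callback while stats ramrods are
 * quiesced. Usage (same pattern as in bnx2x_vf_close() below):
 *
 *	struct set_vf_state_cookie cookie = {
 *		.vf	= vf,
 *		.state	= VF_ACQUIRED,
 *	};
 *	bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
 */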
2223 
2224 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2225 {
2226 	int rc = 0, i;
2227 
2228 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2229 
2230 	/* Close all queues */
2231 	for (i = 0; i < vf_rxq_count(vf); i++) {
2232 		rc = bnx2x_vf_queue_teardown(bp, vf, i);
2233 		if (rc)
2234 			goto op_err;
2235 	}
2236 
2237 	/* disable the interrupts */
2238 	DP(BNX2X_MSG_IOV, "disabling igu\n");
2239 	bnx2x_vf_igu_disable(bp, vf);
2240 
2241 	/* disable the VF */
2242 	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2243 	bnx2x_vf_clr_qtbl(bp, vf);
2244 
	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer; the VF will free
	 * that buffer as soon as we return from the close flow.
	 */
2249 	{
2250 		struct set_vf_state_cookie cookie;
2251 
2252 		cookie.vf = vf;
2253 		cookie.state = VF_ACQUIRED;
2254 		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2255 	}
2256 
2257 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
2258 
2259 	return 0;
2260 op_err:
2261 	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2262 	return rc;
2263 }
2264 
/* VF release can be called in two cases: 1. the VF was acquired
 * but not enabled; 2. the VF was enabled or is in the process of
 * being enabled
 */
2269 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2270 {
2271 	int rc;
2272 
2273 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2274 	   vf->state == VF_FREE ? "Free" :
2275 	   vf->state == VF_ACQUIRED ? "Acquired" :
2276 	   vf->state == VF_ENABLED ? "Enabled" :
2277 	   vf->state == VF_RESET ? "Reset" :
2278 	   "Unknown");
2279 
2280 	switch (vf->state) {
2281 	case VF_ENABLED:
2282 		rc = bnx2x_vf_close(bp, vf);
2283 		if (rc)
2284 			goto op_err;
2285 		/* Fallthrough to release resources */
2286 	case VF_ACQUIRED:
2287 		DP(BNX2X_MSG_IOV, "about to free resources\n");
2288 		bnx2x_vf_free_resc(bp, vf);
2289 		break;
2290 
2291 	case VF_FREE:
2292 	case VF_RESET:
2293 	default:
2294 		break;
2295 	}
2296 	return 0;
2297 op_err:
2298 	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2299 	return rc;
2300 }
2301 
2302 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2303 			struct bnx2x_config_rss_params *rss)
2304 {
2305 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2306 	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2307 	return bnx2x_config_rss(bp, rss);
2308 }
2309 
2310 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2311 			struct vfpf_tpa_tlv *tlv,
2312 			struct bnx2x_queue_update_tpa_params *params)
2313 {
2314 	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2315 	struct bnx2x_queue_state_params qstate;
2316 	int qid, rc = 0;
2317 
2318 	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2319 
2320 	/* Set ramrod params */
2321 	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2322 	memcpy(&qstate.params.update_tpa, params,
2323 	       sizeof(struct bnx2x_queue_update_tpa_params));
2324 	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2325 	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
2326 
2327 	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2328 		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2329 		qstate.params.update_tpa.sge_map = sge_addr[qid];
2330 		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2331 		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2332 		   U64_LO(sge_addr[qid]));
2333 		rc = bnx2x_queue_state_change(bp, &qstate);
2334 		if (rc) {
2335 			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2336 				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2337 				  vf->abs_vfid, qid);
2338 			return rc;
2339 		}
2340 	}
2341 
2342 	return rc;
2343 }
2344 
2345 /* VF release ~ VF close + VF release-resources
2346  * Release is the ultimate SW shutdown and is called whenever an
2347  * irrecoverable error is encountered.
2348  */
2349 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2350 {
2351 	int rc;
2352 
2353 	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2354 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2355 
2356 	rc = bnx2x_vf_free(bp, vf);
2357 	if (rc)
2358 		WARN(rc,
		     "VF[%d] Failed to free resources during release - rc=%d\n",
2360 		     vf->abs_vfid, rc);
2361 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2362 	return rc;
2363 }
2364 
2365 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
2366 			      struct bnx2x_virtf *vf, u32 *sbdf)
2367 {
2368 	*sbdf = vf->devfn | (vf->bus << 8);
2369 }
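
/* Worked example of the sbdf encoding above (illustrative values):
 * bus 0x03 and devfn 0x12 (device 2, function 2) combine to
 *
 *	sbdf = 0x12 | (0x03 << 8) = 0x0312
 *
 * Only bus/device/function are encoded; the PCI domain is not.
 */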
2370 
2371 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2372 			      enum channel_tlvs tlv)
2373 {
2374 	/* we don't lock the channel for unsupported tlvs */
2375 	if (!bnx2x_tlv_supported(tlv)) {
2376 		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2377 		return;
2378 	}
2379 
2380 	/* lock the channel */
2381 	mutex_lock(&vf->op_mutex);
2382 
2383 	/* record the locking op */
2384 	vf->op_current = tlv;
2385 
2386 	/* log the lock */
2387 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2388 	   vf->abs_vfid, tlv);
2389 }
2390 
2391 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2392 				enum channel_tlvs expected_tlv)
2393 {
2394 	enum channel_tlvs current_tlv;
2395 
2396 	if (!vf) {
2397 		BNX2X_ERR("VF was %p\n", vf);
2398 		return;
2399 	}
2400 
2401 	current_tlv = vf->op_current;
2402 
2403 	/* we don't unlock the channel for unsupported tlvs */
2404 	if (!bnx2x_tlv_supported(expected_tlv))
2405 		return;
2406 
2407 	WARN(expected_tlv != vf->op_current,
2408 	     "lock mismatch: expected %d found %d", expected_tlv,
2409 	     vf->op_current);
2410 
	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock - use the tlv sampled before it was cleared */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
2420 }
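
/* Typical pairing of the two channel helpers above when the PF drives
 * an operation on behalf of a VF (sketch; bnx2x_set_vf_mac() below is a
 * real instance of this pattern):
 *
 *	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 *	... perform the work that must not race VF requests ...
 *	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 *
 * The tlv passed to unlock must match the one used to lock, otherwise
 * the WARN above fires.
 */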
2421 
2422 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2423 {
2424 	struct bnx2x_queue_state_params q_params;
2425 	u32 prev_flags;
2426 	int i, rc;
2427 
2428 	/* Verify changes are needed and record current Tx switching state */
2429 	prev_flags = bp->flags;
2430 	if (enable)
2431 		bp->flags |= TX_SWITCHING;
2432 	else
2433 		bp->flags &= ~TX_SWITCHING;
2434 	if (prev_flags == bp->flags)
2435 		return 0;
2436 
2437 	/* Verify state enables the sending of queue ramrods */
2438 	if ((bp->state != BNX2X_STATE_OPEN) ||
2439 	    (bnx2x_get_q_logical_state(bp,
2440 				      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2441 	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
2442 		return 0;
2443 
2444 	/* send q. update ramrod to configure Tx switching */
2445 	memset(&q_params, 0, sizeof(q_params));
2446 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2447 	q_params.cmd = BNX2X_Q_CMD_UPDATE;
2448 	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2449 		  &q_params.params.update.update_flags);
2450 	if (enable)
2451 		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2452 			  &q_params.params.update.update_flags);
2453 	else
2454 		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2455 			    &q_params.params.update.update_flags);
2456 
2457 	/* send the ramrod on all the queues of the PF */
2458 	for_each_eth_queue(bp, i) {
2459 		struct bnx2x_fastpath *fp = &bp->fp[i];
2460 
2461 		/* Set the appropriate Queue object */
2462 		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2463 
2464 		/* Update the Queue state */
2465 		rc = bnx2x_queue_state_change(bp, &q_params);
2466 		if (rc) {
2467 			BNX2X_ERR("Failed to configure Tx switching\n");
2468 			return rc;
2469 		}
2470 	}
2471 
2472 	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2473 	return 0;
2474 }
2475 
2476 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
2477 {
2478 	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
2479 
2480 	if (!IS_SRIOV(bp)) {
2481 		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2482 		return -EINVAL;
2483 	}
2484 
2485 	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2486 	   num_vfs_param, BNX2X_NR_VIRTFN(bp));
2487 
2488 	/* HW channel is only operational when PF is up */
2489 	if (bp->state != BNX2X_STATE_OPEN) {
2490 		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
2491 		return -EINVAL;
2492 	}
2493 
2494 	/* we are always bound by the total_vfs in the configuration space */
2495 	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2496 		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2497 			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
2498 		num_vfs_param = BNX2X_NR_VIRTFN(bp);
2499 	}
2500 
2501 	bp->requested_nr_virtfn = num_vfs_param;
2502 	if (num_vfs_param == 0) {
2503 		bnx2x_set_pf_tx_switching(bp, false);
2504 		pci_disable_sriov(dev);
2505 		return 0;
2506 	} else {
2507 		return bnx2x_enable_sriov(bp);
2508 	}
2509 }
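
/* bnx2x_sriov_configure() is wired up as the PCI sriov_configure hook,
 * so it is normally exercised through sysfs, e.g. (device address
 * illustrative):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * A non-zero count enables SR-IOV (capped at total_vfs); zero tears it
 * down and disables PF Tx switching.
 */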
2510 
2511 #define IGU_ENTRY_SIZE 4
2512 
2513 int bnx2x_enable_sriov(struct bnx2x *bp)
2514 {
2515 	int rc = 0, req_vfs = bp->requested_nr_virtfn;
2516 	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2517 	u32 igu_entry, address;
2518 	u16 num_vf_queues;
2519 
2520 	if (req_vfs == 0)
2521 		return 0;
2522 
2523 	first_vf = bp->vfdb->sriov.first_vf_in_pf;
2524 
2525 	/* statically distribute vf sb pool between VFs */
2526 	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
2527 			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);
2528 
2529 	/* zero previous values learned from igu cam */
2530 	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2531 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2532 
2533 		vf->sb_count = 0;
2534 		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2535 	}
2536 	bp->vfdb->vf_sbs_pool = 0;
2537 
2538 	/* prepare IGU cam */
2539 	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2540 	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
2541 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2542 		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2543 			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2544 				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2545 				IGU_REG_MAPPING_MEMORY_VALID;
2546 			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2547 			   sb_idx, vf_idx);
2548 			REG_WR(bp, address, igu_entry);
2549 			sb_idx++;
2550 			address += IGU_ENTRY_SIZE;
2551 		}
2552 	}
2553 
2554 	/* Reinitialize vf database according to igu cam */
2555 	bnx2x_get_vf_igu_cam_info(bp);
2556 
2557 	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2558 	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2559 
2560 	qcount = 0;
2561 	for_each_vf(bp, vf_idx) {
2562 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2563 
2564 		/* set local queue arrays */
2565 		vf->vfqs = &bp->vfdb->vfqs[qcount];
2566 		qcount += vf_sb_count(vf);
2567 		bnx2x_iov_static_resc(bp, vf);
2568 	}
2569 
2570 	/* prepare msix vectors in VF configuration space - the value in the
2571 	 * PCI configuration space should be the index of the last entry,
2572 	 * namely one less than the actual size of the table
2573 	 */
2574 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2575 		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2576 		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2577 		       num_vf_queues - 1);
2578 		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
2579 		   vf_idx, num_vf_queues - 1);
2580 	}
2581 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2582 
	/* enable sriov. This will probe all the VFs, and consequently cause
	 * the "acquire" messages to appear on the VF PF channel.
	 * First tear down any previous enablement so the probe starts clean.
	 */
2586 	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
2587 	bnx2x_disable_sriov(bp);
2588 
2589 	rc = bnx2x_set_pf_tx_switching(bp, true);
2590 	if (rc)
2591 		return rc;
2592 
2593 	rc = pci_enable_sriov(bp->pdev, req_vfs);
2594 	if (rc) {
2595 		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
2596 		return rc;
2597 	}
2598 	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2599 	return req_vfs;
2600 }
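
/* Worked example of the IGU CAM entry programmed above: each entry
 * binds one status block to a (function id, vector) pair and sets the
 * valid flag, e.g. for vf_idx 3, vector 1:
 *
 *	igu_entry = 3 << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
 *		    1 << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
 *		    IGU_REG_MAPPING_MEMORY_VALID;
 *
 * bnx2x_get_vf_igu_cam_info() then re-reads the CAM to rebuild the
 * per-VF sb counts.
 */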
2601 
2602 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2603 {
2604 	int vfidx;
2605 	struct pf_vf_bulletin_content *bulletin;
2606 
2607 	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2608 	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
2610 		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
2611 			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
2612 	}
2613 }
2614 
2615 void bnx2x_disable_sriov(struct bnx2x *bp)
2616 {
2617 	pci_disable_sriov(bp->pdev);
2618 }
2619 
2620 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2621 			    struct bnx2x_virtf **vf,
2622 			    struct pf_vf_bulletin_content **bulletin,
2623 			    bool test_queue)
2624 {
2625 	if (bp->state != BNX2X_STATE_OPEN) {
2626 		BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
2627 		return -EINVAL;
2628 	}
2629 
2630 	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2632 		return -EINVAL;
2633 	}
2634 
2635 	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
2636 		BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
2637 			  vfidx, BNX2X_NR_VIRTFN(bp));
2638 		return -EINVAL;
2639 	}
2640 
2641 	/* init members */
2642 	*vf = BP_VF(bp, vfidx);
2643 	*bulletin = BP_VF_BULLETIN(bp, vfidx);
2644 
2645 	if (!*vf) {
2646 		BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
2647 		return -EINVAL;
2648 	}
2649 
2650 	if (test_queue && !(*vf)->vfqs) {
2651 		BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
2652 			  vfidx);
2653 		return -EINVAL;
2654 	}
2655 
2656 	if (!*bulletin) {
2657 		BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
2658 			  vfidx);
2659 		return -EINVAL;
2660 	}
2661 
2662 	return 0;
2663 }
2664 
2665 int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2666 			struct ifla_vf_info *ivi)
2667 {
2668 	struct bnx2x *bp = netdev_priv(dev);
2669 	struct bnx2x_virtf *vf = NULL;
2670 	struct pf_vf_bulletin_content *bulletin = NULL;
2671 	struct bnx2x_vlan_mac_obj *mac_obj;
2672 	struct bnx2x_vlan_mac_obj *vlan_obj;
2673 	int rc;
2674 
2675 	/* sanity and init */
2676 	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2677 	if (rc)
2678 		return rc;
2679 
2680 	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2681 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2682 	if (!mac_obj || !vlan_obj) {
2683 		BNX2X_ERR("VF partially initialized\n");
2684 		return -EINVAL;
2685 	}
2686 
2687 	ivi->vf = vfidx;
2688 	ivi->qos = 0;
2689 	ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2690 	ivi->min_tx_rate = 0;
	ivi->spoofchk = 1; /* always enabled */
2692 	if (vf->state == VF_ENABLED) {
2693 		/* mac and vlan are in vlan_mac objects */
2694 		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
2695 			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
2696 						0, ETH_ALEN);
2697 			vlan_obj->get_n_elements(bp, vlan_obj, 1,
2698 						 (u8 *)&ivi->vlan, 0,
2699 						 VLAN_HLEN);
2700 		}
2701 	} else {
2702 		mutex_lock(&bp->vfdb->bulletin_mutex);
2703 		/* mac */
2704 		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
2706 			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
2707 		else
2708 			/* function has not been loaded yet. Show mac as 0s */
2709 			memset(&ivi->mac, 0, ETH_ALEN);
2710 
2711 		/* vlan */
2712 		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
2714 			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2715 		else
2716 			/* function has not been loaded yet. Show vlans as 0s */
2717 			memset(&ivi->vlan, 0, VLAN_HLEN);
2718 
2719 		mutex_unlock(&bp->vfdb->bulletin_mutex);
2720 	}
2721 
2722 	return 0;
2723 }
2724 
2725 /* New mac for VF. Consider these cases:
2726  * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2727  *    supply at acquire.
2728  * 2. VF has already been acquired but has not yet initialized - store in local
2729  *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
2730  *    will configure this mac when it is ready.
2731  * 3. VF has already initialized but has not yet setup a queue - post the new
2732  *    mac on VF's bulletin board right now. VF will configure this mac when it
2733  *    is ready.
2734  * 4. VF has already set a queue - delete any macs already configured for this
2735  *    queue and manually config the new mac.
 * In any event, once this function has been called, refuse any attempt by the
 * VF to configure any mac for itself except this one. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending
 * a mac configuration request, the PF will simply fail the request and the VF
 * can try again after consulting its bulletin board.
2741  */
2742 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
2743 {
2744 	struct bnx2x *bp = netdev_priv(dev);
2745 	int rc, q_logical_state;
2746 	struct bnx2x_virtf *vf = NULL;
2747 	struct pf_vf_bulletin_content *bulletin = NULL;
2748 
2749 	if (!is_valid_ether_addr(mac)) {
2750 		BNX2X_ERR("mac address invalid\n");
2751 		return -EINVAL;
2752 	}
2753 
2754 	/* sanity and init */
2755 	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2756 	if (rc)
2757 		return rc;
2758 
2759 	mutex_lock(&bp->vfdb->bulletin_mutex);
2760 
	/* update PF's copy of the VF's bulletin. The PF will no longer
	 * accept mac configuration requests from the VF unless they match
	 * this mac
	 */
2764 	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2765 	memcpy(bulletin->mac, mac, ETH_ALEN);
2766 
2767 	/* Post update on VF's bulletin board */
2768 	rc = bnx2x_post_vf_bulletin(bp, vfidx);
2769 
2770 	/* release lock before checking return code */
2771 	mutex_unlock(&bp->vfdb->bulletin_mutex);
2772 
2773 	if (rc) {
2774 		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2775 		return rc;
2776 	}
2777 
2778 	q_logical_state =
2779 		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
2780 	if (vf->state == VF_ENABLED &&
2781 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2782 		/* configure the mac in device on this vf's queue */
2783 		unsigned long ramrod_flags = 0;
2784 		struct bnx2x_vlan_mac_obj *mac_obj;
2785 
2786 		/* User should be able to see failure reason in system logs */
2787 		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2788 			return -EINVAL;
2789 
2790 		/* must lock vfpf channel to protect against vf flows */
2791 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2792 
2793 		/* remove existing eth macs */
2794 		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2795 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2796 		if (rc) {
2797 			BNX2X_ERR("failed to delete eth macs\n");
2798 			rc = -EINVAL;
2799 			goto out;
2800 		}
2801 
2802 		/* remove existing uc list macs */
2803 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2804 		if (rc) {
2805 			BNX2X_ERR("failed to delete uc_list macs\n");
2806 			rc = -EINVAL;
2807 			goto out;
2808 		}
2809 
2810 		/* configure the new mac to device */
2811 		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2812 		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
2813 				  BNX2X_ETH_MAC, &ramrod_flags);
2814 
2815 out:
2816 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2817 	}
2818 
2819 	return rc;
2820 }
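
/* bnx2x_set_vf_mac() backs the ndo_set_vf_mac callback, so from the
 * hypervisor it is normally reached via iproute2, e.g. (interface name
 * and address illustrative):
 *
 *	ip link set dev eth0 vf 0 mac 02:11:22:33:44:55
 *
 * After this, the PF rejects any attempt by VF 0 to configure a
 * different mac until the bulletin entry is changed again.
 */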
2821 
2822 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
2823 {
2824 	struct bnx2x_queue_state_params q_params = {NULL};
2825 	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
2826 	struct bnx2x_queue_update_params *update_params;
2827 	struct pf_vf_bulletin_content *bulletin = NULL;
2828 	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2829 	struct bnx2x *bp = netdev_priv(dev);
2830 	struct bnx2x_vlan_mac_obj *vlan_obj;
2831 	unsigned long vlan_mac_flags = 0;
2832 	unsigned long ramrod_flags = 0;
2833 	struct bnx2x_virtf *vf = NULL;
2834 	unsigned long accept_flags;
2835 	int rc;
2836 
2837 	if (vlan > 4095) {
2838 		BNX2X_ERR("illegal vlan value %d\n", vlan);
2839 		return -EINVAL;
2840 	}
2841 
	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, qos);
2844 
2845 	/* sanity and init */
2846 	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2847 	if (rc)
2848 		return rc;
2849 
	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet and we can
	 * only configure the vlan later when it is. Treat vlan id 0 as
	 * removing the host tag.
	 */
2856 	mutex_lock(&bp->vfdb->bulletin_mutex);
2857 
2858 	if (vlan > 0)
2859 		bulletin->valid_bitmap |= 1 << VLAN_VALID;
2860 	else
2861 		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
2862 	bulletin->vlan = vlan;
2863 
2864 	mutex_unlock(&bp->vfdb->bulletin_mutex);
2865 
2866 	/* is vf initialized and queue set up? */
2867 	if (vf->state != VF_ENABLED ||
2868 	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2869 	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
2870 		return rc;
2871 
2872 	/* User should be able to see error in system logs */
2873 	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2874 		return -EINVAL;
2875 
2876 	/* must lock vfpf channel to protect against vf flows */
2877 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2878 
2879 	/* remove existing vlans */
2880 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2881 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2882 	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
2883 				  &ramrod_flags);
2884 	if (rc) {
2885 		BNX2X_ERR("failed to delete vlans\n");
2886 		rc = -EINVAL;
2887 		goto out;
2888 	}
2889 
2890 	/* need to remove/add the VF's accept_any_vlan bit */
2891 	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2892 	if (vlan)
2893 		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2894 	else
2895 		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2896 
2897 	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2898 			      accept_flags);
2899 	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2900 	bnx2x_config_rx_mode(bp, &rx_ramrod);
2901 
2902 	/* configure the new vlan to device */
2903 	memset(&ramrod_param, 0, sizeof(ramrod_param));
2904 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2905 	ramrod_param.vlan_mac_obj = vlan_obj;
2906 	ramrod_param.ramrod_flags = ramrod_flags;
2907 	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
2908 		&ramrod_param.user_req.vlan_mac_flags);
2909 	ramrod_param.user_req.u.vlan.vlan = vlan;
2910 	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
2911 	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2912 	if (rc) {
2913 		BNX2X_ERR("failed to configure vlan\n");
		rc = -EINVAL;
2915 		goto out;
2916 	}
2917 
2918 	/* send queue update ramrod to configure default vlan and silent
2919 	 * vlan removal
2920 	 */
2921 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2922 	q_params.cmd = BNX2X_Q_CMD_UPDATE;
2923 	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
2924 	update_params = &q_params.params.update;
2925 	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
2926 		  &update_params->update_flags);
2927 	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
2928 		  &update_params->update_flags);
2929 	if (vlan == 0) {
2930 		/* if vlan is 0 then we want to leave the VF traffic
2931 		 * untagged, and leave the incoming traffic untouched
2932 		 * (i.e. do not remove any vlan tags).
2933 		 */
2934 		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2935 			    &update_params->update_flags);
2936 		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2937 			    &update_params->update_flags);
2938 	} else {
2939 		/* configure default vlan to vf queue and set silent
2940 		 * vlan removal (the vf remains unaware of this vlan).
2941 		 */
2942 		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2943 			  &update_params->update_flags);
2944 		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2945 			  &update_params->update_flags);
2946 		update_params->def_vlan = vlan;
2947 		update_params->silent_removal_value =
2948 			vlan & VLAN_VID_MASK;
2949 		update_params->silent_removal_mask = VLAN_VID_MASK;
2950 	}
2951 
2952 	/* Update the Queue state */
2953 	rc = bnx2x_queue_state_change(bp, &q_params);
2954 	if (rc) {
2955 		BNX2X_ERR("Failed to configure default VLAN\n");
2956 		goto out;
	}

	/* clear the flag indicating that this VF needs its vlan
	 * (it will only be set if the HV configured the vlan before the VF
	 * was up and we were called because the VF came up later)
	 */
2964 out:
2965 	vf->cfg_flags &= ~VF_CFG_VLAN;
2966 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2967 
2968 	return rc;
2969 }
2970 
2971 /* crc is the first field in the bulletin board. Compute the crc over the
2972  * entire bulletin board excluding the crc field itself. Use the length field
2973  * as the Bulletin Board was posted by a PF with possibly a different version
2974  * from the vf which will sample it. Therefore, the length is computed by the
2975  * PF and then used blindly by the VF.
2976  */
2977 u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
2978 {
2979 	return crc32(BULLETIN_CRC_SEED,
2980 		 ((u8 *)bulletin) + sizeof(bulletin->crc),
2981 		 bulletin->length - sizeof(bulletin->crc));
2982 }
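
/* Sketch of the producer-side ordering this crc relies on (what the
 * posting path is expected to do before copying the board to the VF;
 * field names from the bulletin structure):
 *
 *	bulletin->version++;
 *	bulletin->length = BULLETIN_CONTENT_SIZE;
 *	bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
 *
 * bnx2x_sample_bulletin() below reverses this: copy, recompute, compare.
 */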
2983 
2984 /* Check for new posts on the bulletin board */
2985 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
2986 {
2987 	struct pf_vf_bulletin_content *bulletin;
2988 	int attempts;
2989 
	/* sampling the structure mid-post may result in corrupted data;
	 * validate the crc to ensure coherency.
	 */
2993 	for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
2994 		u32 crc;
2995 
2996 		/* sample the bulletin board */
2997 		memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
2998 		       sizeof(union pf_vf_bulletin));
2999 
3000 		crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
3001 
3002 		if (bp->shadow_bulletin.content.crc == crc)
3003 			break;
3004 
3005 		BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3006 			  bp->shadow_bulletin.content.crc, crc);
3007 	}
3008 
3009 	if (attempts >= BULLETIN_ATTEMPTS) {
3010 		BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3011 			  attempts);
3012 		return PFVF_BULLETIN_CRC_ERR;
3013 	}
3014 	bulletin = &bp->shadow_bulletin.content;
3015 
3016 	/* bulletin board hasn't changed since last sample */
3017 	if (bp->old_bulletin.version == bulletin->version)
3018 		return PFVF_BULLETIN_UNCHANGED;
3019 
3020 	/* the mac address in bulletin board is valid and is new */
3021 	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
3022 	    !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
3023 		/* update new mac to net device */
3024 		memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
3025 	}
3026 
3027 	if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
3028 		DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
3029 		   bulletin->link_speed, bulletin->link_flags);
3030 
3031 		bp->vf_link_vars.line_speed = bulletin->link_speed;
3032 		bp->vf_link_vars.link_report_flags = 0;
3033 		/* Link is down */
3034 		if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
3035 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
3036 				  &bp->vf_link_vars.link_report_flags);
3037 		/* Full DUPLEX */
3038 		if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
3039 			__set_bit(BNX2X_LINK_REPORT_FD,
3040 				  &bp->vf_link_vars.link_report_flags);
3041 		/* Rx Flow Control is ON */
3042 		if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
3043 			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
3044 				  &bp->vf_link_vars.link_report_flags);
3045 		/* Tx Flow Control is ON */
3046 		if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
3047 			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
3048 				  &bp->vf_link_vars.link_report_flags);
3049 		__bnx2x_link_report(bp);
3050 	}
3051 
3052 	/* copy new bulletin board to bp */
3053 	memcpy(&bp->old_bulletin, bulletin,
3054 	       sizeof(struct pf_vf_bulletin_content));
3055 
3056 	return PFVF_BULLETIN_UPDATED;
3057 }
3058 
3059 void bnx2x_timer_sriov(struct bnx2x *bp)
3060 {
3061 	bnx2x_sample_bulletin(bp);
3062 
3063 	/* if channel is down we need to self destruct */
3064 	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3065 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3066 				       BNX2X_MSG_IOV);
3067 }
3068 
3069 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3070 {
3071 	/* vf doorbells are embedded within the regview */
3072 	return bp->regview + PXP_VF_ADDR_DB_START;
3073 }
3074 
3075 void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3076 {
3077 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3078 		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3080 		       sizeof(union pf_vf_bulletin));
3081 }
3082 
3083 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3084 {
3085 	mutex_init(&bp->vf2pf_mutex);
3086 
3087 	/* allocate vf2pf mailbox for vf to pf channel */
3088 	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3089 					 sizeof(struct bnx2x_vf_mbx_msg));
3090 	if (!bp->vf2pf_mbox)
3091 		goto alloc_mem_err;
3092 
3093 	/* allocate pf 2 vf bulletin board */
3094 	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3095 					     sizeof(union pf_vf_bulletin));
3096 	if (!bp->pf2vf_bulletin)
3097 		goto alloc_mem_err;
3098 
3099 	bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
3100 
3101 	return 0;
3102 
3103 alloc_mem_err:
3104 	bnx2x_vf_pci_dealloc(bp);
3105 	return -ENOMEM;
3106 }
3107 
3108 void bnx2x_iov_channel_down(struct bnx2x *bp)
3109 {
3110 	int vf_idx;
3111 	struct pf_vf_bulletin_content *bulletin;
3112 
3113 	if (!IS_SRIOV(bp))
3114 		return;
3115 
3116 	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
3118 		 * bit
3119 		 */
3120 		bulletin = BP_VF_BULLETIN(bp, vf_idx);
3121 		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3122 
3123 		/* update vf bulletin board */
3124 		bnx2x_post_vf_bulletin(bp, vf_idx);
3125 	}
3126 }
3127 
3128 void bnx2x_iov_task(struct work_struct *work)
3129 {
3130 	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3131 
3132 	if (!netif_running(bp->dev))
3133 		return;
3134 
3135 	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3136 			       &bp->iov_task_state))
3137 		bnx2x_vf_handle_flr_event(bp);
3138 
3139 	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3140 			       &bp->iov_task_state))
3141 		bnx2x_vf_mbx(bp);
3142 }
3143 
3144 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3145 {
3146 	smp_mb__before_atomic();
3147 	set_bit(flag, &bp->iov_task_state);
3148 	smp_mb__after_atomic();
3149 	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3150 	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3151 }
3152
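
/* The barriers above pair with the test_and_clear_bit() calls in
 * bnx2x_iov_task(): the flag must be visible before the queued work
 * samples it, and since test_and_clear_bit() is atomic a flag set after
 * sampling is simply picked up by the next scheduled run. Minimal shape
 * of the handshake (sketch):
 *
 *	producer:			iov task:
 *	set_bit(flag, &state);		if (test_and_clear_bit(flag, &state))
 *	queue_delayed_work(...);		handle(flag);
 */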