1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include <linux/crc32.h>
24 
25 /* General service functions */
26 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
27 					 u16 pf_id)
28 {
29 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
30 		pf_id);
31 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
32 		pf_id);
33 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
34 		pf_id);
35 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
36 		pf_id);
37 }
38 
39 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
40 					u8 enable)
41 {
42 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
43 		enable);
44 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
45 		enable);
46 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
47 		enable);
48 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
49 		enable);
50 }
51 
52 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
53 {
54 	int idx;
55 
56 	for_each_vf(bp, idx)
57 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
58 			break;
59 	return idx;
60 }
61 
62 static
63 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
64 {
65 	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
66 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
67 }
68 
69 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
70 				u8 igu_sb_id, u8 segment, u16 index, u8 op,
71 				u8 update)
72 {
73 	/* acking a VF sb through the PF - use the GRC */
74 	u32 ctl;
75 	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
76 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
77 	u32 func_encode = vf->abs_vfid;
78 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
79 	struct igu_regular cmd_data = {0};
80 
81 	cmd_data.sb_id_and_flags =
82 			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
83 			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
84 			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
85 			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
86 
87 	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
88 	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
89 	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
90 
91 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
92 	   cmd_data.sb_id_and_flags, igu_addr_data);
93 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
94 	mmiowb();
95 	barrier();
96 
97 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
98 	   ctl, igu_addr_ctl);
99 	REG_WR(bp, igu_addr_ctl, ctl);
100 	mmiowb();
101 	barrier();
102 }
103 /* VFOP - VF slow-path operation support */
104 
105 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
106 
107 /* VFOP operations states */
108 enum bnx2x_vfop_qctor_state {
109 	   BNX2X_VFOP_QCTOR_INIT,
110 	   BNX2X_VFOP_QCTOR_SETUP,
111 	   BNX2X_VFOP_QCTOR_INT_EN
112 };
113 
114 enum bnx2x_vfop_qdtor_state {
115 	   BNX2X_VFOP_QDTOR_HALT,
116 	   BNX2X_VFOP_QDTOR_TERMINATE,
117 	   BNX2X_VFOP_QDTOR_CFCDEL,
118 	   BNX2X_VFOP_QDTOR_DONE
119 };
120 
121 enum bnx2x_vfop_vlan_mac_state {
122 	   BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
123 	   BNX2X_VFOP_VLAN_MAC_CLEAR,
124 	   BNX2X_VFOP_VLAN_MAC_CHK_DONE,
125 	   BNX2X_VFOP_MAC_CONFIG_LIST,
126 	   BNX2X_VFOP_VLAN_CONFIG_LIST,
127 	   BNX2X_VFOP_VLAN_CONFIG_LIST_0
128 };
129 
130 enum bnx2x_vfop_qsetup_state {
131 	   BNX2X_VFOP_QSETUP_CTOR,
132 	   BNX2X_VFOP_QSETUP_VLAN0,
133 	   BNX2X_VFOP_QSETUP_DONE
134 };
135 
136 enum bnx2x_vfop_mcast_state {
137 	   BNX2X_VFOP_MCAST_DEL,
138 	   BNX2X_VFOP_MCAST_ADD,
139 	   BNX2X_VFOP_MCAST_CHK_DONE
140 };
141 enum bnx2x_vfop_qflr_state {
142 	   BNX2X_VFOP_QFLR_CLR_VLAN,
143 	   BNX2X_VFOP_QFLR_CLR_MAC,
144 	   BNX2X_VFOP_QFLR_TERMINATE,
145 	   BNX2X_VFOP_QFLR_DONE
146 };
147 
148 enum bnx2x_vfop_flr_state {
149 	   BNX2X_VFOP_FLR_QUEUES,
150 	   BNX2X_VFOP_FLR_HW
151 };
152 
153 enum bnx2x_vfop_close_state {
154 	   BNX2X_VFOP_CLOSE_QUEUES,
155 	   BNX2X_VFOP_CLOSE_HW
156 };
157 
158 enum bnx2x_vfop_rxmode_state {
159 	   BNX2X_VFOP_RXMODE_CONFIG,
160 	   BNX2X_VFOP_RXMODE_DONE
161 };
162 
163 enum bnx2x_vfop_qteardown_state {
164 	   BNX2X_VFOP_QTEARDOWN_RXMODE,
165 	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
166 	   BNX2X_VFOP_QTEARDOWN_CLR_MAC,
167 	   BNX2X_VFOP_QTEARDOWN_QDTOR,
168 	   BNX2X_VFOP_QTEARDOWN_DONE
169 };
170 
171 #define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
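
/* VFOP usage pattern (summary):
 * Every slow-path operation below is driven as a small state machine. A
 * command wrapper (e.g. bnx2x_vfop_qctor_cmd()) allocates a vfop with
 * bnx2x_vfop_add(), fills vf->op_params and vfop->args, seeds the first
 * state and the completion callback with bnx2x_vfop_opset(), and kicks the
 * handler through bnx2x_vfop_transition(). The handler is re-entered on
 * each completion event and advances vfop->state until bnx2x_vfop_end().
 *
 * Illustrative sketch only (mirrors the command wrappers in this file):
 *
 *	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 *
 *	if (!vfop)
 *		return -ENOMEM;
 *	vfop->args.qctor.qid = qid;
 *	bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, bnx2x_vfop_qctor, cmd->done);
 *	return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, cmd->block);
 */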
172 
173 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
174 			      struct bnx2x_queue_init_params *init_params,
175 			      struct bnx2x_queue_setup_params *setup_params,
176 			      u16 q_idx, u16 sb_idx)
177 {
178 	DP(BNX2X_MSG_IOV,
179 	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
180 	   vf->abs_vfid,
181 	   q_idx,
182 	   sb_idx,
183 	   init_params->tx.sb_cq_index,
184 	   init_params->tx.hc_rate,
185 	   setup_params->flags,
186 	   setup_params->txq_params.traffic_type);
187 }
188 
189 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
190 			    struct bnx2x_queue_init_params *init_params,
191 			    struct bnx2x_queue_setup_params *setup_params,
192 			    u16 q_idx, u16 sb_idx)
193 {
194 	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
195 
196 	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
197 	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
198 	   vf->abs_vfid,
199 	   q_idx,
200 	   sb_idx,
201 	   init_params->rx.sb_cq_index,
202 	   init_params->rx.hc_rate,
203 	   setup_params->gen_params.mtu,
204 	   rxq_params->buf_sz,
205 	   rxq_params->sge_buf_sz,
206 	   rxq_params->max_sges_pkt,
207 	   rxq_params->tpa_agg_sz,
208 	   setup_params->flags,
209 	   rxq_params->drop_flags,
210 	   rxq_params->cache_line_log);
211 }
212 
213 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
214 			   struct bnx2x_virtf *vf,
215 			   struct bnx2x_vf_queue *q,
216 			   struct bnx2x_vfop_qctor_params *p,
217 			   unsigned long q_type)
218 {
219 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
220 	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
221 
222 	/* INIT */
223 
224 	/* Enable host coalescing in the transition to INIT state */
225 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
226 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
227 
228 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
229 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
230 
231 	/* FW SB ID */
232 	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
233 	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
234 
235 	/* context */
236 	init_p->cxts[0] = q->cxt;
237 
238 	/* SETUP */
239 
240 	/* Setup-op general parameters */
241 	setup_p->gen_params.spcl_id = vf->sp_cl_id;
242 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
243 
244 	/* Setup-op pause params:
245 	 * Nothing to do, the pause thresholds are set by default to 0 which
246 	 * effectively turns off the feature for this queue. We don't want
247 	 * one queue (VF) to interfere with another queue (another VF)
248 	 */
249 	if (vf->cfg_flags & VF_CFG_FW_FC)
250 		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
251 			  vf->abs_vfid);
252 	/* Setup-op flags:
253 	 * collect statistics, zero statistics, local-switching, security,
254 	 * OV for Flex10, RSS and MCAST for leading
255 	 */
256 	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
257 		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
258 
259 	/* for VFs, enable tx switching, bd coherency, and mac address
260 	 * anti-spoofing
261 	 */
262 	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
263 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
264 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
265 
266 	if (vfq_is_leading(q)) {
267 		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
268 		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
269 	}
270 
271 	/* Setup-op rx parameters */
272 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
273 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
274 
275 		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
276 		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
277 		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
278 
279 		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
280 			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
281 	}
282 
283 	/* Setup-op tx parameters */
284 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
285 		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
286 		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
287 	}
288 }
289 
290 /* VFOP queue construction */
291 static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
292 {
293 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
294 	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
295 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
296 	enum bnx2x_vfop_qctor_state state = vfop->state;
297 
298 	bnx2x_vfop_reset_wq(vf);
299 
300 	if (vfop->rc < 0)
301 		goto op_err;
302 
303 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
304 
305 	switch (state) {
306 	case BNX2X_VFOP_QCTOR_INIT:
307 
308 		/* has this queue already been opened? */
309 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
310 		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
311 			DP(BNX2X_MSG_IOV,
312 			   "Entered qctor but queue was already up. Aborting gracefully\n");
313 			goto op_done;
314 		}
315 
316 		/* next state */
317 		vfop->state = BNX2X_VFOP_QCTOR_SETUP;
318 
319 		q_params->cmd = BNX2X_Q_CMD_INIT;
320 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
321 
322 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
323 
324 	case BNX2X_VFOP_QCTOR_SETUP:
325 		/* next state */
326 		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
327 
328 		/* copy pre-prepared setup params to the queue-state params */
329 		vfop->op_p->qctor.qstate.params.setup =
330 			vfop->op_p->qctor.prep_qsetup;
331 
332 		q_params->cmd = BNX2X_Q_CMD_SETUP;
333 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
334 
335 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
336 
337 	case BNX2X_VFOP_QCTOR_INT_EN:
338 
339 		/* enable interrupts */
340 		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
341 				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
342 		goto op_done;
343 	default:
344 		bnx2x_vfop_default(state);
345 	}
346 op_err:
347 	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
348 		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
349 op_done:
350 	bnx2x_vfop_end(bp, vf, vfop);
351 op_pending:
352 	return;
353 }
354 
355 static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
356 				struct bnx2x_virtf *vf,
357 				struct bnx2x_vfop_cmd *cmd,
358 				int qid)
359 {
360 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
361 
362 	if (vfop) {
363 		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
364 
365 		vfop->args.qctor.qid = qid;
366 		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
367 
368 		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
369 				 bnx2x_vfop_qctor, cmd->done);
370 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
371 					     cmd->block);
372 	}
373 	return -ENOMEM;
374 }
375 
376 /* VFOP queue destruction */
377 static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
378 {
379 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
380 	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
381 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
382 	enum bnx2x_vfop_qdtor_state state = vfop->state;
383 
384 	bnx2x_vfop_reset_wq(vf);
385 
386 	if (vfop->rc < 0)
387 		goto op_err;
388 
389 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
390 
391 	switch (state) {
392 	case BNX2X_VFOP_QDTOR_HALT:
393 
394 		/* has this queue already been stopped? */
395 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
396 		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
397 			DP(BNX2X_MSG_IOV,
398 			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
399 			goto op_done;
400 		}
401 
402 		/* next state */
403 		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
404 
405 		q_params->cmd = BNX2X_Q_CMD_HALT;
406 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
407 
408 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
409 
410 	case BNX2X_VFOP_QDTOR_TERMINATE:
411 		/* next state */
412 		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
413 
414 		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
415 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
416 
417 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
418 
419 	case BNX2X_VFOP_QDTOR_CFCDEL:
420 		/* next state */
421 		vfop->state = BNX2X_VFOP_QDTOR_DONE;
422 
423 		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
424 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
425 
426 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
427 op_err:
428 	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
429 		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
430 op_done:
431 	case BNX2X_VFOP_QDTOR_DONE:
432 		/* invalidate the context */
433 		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
434 		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
435 		bnx2x_vfop_end(bp, vf, vfop);
436 		return;
437 	default:
438 		bnx2x_vfop_default(state);
439 	}
440 op_pending:
441 	return;
442 }
443 
444 static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
445 				struct bnx2x_virtf *vf,
446 				struct bnx2x_vfop_cmd *cmd,
447 				int qid)
448 {
449 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
450 
451 	if (vfop) {
452 		struct bnx2x_queue_state_params *qstate =
453 			&vf->op_params.qctor.qstate;
454 
455 		memset(qstate, 0, sizeof(*qstate));
456 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
457 
458 		vfop->args.qdtor.qid = qid;
459 		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
460 
461 		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
462 				 bnx2x_vfop_qdtor, cmd->done);
463 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
464 					     cmd->block);
465 	}
466 	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
467 	return -ENOMEM;
468 }
469 
470 static void
471 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
472 {
473 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
474 	if (vf) {
475 		if (!vf_sb_count(vf))
476 			vf->igu_base_id = igu_sb_id;
477 		++vf_sb_count(vf);
478 	}
479 }
480 
481 /* VFOP MAC/VLAN helpers */
482 static inline void bnx2x_vfop_credit(struct bnx2x *bp,
483 				     struct bnx2x_vfop *vfop,
484 				     struct bnx2x_vlan_mac_obj *obj)
485 {
486 	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
487 
488 	/* update credit only if there is no error
489 	 * and a valid credit counter
490 	 */
491 	if (!vfop->rc && args->credit) {
492 		int cnt = 0;
493 		struct list_head *pos;
494 
495 		list_for_each(pos, &obj->head)
496 			cnt++;
497 
498 		atomic_set(args->credit, cnt);
499 	}
500 }
501 
502 static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
503 				    struct bnx2x_vfop_filter *pos,
504 				    struct bnx2x_vlan_mac_data *user_req)
505 {
506 	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
507 		BNX2X_VLAN_MAC_DEL;
508 
509 	switch (pos->type) {
510 	case BNX2X_VFOP_FILTER_MAC:
511 		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
512 		break;
513 	case BNX2X_VFOP_FILTER_VLAN:
514 		user_req->u.vlan.vlan = pos->vid;
515 		break;
516 	default:
517 		BNX2X_ERR("Invalid filter type, skipping\n");
518 		return 1;
519 	}
520 	return 0;
521 }
522 
523 static int
524 bnx2x_vfop_config_vlan0(struct bnx2x *bp,
525 			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
526 			bool add)
527 {
528 	int rc;
529 
530 	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
531 		BNX2X_VLAN_MAC_DEL;
532 	vlan_mac->user_req.u.vlan.vlan = 0;
533 
534 	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
535 	if (rc == -EEXIST)
536 		rc = 0;
537 	return rc;
538 }
539 
540 static int bnx2x_vfop_config_list(struct bnx2x *bp,
541 				  struct bnx2x_vfop_filters *filters,
542 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
543 {
544 	struct bnx2x_vfop_filter *pos, *tmp;
545 	struct list_head rollback_list, *filters_list = &filters->head;
546 	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
547 	int rc = 0, cnt = 0;
548 
549 	INIT_LIST_HEAD(&rollback_list);
550 
551 	list_for_each_entry_safe(pos, tmp, filters_list, link) {
552 		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
553 			continue;
554 
555 		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
556 		if (rc >= 0) {
557 			cnt += pos->add ? 1 : -1;
558 			list_del(&pos->link);
559 			list_add(&pos->link, &rollback_list);
560 			rc = 0;
561 		} else if (rc == -EEXIST) {
562 			rc = 0;
563 		} else {
564 			BNX2X_ERR("Failed to add a new vlan_mac command\n");
565 			break;
566 		}
567 	}
568 
569 	/* rollback if error or too many rules added */
570 	if (rc || cnt > filters->add_cnt) {
571 		BNX2X_ERR("error or too many rules added. Performing rollback\n");
572 		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
573 			pos->add = !pos->add;	/* reverse op */
574 			bnx2x_vfop_set_user_req(bp, pos, user_req);
575 			bnx2x_config_vlan_mac(bp, vlan_mac);
576 			list_del(&pos->link);
577 		}
578 		cnt = 0;
579 		if (!rc)
580 			rc = -EINVAL;
581 	}
582 	filters->add_cnt = cnt;
583 	return rc;
584 }
585 
586 /* VFOP set VLAN/MAC */
587 static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
588 {
589 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
590 	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
591 	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
592 	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
593 
594 	enum bnx2x_vfop_vlan_mac_state state = vfop->state;
595 
596 	if (vfop->rc < 0)
597 		goto op_err;
598 
599 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
600 
601 	bnx2x_vfop_reset_wq(vf);
602 
603 	switch (state) {
604 	case BNX2X_VFOP_VLAN_MAC_CLEAR:
605 		/* next state */
606 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
607 
608 		/* do delete */
609 		vfop->rc = obj->delete_all(bp, obj,
610 					   &vlan_mac->user_req.vlan_mac_flags,
611 					   &vlan_mac->ramrod_flags);
612 
613 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
614 
615 	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
616 		/* next state */
617 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
618 
619 		/* do config */
620 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
621 		if (vfop->rc == -EEXIST)
622 			vfop->rc = 0;
623 
624 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
625 
626 	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
627 		vfop->rc = !!obj->raw.check_pending(&obj->raw);
628 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
629 
630 	case BNX2X_VFOP_MAC_CONFIG_LIST:
631 		/* next state */
632 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
633 
634 		/* do list config */
635 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
636 		if (vfop->rc)
637 			goto op_err;
638 
639 		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
640 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
641 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
642 
643 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
644 		/* next state */
645 		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
646 
647 		/* remove vlan0 - could be no-op */
648 		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
649 		if (vfop->rc)
650 			goto op_err;
651 
652 		/* Do vlan list config. If this operation fails we try to
653 		 * restore vlan0 to keep the queue in working order
654 		 */
655 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
656 		if (!vfop->rc) {
657 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
658 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
659 		}
660 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
661 
662 	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
663 		/* next state */
664 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
665 
666 		if (list_empty(&obj->head))
667 			/* add vlan0 */
668 			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
669 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
670 
671 	default:
672 		bnx2x_vfop_default(state);
673 	}
674 op_err:
675 	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
676 op_done:
677 	kfree(filters);
678 	bnx2x_vfop_credit(bp, vfop, obj);
679 	bnx2x_vfop_end(bp, vf, vfop);
680 op_pending:
681 	return;
682 }
683 
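/* Knobs for building a vlan/mac ramrod (see
 * bnx2x_vfop_vlan_mac_prep_ramrod() below): drv_only sets
 * RAMROD_DRV_CLR_ONLY, single_cmd sets RAMROD_EXEC, dont_consume sets
 * BNX2X_DONT_CONSUME_CAM_CREDIT and add selects ADD vs. DEL.
 */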
684 struct bnx2x_vfop_vlan_mac_flags {
685 	bool drv_only;
686 	bool dont_consume;
687 	bool single_cmd;
688 	bool add;
689 };
690 
691 static void
692 bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
693 				struct bnx2x_vfop_vlan_mac_flags *flags)
694 {
695 	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
696 
697 	memset(ramrod, 0, sizeof(*ramrod));
698 
699 	/* ramrod flags */
700 	if (flags->drv_only)
701 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
702 	if (flags->single_cmd)
703 		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
704 
705 	/* mac_vlan flags */
706 	if (flags->dont_consume)
707 		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
708 
709 	/* cmd */
710 	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
711 }
712 
713 static inline void
714 bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
715 			   struct bnx2x_vfop_vlan_mac_flags *flags)
716 {
717 	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
718 	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
719 }
720 
721 static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
722 				     struct bnx2x_virtf *vf,
723 				     struct bnx2x_vfop_cmd *cmd,
724 				     int qid, bool drv_only)
725 {
726 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
727 
728 	if (vfop) {
729 		struct bnx2x_vfop_args_filters filters = {
730 			.multi_filter = NULL,	/* single */
731 			.credit = NULL,		/* consume credit */
732 		};
733 		struct bnx2x_vfop_vlan_mac_flags flags = {
734 			.drv_only = drv_only,
735 			.dont_consume = (filters.credit != NULL),
736 			.single_cmd = true,
737 			.add = false /* don't care */,
738 		};
739 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
740 			&vf->op_params.vlan_mac;
741 
742 		/* set ramrod params */
743 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
744 
745 		/* set object */
746 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
747 
748 		/* set extra args */
749 		vfop->args.filters = filters;
750 
751 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
752 				 bnx2x_vfop_vlan_mac, cmd->done);
753 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
754 					     cmd->block);
755 	}
756 	return -ENOMEM;
757 }
758 
759 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
760 			    struct bnx2x_virtf *vf,
761 			    struct bnx2x_vfop_cmd *cmd,
762 			    struct bnx2x_vfop_filters *macs,
763 			    int qid, bool drv_only)
764 {
765 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
766 
767 	if (vfop) {
768 		struct bnx2x_vfop_args_filters filters = {
769 			.multi_filter = macs,
770 			.credit = NULL,		/* consume credit */
771 		};
772 		struct bnx2x_vfop_vlan_mac_flags flags = {
773 			.drv_only = drv_only,
774 			.dont_consume = (filters.credit != NULL),
775 			.single_cmd = false,
776 			.add = false, /* don't care since only the items in the
777 				       * filters list affect the sp operation,
778 				       * not the list itself
779 				       */
780 		};
781 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
782 			&vf->op_params.vlan_mac;
783 
784 		/* set ramrod params */
785 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
786 
787 		/* set object */
788 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
789 
790 		/* set extra args */
791 		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
792 		vfop->args.filters = filters;
793 
794 		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
795 				 bnx2x_vfop_vlan_mac, cmd->done);
796 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
797 					     cmd->block);
798 	}
799 	return -ENOMEM;
800 }
801 
802 int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
803 			    struct bnx2x_virtf *vf,
804 			    struct bnx2x_vfop_cmd *cmd,
805 			    int qid, u16 vid, bool add)
806 {
807 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
808 
809 	if (vfop) {
810 		struct bnx2x_vfop_args_filters filters = {
811 			.multi_filter = NULL, /* single command */
812 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
813 		};
814 		struct bnx2x_vfop_vlan_mac_flags flags = {
815 			.drv_only = false,
816 			.dont_consume = (filters.credit != NULL),
817 			.single_cmd = true,
818 			.add = add,
819 		};
820 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
821 			&vf->op_params.vlan_mac;
822 
823 		/* set ramrod params */
824 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
825 		ramrod->user_req.u.vlan.vlan = vid;
826 
827 		/* set object */
828 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
829 
830 		/* set extra args */
831 		vfop->args.filters = filters;
832 
833 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
834 				 bnx2x_vfop_vlan_mac, cmd->done);
835 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
836 					     cmd->block);
837 	}
838 	return -ENOMEM;
839 }
840 
841 static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
842 			       struct bnx2x_virtf *vf,
843 			       struct bnx2x_vfop_cmd *cmd,
844 			       int qid, bool drv_only)
845 {
846 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
847 
848 	if (vfop) {
849 		struct bnx2x_vfop_args_filters filters = {
850 			.multi_filter = NULL, /* single command */
851 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
852 		};
853 		struct bnx2x_vfop_vlan_mac_flags flags = {
854 			.drv_only = drv_only,
855 			.dont_consume = (filters.credit != NULL),
856 			.single_cmd = true,
857 			.add = false, /* don't care */
858 		};
859 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
860 			&vf->op_params.vlan_mac;
861 
862 		/* set ramrod params */
863 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
864 
865 		/* set object */
866 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
867 
868 		/* set extra args */
869 		vfop->args.filters = filters;
870 
871 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
872 				 bnx2x_vfop_vlan_mac, cmd->done);
873 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
874 					     cmd->block);
875 	}
876 	return -ENOMEM;
877 }
878 
879 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
880 			     struct bnx2x_virtf *vf,
881 			     struct bnx2x_vfop_cmd *cmd,
882 			     struct bnx2x_vfop_filters *vlans,
883 			     int qid, bool drv_only)
884 {
885 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
886 
887 	if (vfop) {
888 		struct bnx2x_vfop_args_filters filters = {
889 			.multi_filter = vlans,
890 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
891 		};
892 		struct bnx2x_vfop_vlan_mac_flags flags = {
893 			.drv_only = drv_only,
894 			.dont_consume = (filters.credit != NULL),
895 			.single_cmd = false,
896 			.add = false, /* don't care */
897 		};
898 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
899 			&vf->op_params.vlan_mac;
900 
901 		/* set ramrod params */
902 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
903 
904 		/* set object */
905 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
906 
907 		/* set extra args */
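		/* limit new vlan rules to the VF's remaining credit: the vlan
		 * rules allotted to the VF minus those already in use
		 */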
908 		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
909 			atomic_read(filters.credit);
910 
911 		vfop->args.filters = filters;
912 
913 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
914 				 bnx2x_vfop_vlan_mac, cmd->done);
915 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
916 					     cmd->block);
917 	}
918 	return -ENOMEM;
919 }
920 
921 /* VFOP queue setup (queue constructor + set vlan 0) */
922 static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
923 {
924 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
925 	int qid = vfop->args.qctor.qid;
926 	enum bnx2x_vfop_qsetup_state state = vfop->state;
927 	struct bnx2x_vfop_cmd cmd = {
928 		.done = bnx2x_vfop_qsetup,
929 		.block = false,
930 	};
931 
932 	if (vfop->rc < 0)
933 		goto op_err;
934 
935 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
936 
937 	switch (state) {
938 	case BNX2X_VFOP_QSETUP_CTOR:
939 		/* init the queue ctor command */
940 		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
941 		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
942 		if (vfop->rc)
943 			goto op_err;
944 		return;
945 
946 	case BNX2X_VFOP_QSETUP_VLAN0:
947 		/* skip if non-leading or FPGA/EMU */
948 		if (qid)
949 			goto op_done;
950 
951 		/* init the queue set-vlan command (for vlan 0) */
952 		vfop->state = BNX2X_VFOP_QSETUP_DONE;
953 		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
954 		if (vfop->rc)
955 			goto op_err;
956 		return;
957 op_err:
958 	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
959 op_done:
960 	case BNX2X_VFOP_QSETUP_DONE:
961 		bnx2x_vfop_end(bp, vf, vfop);
962 		return;
963 	default:
964 		bnx2x_vfop_default(state);
965 	}
966 }
967 
968 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
969 			  struct bnx2x_virtf *vf,
970 			  struct bnx2x_vfop_cmd *cmd,
971 			  int qid)
972 {
973 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
974 
975 	if (vfop) {
976 		vfop->args.qctor.qid = qid;
977 
978 		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
979 				 bnx2x_vfop_qsetup, cmd->done);
980 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
981 					     cmd->block);
982 	}
983 	return -ENOMEM;
984 }
985 
986 /* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
987 static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
988 {
989 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
990 	int qid = vfop->args.qx.qid;
991 	enum bnx2x_vfop_qflr_state state = vfop->state;
992 	struct bnx2x_queue_state_params *qstate;
993 	struct bnx2x_vfop_cmd cmd;
994 
995 	bnx2x_vfop_reset_wq(vf);
996 
997 	if (vfop->rc < 0)
998 		goto op_err;
999 
1000 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
1001 
1002 	cmd.done = bnx2x_vfop_qflr;
1003 	cmd.block = false;
1004 
1005 	switch (state) {
1006 	case BNX2X_VFOP_QFLR_CLR_VLAN:
1007 		/* vlan-clear-all: driver-only, don't consume credit */
1008 		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
1009 		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
1010 		if (vfop->rc)
1011 			goto op_err;
1012 		return;
1013 
1014 	case BNX2X_VFOP_QFLR_CLR_MAC:
1015 		/* mac-clear-all: driver only, consume credit */
1016 		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
1017 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
1018 		DP(BNX2X_MSG_IOV,
1019 		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d\n",
1020 		   vf->abs_vfid, vfop->rc);
1021 		if (vfop->rc)
1022 			goto op_err;
1023 		return;
1024 
1025 	case BNX2X_VFOP_QFLR_TERMINATE:
1026 		qstate = &vfop->op_p->qctor.qstate;
1027 		memset(qstate, 0, sizeof(*qstate));
1028 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
1029 		vfop->state = BNX2X_VFOP_QFLR_DONE;
1030 
1031 		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
1032 		   vf->abs_vfid, qstate->q_obj->state);
1033 
1034 		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
1035 			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
1036 			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
1037 			vfop->rc = bnx2x_queue_state_change(bp, qstate);
1038 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
1039 		} else {
1040 			goto op_done;
1041 		}
1042 
1043 op_err:
1044 	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
1045 		  vf->abs_vfid, qid, vfop->rc);
1046 op_done:
1047 	case BNX2X_VFOP_QFLR_DONE:
1048 		bnx2x_vfop_end(bp, vf, vfop);
1049 		return;
1050 	default:
1051 		bnx2x_vfop_default(state);
1052 	}
1053 op_pending:
1054 	return;
1055 }
1056 
1057 static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
1058 			       struct bnx2x_virtf *vf,
1059 			       struct bnx2x_vfop_cmd *cmd,
1060 			       int qid)
1061 {
1062 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1063 
1064 	if (vfop) {
1065 		vfop->args.qx.qid = qid;
1066 		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
1067 				 bnx2x_vfop_qflr, cmd->done);
1068 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
1069 					     cmd->block);
1070 	}
1071 	return -ENOMEM;
1072 }
1073 
1074 /* VFOP multi-casts */
1075 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
1076 {
1077 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1078 	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
1079 	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
1080 	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
1081 	enum bnx2x_vfop_mcast_state state = vfop->state;
1082 	int i;
1083 
1084 	bnx2x_vfop_reset_wq(vf);
1085 
1086 	if (vfop->rc < 0)
1087 		goto op_err;
1088 
1089 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1090 
1091 	switch (state) {
1092 	case BNX2X_VFOP_MCAST_DEL:
1093 		/* clear existing mcasts */
1094 		vfop->state = BNX2X_VFOP_MCAST_ADD;
1095 		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
1096 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1097 
1098 	case BNX2X_VFOP_MCAST_ADD:
1099 		if (raw->check_pending(raw))
1100 			goto op_pending;
1101 
1102 		if (args->mc_num) {
1103 			/* update mcast list on the ramrod params */
1104 			INIT_LIST_HEAD(&mcast->mcast_list);
1105 			for (i = 0; i < args->mc_num; i++)
1106 				list_add_tail(&(args->mc[i].link),
1107 					      &mcast->mcast_list);
1108 			/* add new mcasts */
1109 			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
1110 			vfop->rc = bnx2x_config_mcast(bp, mcast,
1111 						      BNX2X_MCAST_CMD_ADD);
1112 		}
1113 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1114 
1115 	case BNX2X_VFOP_MCAST_CHK_DONE:
1116 		vfop->rc = raw->check_pending(raw) ? 1 : 0;
1117 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1118 	default:
1119 		bnx2x_vfop_default(state);
1120 	}
1121 op_err:
1122 	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
1123 op_done:
1124 	kfree(args->mc);
1125 	bnx2x_vfop_end(bp, vf, vfop);
1126 op_pending:
1127 	return;
1128 }
1129 
1130 int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
1131 			 struct bnx2x_virtf *vf,
1132 			 struct bnx2x_vfop_cmd *cmd,
1133 			 bnx2x_mac_addr_t *mcasts,
1134 			 int mcast_num, bool drv_only)
1135 {
1136 	struct bnx2x_vfop *vfop = NULL;
1137 	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
1138 	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
1139 					   NULL;
1140 
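	/* proceed if no mcast list is required (mcast_num == 0) or if the
	 * list allocation above succeeded
	 */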
1141 	if (!mc_sz || mc) {
1142 		vfop = bnx2x_vfop_add(bp, vf);
1143 		if (vfop) {
1144 			int i;
1145 			struct bnx2x_mcast_ramrod_params *ramrod =
1146 				&vf->op_params.mcast;
1147 
1148 			/* set ramrod params */
1149 			memset(ramrod, 0, sizeof(*ramrod));
1150 			ramrod->mcast_obj = &vf->mcast_obj;
1151 			if (drv_only)
1152 				set_bit(RAMROD_DRV_CLR_ONLY,
1153 					&ramrod->ramrod_flags);
1154 
1155 			/* copy mcasts pointers */
1156 			vfop->args.mc_list.mc_num = mcast_num;
1157 			vfop->args.mc_list.mc = mc;
1158 			for (i = 0; i < mcast_num; i++)
1159 				mc[i].mac = mcasts[i];
1160 
1161 			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
1162 					 bnx2x_vfop_mcast, cmd->done);
1163 			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
1164 						     cmd->block);
1165 		} else {
1166 			kfree(mc);
1167 		}
1168 	}
1169 	return -ENOMEM;
1170 }
1171 
1172 /* VFOP rx-mode */
1173 static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1174 {
1175 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1176 	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
1177 	enum bnx2x_vfop_rxmode_state state = vfop->state;
1178 
1179 	bnx2x_vfop_reset_wq(vf);
1180 
1181 	if (vfop->rc < 0)
1182 		goto op_err;
1183 
1184 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1185 
1186 	switch (state) {
1187 	case BNX2X_VFOP_RXMODE_CONFIG:
1188 		/* next state */
1189 		vfop->state = BNX2X_VFOP_RXMODE_DONE;
1190 
1191 		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1192 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1193 op_err:
1194 		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
1195 op_done:
1196 	case BNX2X_VFOP_RXMODE_DONE:
1197 		bnx2x_vfop_end(bp, vf, vfop);
1198 		return;
1199 	default:
1200 		bnx2x_vfop_default(state);
1201 	}
1202 op_pending:
1203 	return;
1204 }
1205 
1206 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1207 			  struct bnx2x_virtf *vf,
1208 			  struct bnx2x_vfop_cmd *cmd,
1209 			  int qid, unsigned long accept_flags)
1210 {
1211 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1212 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1213 
1214 	if (vfop) {
1215 		struct bnx2x_rx_mode_ramrod_params *ramrod =
1216 			&vf->op_params.rx_mode;
1217 
1218 		memset(ramrod, 0, sizeof(*ramrod));
1219 
1220 		/* Prepare ramrod parameters */
1221 		ramrod->cid = vfq->cid;
1222 		ramrod->cl_id = vfq_cl_id(vf, vfq);
1223 		ramrod->rx_mode_obj = &bp->rx_mode_obj;
1224 		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1225 
1226 		ramrod->rx_accept_flags = accept_flags;
1227 		ramrod->tx_accept_flags = accept_flags;
1228 		ramrod->pstate = &vf->filter_state;
1229 		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1230 
1231 		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1232 		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1233 		set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1234 
1235 		ramrod->rdata =
1236 			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1237 		ramrod->rdata_mapping =
1238 			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1239 
1240 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1241 				 bnx2x_vfop_rxmode, cmd->done);
1242 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
1243 					     cmd->block);
1244 	}
1245 	return -ENOMEM;
1246 }
1247 
1248 /* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
1249  * queue destructor)
1250  */
1251 static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
1252 {
1253 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1254 	int qid = vfop->args.qx.qid;
1255 	enum bnx2x_vfop_qteardown_state state = vfop->state;
1256 	struct bnx2x_vfop_cmd cmd;
1257 
1258 	if (vfop->rc < 0)
1259 		goto op_err;
1260 
1261 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1262 
1263 	cmd.done = bnx2x_vfop_qdown;
1264 	cmd.block = false;
1265 
1266 	switch (state) {
1267 	case BNX2X_VFOP_QTEARDOWN_RXMODE:
1268 		/* Drop all */
1269 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
1270 		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
1271 		if (vfop->rc)
1272 			goto op_err;
1273 		return;
1274 
1275 	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
1276 		/* vlan-clear-all: don't consume credit */
1277 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
1278 		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
1279 		if (vfop->rc)
1280 			goto op_err;
1281 		return;
1282 
1283 	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
1284 		/* mac-clear-all: consume credit */
1285 		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1286 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
1287 		if (vfop->rc)
1288 			goto op_err;
1289 		return;
1290 
1291 	case BNX2X_VFOP_QTEARDOWN_QDTOR:
1292 		/* run the queue destruction flow */
1293 		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
1294 		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
1295 		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
1296 		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
1297 		DP(BNX2X_MSG_IOV, "returned from cmd\n");
1298 		if (vfop->rc)
1299 			goto op_err;
1300 		return;
1301 op_err:
1302 	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
1303 		  vf->abs_vfid, qid, vfop->rc);
1304 
1305 	case BNX2X_VFOP_QTEARDOWN_DONE:
1306 		bnx2x_vfop_end(bp, vf, vfop);
1307 		return;
1308 	default:
1309 		bnx2x_vfop_default(state);
1310 	}
1311 }
1312 
1313 int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
1314 			 struct bnx2x_virtf *vf,
1315 			 struct bnx2x_vfop_cmd *cmd,
1316 			 int qid)
1317 {
1318 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1319 
1320 	if (vfop) {
1321 		vfop->args.qx.qid = qid;
1322 		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
1323 				 bnx2x_vfop_qdown, cmd->done);
1324 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
1325 					     cmd->block);
1326 	}
1327 
1328 	return -ENOMEM;
1329 }
1330 
1331 /* VF enable primitives
1332  * when pretend is required the caller is responsible
1333  * for calling pretend prior to calling these routines
1334  */
1335 
1336 /* internal vf enable - until vf is enabled internally all transactions
1337  * are blocked. this routine should always be called last with pretend.
1338  */
1339 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
1340 {
1341 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
1342 }
1343 
1344 /* clears vf error in all semi blocks */
1345 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
1346 {
1347 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
1348 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
1349 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
1350 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
1351 }
1352 
1353 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
1354 {
1355 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
1356 	u32 was_err_reg = 0;
1357 
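	/* each WAS_ERROR register covers 32 VFs; (2 * path + abs_vfid) >> 5
	 * picks the register holding this VF's bit, which is cleared below
	 * via 1 << (abs_vfid & 0x1f)
	 */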
1358 	switch (was_err_group) {
1359 	case 0:
1360 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
1361 	    break;
1362 	case 1:
1363 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
1364 	    break;
1365 	case 2:
1366 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
1367 	    break;
1368 	case 3:
1369 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
1370 	    break;
1371 	}
1372 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
1373 }
1374 
1375 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1376 {
1377 	int i;
1378 	u32 val;
1379 
1380 	/* Set VF masks and configuration - pretend */
1381 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1382 
1383 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
1384 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
1385 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
1386 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
1387 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
1388 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
1389 
1390 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1391 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
1392 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
1393 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
1394 	val &= ~IGU_VF_CONF_PARENT_MASK;
1395 	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
1396 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1397 
1398 	DP(BNX2X_MSG_IOV,
1399 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
1400 	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
1401 
1402 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1403 
1404 	/* iterate over all queues, clear sb consumer */
1405 	for (i = 0; i < vf_sb_count(vf); i++) {
1406 		u8 igu_sb_id = vf_igu_sb(vf, i);
1407 
1408 		/* zero prod memory */
1409 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
1410 
1411 		/* clear sb state machine */
1412 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
1413 				       false /* VF */);
1414 
1415 		/* disable + update */
1416 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
1417 				    IGU_INT_DISABLE, 1);
1418 	}
1419 }
1420 
1421 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
1422 {
1423 	/* set the VF-PF association in the FW */
1424 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
1425 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
1426 
1427 	/* clear vf errors*/
1428 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
1429 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
1430 
1431 	/* internal vf-enable - pretend */
1432 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
1433 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
1434 	bnx2x_vf_enable_internal(bp, true);
1435 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1436 }
1437 
1438 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
1439 {
1440 	/* Reset vf in IGU - interrupts are still disabled */
1441 	bnx2x_vf_igu_reset(bp, vf);
1442 
1443 	/* pretend as the VF and enable it in the PBF */
1444 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1445 	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
1446 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1447 }
1448 
1449 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1450 {
1451 	struct pci_dev *dev;
1452 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1453 
1454 	if (!vf)
1455 		goto unknown_dev;
1456 
1457 	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
1458 	if (dev)
1459 		return bnx2x_is_pcie_pending(dev);
1460 
1461 unknown_dev:
1462 	BNX2X_ERR("Unknown device\n");
1463 	return false;
1464 }
1465 
1466 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1467 {
1468 	/* Wait 100ms */
1469 	msleep(100);
1470 
1471 	/* Verify no pending pci transactions */
1472 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
1473 		BNX2X_ERR("PCIE Transactions still pending\n");
1474 
1475 	return 0;
1476 }
1477 
1478 /* must be called after the number of PF queues and the number of VFs are
1479  * both known
1480  */
1481 static void
1482 bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
1483 {
1484 	u16 vlan_count = 0;
1485 
1486 	/* will be set only during VF-ACQUIRE */
1487 	resc->num_rxqs = 0;
1488 	resc->num_txqs = 0;
1489 
1490 	/* no credit calculations for macs (just yet) */
1491 	resc->num_mac_filters = 1;
1492 
1493 	/* divvy up vlan rules */
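	/* e.g. (illustrative numbers only): a 192-entry vlan pool shared by
	 * 64 VFs gives 1 << ilog2(192) = 128 credits, i.e. 2 vlan filters
	 * per VF
	 */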
1494 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
1495 	vlan_count = 1 << ilog2(vlan_count);
1496 	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
1497 
1498 	/* no real limitation */
1499 	resc->num_mc_filters = 0;
1500 
1501 	/* num_sbs already set */
1502 }
1503 
1504 /* FLR routines: */
1505 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1506 {
1507 	/* reset the state variables */
1508 	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
1509 	vf->state = VF_FREE;
1510 }
1511 
1512 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
1513 {
1514 	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1515 
1516 	/* DQ usage counter */
1517 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1518 	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
1519 					"DQ VF usage counter timed out",
1520 					poll_cnt);
1521 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1522 
1523 	/* FW cleanup command - poll for the results */
1524 	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
1525 				   poll_cnt))
1526 		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
1527 
1528 	/* verify TX hw is flushed */
1529 	bnx2x_tx_hw_flushed(bp, poll_cnt);
1530 }
1531 
1532 static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1533 {
1534 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1535 	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
1536 	enum bnx2x_vfop_flr_state state = vfop->state;
1537 	struct bnx2x_vfop_cmd cmd = {
1538 		.done = bnx2x_vfop_flr,
1539 		.block = false,
1540 	};
1541 
1542 	if (vfop->rc < 0)
1543 		goto op_err;
1544 
1545 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1546 
1547 	switch (state) {
1548 	case BNX2X_VFOP_FLR_QUEUES:
1549 		/* the cleanup operations are valid if and only if the VF
1550 		 * was first acquired.
1551 		 */
1552 		if (++(qx->qid) < vf_rxq_count(vf)) {
1553 			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
1554 						       qx->qid);
1555 			if (vfop->rc)
1556 				goto op_err;
1557 			return;
1558 		}
1559 		/* remove multicasts */
1560 		vfop->state = BNX2X_VFOP_FLR_HW;
1561 		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
1562 						0, true);
1563 		if (vfop->rc)
1564 			goto op_err;
1565 		return;
1566 	case BNX2X_VFOP_FLR_HW:
1567 
1568 		/* dispatch final cleanup and wait for HW queues to flush */
1569 		bnx2x_vf_flr_clnup_hw(bp, vf);
1570 
1571 		/* release VF resources */
1572 		bnx2x_vf_free_resc(bp, vf);
1573 
1574 		/* re-open the mailbox */
1575 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1576 
1577 		goto op_done;
1578 	default:
1579 		bnx2x_vfop_default(state);
1580 	}
1581 op_err:
1582 	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
1583 op_done:
1584 	vf->flr_clnup_stage = VF_FLR_ACK;
1585 	bnx2x_vfop_end(bp, vf, vfop);
1586 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1587 }
1588 
1589 static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
1590 			      struct bnx2x_virtf *vf,
1591 			      vfop_handler_t done)
1592 {
1593 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1594 	if (vfop) {
1595 		vfop->args.qx.qid = -1; /* loop */
1596 		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
1597 				 bnx2x_vfop_flr, done);
1598 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
1599 	}
1600 	return -ENOMEM;
1601 }
1602 
1603 static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
1604 {
1605 	int i = prev_vf ? prev_vf->index + 1 : 0;
1606 	struct bnx2x_virtf *vf;
1607 
1608 	/* find next VF to cleanup */
1609 next_vf_to_clean:
1610 	for (;
1611 	     i < BNX2X_NR_VIRTFN(bp) &&
1612 	     (bnx2x_vf(bp, i, state) != VF_RESET ||
1613 	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
1614 	     i++)
1615 		;
1616 
1617 	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i,
1618 	   BNX2X_NR_VIRTFN(bp));
1619 
1620 	if (i < BNX2X_NR_VIRTFN(bp)) {
1621 		vf = BP_VF(bp, i);
1622 
1623 		/* lock the vf pf channel */
1624 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1625 
1626 		/* invoke the VF FLR SM */
1627 		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
1628 			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
1629 				  vf->abs_vfid);
1630 
1631 			/* mark the VF to be ACKED and continue */
1632 			vf->flr_clnup_stage = VF_FLR_ACK;
1633 			goto next_vf_to_clean;
1634 		}
1635 		return;
1636 	}
1637 
1638 	/* we are done, update vf records */
1639 	for_each_vf(bp, i) {
1640 		vf = BP_VF(bp, i);
1641 
1642 		if (vf->flr_clnup_stage != VF_FLR_ACK)
1643 			continue;
1644 
1645 		vf->flr_clnup_stage = VF_FLR_EPILOG;
1646 	}
1647 
1648 	/* Acknowledge the handled VFs.
1649 	 * We acknowledge all the vfs for which an flr was requested, even
1650 	 * those we never opened, since the mcp will interrupt us immediately
1651 	 * again if we only ack some of the bits, resulting in an endless
1652 	 * loop. This can happen for example in KVM where an 'all ones' flr
1653 	 * request is sometimes given by the hypervisor
1654 	 */
1655 	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
1656 	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1657 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1658 		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
1659 			  bp->vfdb->flrd_vfs[i]);
1660 
1661 	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
1662 
1663 	/* clear the acked bits - better yet if the MCP implemented
1664 	 * write-to-clear semantics
1665 	 */
1666 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1667 		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
1668 }
1669 
1670 void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1671 {
1672 	int i;
1673 
1674 	/* Read FLR'd VFs */
1675 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1676 		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
1677 
1678 	DP(BNX2X_MSG_MCP,
1679 	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
1680 	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1681 
1682 	for_each_vf(bp, i) {
1683 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1684 		u32 reset = 0;
1685 
1686 		if (vf->abs_vfid < 32)
1687 			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
1688 		else
1689 			reset = bp->vfdb->flrd_vfs[1] &
1690 				(1 << (vf->abs_vfid - 32));
1691 
1692 		if (reset) {
1693 			/* set as reset and ready for cleanup */
1694 			vf->state = VF_RESET;
1695 			vf->flr_clnup_stage = VF_FLR_CLN;
1696 
1697 			DP(BNX2X_MSG_IOV,
1698 			   "Initiating Final cleanup for VF %d\n",
1699 			   vf->abs_vfid);
1700 		}
1701 	}
1702 
1703 	/* do the FLR cleanup for all marked VFs*/
1704 	bnx2x_vf_flr_clnup(bp, NULL);
1705 }
1706 
1707 /* IOV global initialization routines  */
1708 void bnx2x_iov_init_dq(struct bnx2x *bp)
1709 {
1710 	if (!IS_SRIOV(bp))
1711 		return;
1712 
1713 	/* Set the DQ such that the CID reflect the abs_vfid */
1714 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1715 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1716 
1717 	/* Set VFs starting CID. If it is > 0 the preceding CIDs belong to
1718 	 * the PF L2 queues
1719 	 */
1720 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1721 
1722 	/* The VF window size is the log2 of the max number of CIDs per VF */
1723 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1724 
1725 	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
1726 	 * the PF doorbell size although the two are independent.
1727 	 */
1728 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
1729 	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
1730 
1731 	/* No security checks for now -
1732 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
1733 	 * CID range 0 - 0x1ffff
1734 	 */
1735 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1736 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1737 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1738 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1739 
1740 	/* set the number of VF allowed doorbells to the full DQ range */
1741 	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
1742 
1743 	/* set the VF doorbell threshold */
1744 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
1745 }
1746 
1747 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1748 {
1749 	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
1750 	if (!IS_SRIOV(bp))
1751 		return;
1752 
1753 	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1754 }
1755 
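/* Standard SR-IOV routing-ID arithmetic: VF 'vfid' has routing ID
 * (PF devfn + offset + stride * vfid); its high byte is added to the PF's
 * bus number and its low byte becomes the VF's devfn, as computed by the
 * two helpers below.
 */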
1756 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1757 {
1758 	struct pci_dev *dev = bp->pdev;
1759 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1760 
1761 	return dev->bus->number + ((dev->devfn + iov->offset +
1762 				    iov->stride * vfid) >> 8);
1763 }
1764 
1765 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1766 {
1767 	struct pci_dev *dev = bp->pdev;
1768 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1769 
1770 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1771 }
1772 
1773 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1774 {
1775 	int i, n;
1776 	struct pci_dev *dev = bp->pdev;
1777 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1778 
1779 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1780 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1781 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1782 
1783 		size /= iov->total;
1784 		vf->bars[n].bar = start + size * vf->abs_vfid;
1785 		vf->bars[n].size = size;
1786 	}
1787 }
1788 
1789 static int bnx2x_ari_enabled(struct pci_dev *dev)
1790 {
1791 	return dev->bus->self && dev->bus->self->ari_enabled;
1792 }
1793 
1794 static void
1795 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1796 {
1797 	int sb_id;
1798 	u32 val;
1799 	u8 fid;
1800 
1801 	/* IGU in normal mode - read CAM */
1802 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1803 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1804 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1805 			continue;
1806 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1807 		if (!(fid & IGU_FID_ENCODE_IS_PF))
1808 			bnx2x_vf_set_igu_info(bp, sb_id,
1809 					      (fid & IGU_FID_VF_NUM_MASK));
1810 
1811 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1812 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1813 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1814 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1815 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1816 	}
1817 }
1818 
1819 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1820 {
1821 	if (bp->vfdb) {
1822 		kfree(bp->vfdb->vfqs);
1823 		kfree(bp->vfdb->vfs);
1824 		kfree(bp->vfdb);
1825 	}
1826 	bp->vfdb = NULL;
1827 }
1828 
1829 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1830 {
1831 	int pos;
1832 	struct pci_dev *dev = bp->pdev;
1833 
1834 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1835 	if (!pos) {
1836 		BNX2X_ERR("failed to find SRIOV capability in device\n");
1837 		return -ENODEV;
1838 	}
1839 
1840 	iov->pos = pos;
1841 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1842 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1843 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1844 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1845 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1846 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1847 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1848 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1849 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1850 
1851 	return 0;
1852 }
1853 
1854 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1855 {
1856 	u32 val;
1857 
1858 	/* read the SRIOV capability structure
1859 	 * The fields can be read via configuration read or
1860 	 * directly from the device (starting at offset PCICFG_OFFSET)
1861 	 */
1862 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
1863 		return -ENODEV;
1864 
1865 	/* get the number of SRIOV bars */
1866 	iov->nres = 0;
1867 
1868 	/* read the first_vfid */
1869 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1870 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1871 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1872 
1873 	DP(BNX2X_MSG_IOV,
1874 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1875 	   BP_FUNC(bp),
1876 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1877 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1878 
1879 	return 0;
1880 }
1881 
1882 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
1883 {
1884 	int i;
1885 	u8 queue_count = 0;
1886 
1887 	if (IS_SRIOV(bp))
1888 		for_each_vf(bp, i)
1889 			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1890 
1891 	return queue_count;
1892 }
1893 
1894 /* must be called after PF bars are mapped */
1895 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1896 			int num_vfs_param)
1897 {
1898 	int err, i, qcount;
1899 	struct bnx2x_sriov *iov;
1900 	struct pci_dev *dev = bp->pdev;
1901 
1902 	bp->vfdb = NULL;
1903 
	/* verify this is a PF */
1905 	if (IS_VF(bp))
1906 		return 0;
1907 
1908 	/* verify sriov capability is present in configuration space */
1909 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1910 		return 0;
1911 
1912 	/* verify chip revision */
1913 	if (CHIP_IS_E1x(bp))
1914 		return 0;
1915 
1916 	/* check if SRIOV support is turned off */
1917 	if (!num_vfs_param)
1918 		return 0;
1919 
1920 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1921 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1922 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1923 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1924 		return 0;
1925 	}
1926 
1927 	/* SRIOV can be enabled only with MSIX */
1928 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
1929 	    int_mode_param == BNX2X_INT_MODE_INTX)
1930 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1931 
1932 	err = -EIO;
1933 	/* verify ari is enabled */
1934 	if (!bnx2x_ari_enabled(bp->pdev)) {
1935 		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
1936 		return err;
1937 	}
1938 
1939 	/* verify igu is in normal mode */
1940 	if (CHIP_INT_MODE_IS_BC(bp)) {
1941 		BNX2X_ERR("IGU not normal mode,  SRIOV can not be enabled\n");
1942 		return err;
1943 	}
1944 
1945 	/* allocate the vfs database */
1946 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1947 	if (!bp->vfdb) {
1948 		BNX2X_ERR("failed to allocate vf database\n");
1949 		err = -ENOMEM;
1950 		goto failed;
1951 	}
1952 
	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also, we want this information regardless
	 * of the hypervisor.
	 */
1958 	iov = &(bp->vfdb->sriov);
1959 	err = bnx2x_sriov_info(bp, iov);
1960 	if (err)
1961 		goto failed;
1962 
	/* SR-IOV capability was enabled but there are no VFs */
1964 	if (iov->total == 0)
1965 		goto failed;
1966 
1967 	/* calculate the actual number of VFs */
1968 	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
1969 
1970 	/* allocate the vf array */
	bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
				sizeof(struct bnx2x_virtf), GFP_KERNEL);
1973 	if (!bp->vfdb->vfs) {
1974 		BNX2X_ERR("failed to allocate vf array\n");
1975 		err = -ENOMEM;
1976 		goto failed;
1977 	}
1978 
1979 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1980 	for_each_vf(bp, i) {
1981 		bnx2x_vf(bp, i, index) = i;
1982 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1983 		bnx2x_vf(bp, i, state) = VF_FREE;
1984 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
1985 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
1986 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1987 	}
1988 
1989 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1990 	bnx2x_get_vf_igu_cam_info(bp);
1991 
1992 	/* get the total queue count and allocate the global queue arrays */
1993 	qcount = bnx2x_iov_get_max_queue_count(bp);
1994 
1995 	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kcalloc(qcount, sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
1998 	if (!bp->vfdb->vfqs) {
1999 		BNX2X_ERR("failed to allocate vf queue array\n");
2000 		err = -ENOMEM;
2001 		goto failed;
2002 	}
2003 
2004 	return 0;
2005 failed:
2006 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
2007 	__bnx2x_iov_free_vfdb(bp);
2008 	return err;
2009 }
2010 
2011 void bnx2x_iov_remove_one(struct bnx2x *bp)
2012 {
2013 	/* if SRIOV is not enabled there's nothing to do */
2014 	if (!IS_SRIOV(bp))
2015 		return;
2016 
2017 	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
2018 	pci_disable_sriov(bp->pdev);
2019 	DP(BNX2X_MSG_IOV, "sriov disabled\n");
2020 
2021 	/* free vf database */
2022 	__bnx2x_iov_free_vfdb(bp);
2023 }
2024 
2025 void bnx2x_iov_free_mem(struct bnx2x *bp)
2026 {
2027 	int i;
2028 
2029 	if (!IS_SRIOV(bp))
2030 		return;
2031 
2032 	/* free vfs hw contexts */
2033 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2034 		struct hw_dma *cxt = &bp->vfdb->context[i];
2035 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
2036 	}
2037 
2038 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
2039 		       BP_VFDB(bp)->sp_dma.mapping,
2040 		       BP_VFDB(bp)->sp_dma.size);
2041 
2042 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
2043 		       BP_VF_MBX_DMA(bp)->mapping,
2044 		       BP_VF_MBX_DMA(bp)->size);
2045 
2046 	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
2047 		       BP_VF_BULLETIN_DMA(bp)->mapping,
2048 		       BP_VF_BULLETIN_DMA(bp)->size);
2049 }
2050 
2051 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
2052 {
2053 	size_t tot_size;
2054 	int i, rc = 0;
2055 
2056 	if (!IS_SRIOV(bp))
2057 		return rc;
2058 
2059 	/* allocate vfs hw contexts */
2060 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
2061 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
2062 
2063 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2064 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
2065 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
2066 
2067 		if (cxt->size) {
2068 			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
2069 		} else {
2070 			cxt->addr = NULL;
2071 			cxt->mapping = 0;
2072 		}
2073 		tot_size -= cxt->size;
2074 	}
2075 
2076 	/* allocate vfs ramrods dma memory - client_init and set_mac */
2077 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
2078 	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
2079 			tot_size);
2080 	BP_VFDB(bp)->sp_dma.size = tot_size;
2081 
2082 	/* allocate mailboxes */
2083 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
2084 	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
2085 			tot_size);
2086 	BP_VF_MBX_DMA(bp)->size = tot_size;
2087 
2088 	/* allocate local bulletin boards */
2089 	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
2090 	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
2091 			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
2092 	BP_VF_BULLETIN_DMA(bp)->size = tot_size;
2093 
2094 	return 0;
2095 
2096 alloc_mem_err:
2097 	return -ENOMEM;
2098 }
2099 
2100 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2101 			   struct bnx2x_vf_queue *q)
2102 {
2103 	u8 cl_id = vfq_cl_id(vf, q);
2104 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
2105 	unsigned long q_type = 0;
2106 
2107 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
2108 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
2109 
2110 	/* Queue State object */
2111 	bnx2x_init_queue_obj(bp, &q->sp_obj,
2112 			     cl_id, &q->cid, 1, func_id,
2113 			     bnx2x_vf_sp(bp, vf, q_data),
2114 			     bnx2x_vf_sp_map(bp, vf, q_data),
2115 			     q_type);
2116 
2117 	DP(BNX2X_MSG_IOV,
2118 	   "initialized vf %d's queue object. func id set to %d\n",
2119 	   vf->abs_vfid, q->sp_obj.func_id);
2120 
2121 	/* mac/vlan objects are per queue, but only those
2122 	 * that belong to the leading queue are initialized
2123 	 */
2124 	if (vfq_is_leading(q)) {
2125 		/* mac */
2126 		bnx2x_init_mac_obj(bp, &q->mac_obj,
2127 				   cl_id, q->cid, func_id,
2128 				   bnx2x_vf_sp(bp, vf, mac_rdata),
2129 				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
2130 				   BNX2X_FILTER_MAC_PENDING,
2131 				   &vf->filter_state,
2132 				   BNX2X_OBJ_TYPE_RX_TX,
2133 				   &bp->macs_pool);
2134 		/* vlan */
2135 		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
2136 				    cl_id, q->cid, func_id,
2137 				    bnx2x_vf_sp(bp, vf, vlan_rdata),
2138 				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
2139 				    BNX2X_FILTER_VLAN_PENDING,
2140 				    &vf->filter_state,
2141 				    BNX2X_OBJ_TYPE_RX_TX,
2142 				    &bp->vlans_pool);
2143 
2144 		/* mcast */
2145 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
2146 				     q->cid, func_id, func_id,
2147 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
2148 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2149 				     BNX2X_FILTER_MCAST_PENDING,
2150 				     &vf->filter_state,
2151 				     BNX2X_OBJ_TYPE_RX_TX);
2152 
2153 		vf->leading_rss = cl_id;
2154 	}
2155 }
2156 
2157 /* called by bnx2x_nic_load */
2158 int bnx2x_iov_nic_init(struct bnx2x *bp)
2159 {
2160 	int vfid, qcount, i;
2161 
2162 	if (!IS_SRIOV(bp)) {
2163 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
2164 		return 0;
2165 	}
2166 
2167 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
2168 
2169 	/* initialize vf database */
2170 	for_each_vf(bp, vfid) {
2171 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2172 
2173 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
2174 			BNX2X_CIDS_PER_VF;
2175 
2176 		union cdu_context *base_cxt = (union cdu_context *)
2177 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2178 			(base_vf_cid & (ILT_PAGE_CIDS-1));
2179 
2180 		DP(BNX2X_MSG_IOV,
2181 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
2182 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
2183 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
2184 
2185 		/* init statically provisioned resources */
2186 		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
2187 
2188 		/* queues are initialized during VF-ACQUIRE */
2189 
2190 		/* reserve the vf vlan credit */
2191 		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
2192 
2193 		vf->filter_state = 0;
2194 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
2195 
2196 		/*  init mcast object - This object will be re-initialized
2197 		 *  during VF-ACQUIRE with the proper cl_id and cid.
2198 		 *  It needs to be initialized here so that it can be safely
2199 		 *  handled by a subsequent FLR flow.
2200 		 */
2201 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
2202 				     0xFF, 0xFF, 0xFF,
2203 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
2204 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2205 				     BNX2X_FILTER_MCAST_PENDING,
2206 				     &vf->filter_state,
2207 				     BNX2X_OBJ_TYPE_RX_TX);
2208 
2209 		/* set the mailbox message addresses */
2210 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
2211 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
2212 			MBX_MSG_ALIGNED_SIZE);
2213 
2214 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
2215 			vfid * MBX_MSG_ALIGNED_SIZE;
2216 
2217 		/* Enable vf mailbox */
2218 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
2219 	}
2220 
2221 	/* Final VF init */
2222 	qcount = 0;
2223 	for_each_vf(bp, i) {
2224 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2225 
2226 		/* fill in the BDF and bars */
2227 		vf->bus = bnx2x_vf_bus(bp, i);
2228 		vf->devfn = bnx2x_vf_devfn(bp, i);
2229 		bnx2x_vf_set_bars(bp, vf);
2230 
2231 		DP(BNX2X_MSG_IOV,
2232 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
2233 		   vf->abs_vfid, vf->bus, vf->devfn,
2234 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
2235 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
2236 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
2237 
2238 		/* set local queue arrays */
2239 		vf->vfqs = &bp->vfdb->vfqs[qcount];
2240 		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
2241 	}
2242 
2243 	return 0;
2244 }
2245 
2246 /* called by bnx2x_chip_cleanup */
2247 int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
2248 {
2249 	int i;
2250 
2251 	if (!IS_SRIOV(bp))
2252 		return 0;
2253 
2254 	/* release all the VFs */
2255 	for_each_vf(bp, i)
2256 		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
2257 
2258 	return 0;
2259 }
2260 
2261 /* called by bnx2x_init_hw_func, returns the next ilt line */
2262 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
2263 {
2264 	int i;
2265 	struct bnx2x_ilt *ilt = BP_ILT(bp);
2266 
2267 	if (!IS_SRIOV(bp))
2268 		return line;
2269 
2270 	/* set vfs ilt lines */
2271 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2272 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
2273 
2274 		ilt->lines[line+i].page = hw_cxt->addr;
2275 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
2276 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
2277 	}
2278 	return line + i;
2279 }
2280 
2281 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
2282 {
2283 	return ((cid >= BNX2X_FIRST_VF_CID) &&
2284 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
2285 }
2286 
2287 static
2288 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
2289 					struct bnx2x_vf_queue *vfq,
2290 					union event_ring_elem *elem)
2291 {
2292 	unsigned long ramrod_flags = 0;
2293 	int rc = 0;
2294 
2295 	/* Always push next commands out, don't wait here */
2296 	set_bit(RAMROD_CONT, &ramrod_flags);
2297 
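	/* The bits of the echo field above BNX2X_SWCID_SHIFT carry the
	 * pending classification command type (BNX2X_FILTER_*_PENDING);
	 * the low bits hold the sw cid (see bnx2x_iov_eq_sp_event()).
	 */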
2298 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
2299 	case BNX2X_FILTER_MAC_PENDING:
2300 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
2301 					   &ramrod_flags);
2302 		break;
2303 	case BNX2X_FILTER_VLAN_PENDING:
2304 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
2305 					    &ramrod_flags);
2306 		break;
2307 	default:
2308 		BNX2X_ERR("Unsupported classification command: %d\n",
2309 			  elem->message.data.eth_event.echo);
2310 		return;
2311 	}
2312 	if (rc < 0)
2313 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
2314 	else if (rc > 0)
2315 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
2316 }
2317 
2318 static
2319 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
2320 			       struct bnx2x_virtf *vf)
2321 {
2322 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2323 	int rc;
2324 
2325 	rparam.mcast_obj = &vf->mcast_obj;
2326 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2327 
2328 	/* If there are pending mcast commands - send them */
2329 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2330 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2331 		if (rc < 0)
2332 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2333 				  rc);
2334 	}
2335 }
2336 
2337 static
2338 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2339 				 struct bnx2x_virtf *vf)
2340 {
2341 	smp_mb__before_clear_bit();
2342 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2343 	smp_mb__after_clear_bit();
2344 }
2345 
2346 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2347 {
2348 	struct bnx2x_virtf *vf;
2349 	int qidx = 0, abs_vfid;
2350 	u8 opcode;
2351 	u16 cid = 0xffff;
2352 
2353 	if (!IS_SRIOV(bp))
2354 		return 1;
2355 
2356 	/* first get the cid - the only events we handle here are cfc-delete
2357 	 * and set-mac completion
2358 	 */
2359 	opcode = elem->message.opcode;
2360 
2361 	switch (opcode) {
2362 	case EVENT_RING_OPCODE_CFC_DEL:
2363 		cid = SW_CID((__force __le32)
2364 			     elem->message.data.cfc_del_event.cid);
2365 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2366 		break;
2367 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2368 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2369 	case EVENT_RING_OPCODE_FILTERS_RULES:
2370 		cid = (elem->message.data.eth_event.echo &
2371 		       BNX2X_SWCID_MASK);
2372 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2373 		break;
2374 	case EVENT_RING_OPCODE_VF_FLR:
2375 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
2376 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2377 		   abs_vfid);
2378 		goto get_vf;
2379 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2380 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2381 		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
2382 		   abs_vfid);
2383 		goto get_vf;
2384 	default:
2385 		return 1;
2386 	}
2387 
	/* check if the cid is within the VF range */
2389 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2390 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2391 		return 1;
2392 	}
2393 
2394 	/* extract vf and rxq index from vf_cid - relies on the following:
2395 	 * 1. vfid on cid reflects the true abs_vfid
2396 	 * 2. the max number of VFs (per path) is 64
2397 	 */
2398 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2399 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2400 get_vf:
2401 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
2402 
2403 	if (!vf) {
2404 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
2405 			  cid, abs_vfid);
2406 		return 0;
2407 	}
2408 
2409 	switch (opcode) {
2410 	case EVENT_RING_OPCODE_CFC_DEL:
2411 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
2412 		   vf->abs_vfid, qidx);
2413 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
2414 						       &vfq_get(vf,
2415 								qidx)->sp_obj,
2416 						       BNX2X_Q_CMD_CFC_DEL);
2417 		break;
2418 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2419 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
2420 		   vf->abs_vfid, qidx);
2421 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
2422 		break;
2423 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2424 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
2425 		   vf->abs_vfid, qidx);
2426 		bnx2x_vf_handle_mcast_eqe(bp, vf);
2427 		break;
2428 	case EVENT_RING_OPCODE_FILTERS_RULES:
2429 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
2430 		   vf->abs_vfid, qidx);
2431 		bnx2x_vf_handle_filters_eqe(bp, vf);
2432 		break;
2433 	case EVENT_RING_OPCODE_VF_FLR:
2434 		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
2435 		   vf->abs_vfid);
2436 		/* Do nothing for now */
2437 		break;
2438 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2439 		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
2440 		   vf->abs_vfid);
2441 		/* Do nothing for now */
2442 		break;
2443 	}
2444 	/* SRIOV: reschedule any 'in_progress' operations */
2445 	bnx2x_iov_sp_event(bp, cid, false);
2446 
2447 	return 0;
2448 }
2449 
2450 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2451 {
2452 	/* extract the vf from vf_cid - relies on the following:
2453 	 * 1. vfid on cid reflects the true abs_vfid
2454 	 * 2. the max number of VFs (per path) is 64
2455 	 */
2456 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2457 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
2458 }
2459 
2460 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2461 				struct bnx2x_queue_sp_obj **q_obj)
2462 {
2463 	struct bnx2x_virtf *vf;
2464 
2465 	if (!IS_SRIOV(bp))
2466 		return;
2467 
2468 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2469 
2470 	if (vf) {
2471 		/* extract queue index from vf_cid - relies on the following:
2472 		 * 1. vfid on cid reflects the true abs_vfid
2473 		 * 2. the max number of VFs (per path) is 64
2474 		 */
2475 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2476 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2477 	} else {
2478 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2479 	}
2480 }
2481 
2482 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2483 {
2484 	struct bnx2x_virtf *vf;
2485 
	/* check if the cid is within the VF range */
2487 	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2488 		return;
2489 
2490 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2491 	if (vf) {
2492 		/* set in_progress flag */
2493 		atomic_set(&vf->op_in_progress, 1);
2494 		if (queue_work)
2495 			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2496 	}
2497 }
2498 
2499 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2500 {
2501 	int i;
2502 	int first_queue_query_index, num_queues_req;
2503 	dma_addr_t cur_data_offset;
2504 	struct stats_query_entry *cur_query_entry;
2505 	u8 stats_count = 0;
2506 	bool is_fcoe = false;
2507 
2508 	if (!IS_SRIOV(bp))
2509 		return;
2510 
2511 	if (!NO_FCOE(bp))
2512 		is_fcoe = true;
2513 
2514 	/* fcoe adds one global request and one queue request */
2515 	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2516 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2517 		(is_fcoe ? 0 : 1);
2518 
2519 	DP(BNX2X_MSG_IOV,
2520 	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2521 	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2522 	   first_queue_query_index + num_queues_req);
2523 
2524 	cur_data_offset = bp->fw_stats_data_mapping +
2525 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2526 		num_queues_req * sizeof(struct per_queue_stats);
2527 
2528 	cur_query_entry = &bp->fw_stats_req->
2529 		query[first_queue_query_index + num_queues_req];
2530 
2531 	for_each_vf(bp, i) {
2532 		int j;
2533 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2534 
2535 		if (vf->state != VF_ENABLED) {
2536 			DP(BNX2X_MSG_IOV,
2537 			   "vf %d not enabled so no stats for it\n",
2538 			   vf->abs_vfid);
2539 			continue;
2540 		}
2541 
2542 		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2543 		for_each_vfq(vf, j) {
2544 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2545 
			/* collect stats for active queues only */
2547 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2548 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
2549 				continue;
2550 
2551 			/* create stats query entry for this queue */
2552 			cur_query_entry->kind = STATS_TYPE_QUEUE;
2553 			cur_query_entry->index = vfq_cl_id(vf, rxq);
2554 			cur_query_entry->funcID =
2555 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2556 			cur_query_entry->address.hi =
2557 				cpu_to_le32(U64_HI(vf->fw_stat_map));
2558 			cur_query_entry->address.lo =
2559 				cpu_to_le32(U64_LO(vf->fw_stat_map));
2560 			DP(BNX2X_MSG_IOV,
2561 			   "added address %x %x for vf %d queue %d client %d\n",
2562 			   cur_query_entry->address.hi,
2563 			   cur_query_entry->address.lo, cur_query_entry->funcID,
2564 			   j, cur_query_entry->index);
2565 			cur_query_entry++;
2566 			cur_data_offset += sizeof(struct per_queue_stats);
2567 			stats_count++;
2568 		}
2569 	}
2570 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2571 }
2572 
2573 void bnx2x_iov_sp_task(struct bnx2x *bp)
2574 {
2575 	int i;
2576 
2577 	if (!IS_SRIOV(bp))
2578 		return;
2579 	/* Iterate over all VFs and invoke state transition for VFs with
2580 	 * 'in-progress' slow-path operations
2581 	 */
2582 	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
2583 	for_each_vf(bp, i) {
2584 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2585 
2586 		if (!list_empty(&vf->op_list_head) &&
2587 		    atomic_read(&vf->op_in_progress)) {
2588 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2589 			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2590 		}
2591 	}
2592 }
2593 
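/* Map an IGU statistics id to the VF whose status block range contains it.
 * Note: if no VF matches, the last VF iterated is returned, so callers are
 * expected to pass a stat_id that belongs to some VF.
 */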
2594 static inline
2595 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2596 {
2597 	int i;
2598 	struct bnx2x_virtf *vf = NULL;
2599 
2600 	for_each_vf(bp, i) {
2601 		vf = BP_VF(bp, i);
2602 		if (stat_id >= vf->igu_base_id &&
2603 		    stat_id < vf->igu_base_id + vf_sb_count(vf))
2604 			break;
2605 	}
2606 	return vf;
2607 }
2608 
2609 /* VF API helpers */
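/* Program one entry of the PXP host zone permission table. When enabling,
 * the entry holds the owning abs_vfid with bit 6 set (presumably the
 * valid/enable flag); when disabling, the entry is cleared.
 */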
2610 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2611 				u8 enable)
2612 {
2613 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2614 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2615 
2616 	REG_WR(bp, reg, val);
2617 }
2618 
2619 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2620 {
2621 	int i;
2622 
2623 	for_each_vfq(vf, i)
2624 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2625 				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
2626 }
2627 
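/* Disable the VF in the IGU: pretend to be the VF so the GRC access to
 * IGU_REG_VF_CONFIGURATION hits the VF's copy of the register, clear its
 * enable bits, then restore the PF's own function.
 */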
2628 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2629 {
2630 	u32 val;
2631 
2632 	/* clear the VF configuration - pretend */
2633 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2634 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2635 	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2636 		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2637 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2638 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2639 }
2640 
2641 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2642 {
2643 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2644 		     BNX2X_VF_MAX_QUEUES);
2645 }
2646 
2647 static
2648 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2649 			    struct vf_pf_resc_request *req_resc)
2650 {
2651 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2652 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2653 
2654 	return ((req_resc->num_rxqs <= rxq_cnt) &&
2655 		(req_resc->num_txqs <= txq_cnt) &&
2656 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
2657 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2658 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2659 }
2660 
2661 /* CORE VF API */
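/* Acquire resources on behalf of a VF. The VF's CID block starts at
 * abs_vfid * BNX2X_CIDS_PER_VF within the VF CID range (abs_vfid equals
 * first_vf_in_pf + index), and base_cxt points at the matching CDU context
 * entry inside the ILT page covering that block.
 */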
2662 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2663 		     struct vf_pf_resc_request *resc)
2664 {
2665 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2666 		BNX2X_CIDS_PER_VF;
2667 
2668 	union cdu_context *base_cxt = (union cdu_context *)
2669 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2670 		(base_vf_cid & (ILT_PAGE_CIDS-1));
2671 	int i;
2672 
	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed
	 * the already acquired numbers.
	 */
2678 	if (vf->state == VF_ACQUIRED) {
2679 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2680 		   vf->abs_vfid);
2681 
2682 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2684 				  vf->abs_vfid);
2685 			return -EINVAL;
2686 		}
2687 		return 0;
2688 	}
2689 
2690 	/* Otherwise vf state must be 'free' or 'reset' */
2691 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
2692 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2693 			  vf->abs_vfid, vf->state);
2694 		return -EINVAL;
2695 	}
2696 
	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals.
	 */
2701 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2702 		DP(BNX2X_MSG_IOV,
2703 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2704 		/* set the max resource in the vf */
2705 		return -ENOMEM;
2706 	}
2707 
2708 	/* Set resources counters - 0 request means max available */
2709 	vf_sb_count(vf) = resc->num_sbs;
2710 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2711 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2712 	if (resc->num_mac_filters)
2713 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2714 	if (resc->num_vlan_filters)
2715 		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2716 
2717 	DP(BNX2X_MSG_IOV,
2718 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2719 	   vf_sb_count(vf), vf_rxq_count(vf),
2720 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
2721 	   vf_vlan_rules_cnt(vf));
2722 
2723 	/* Initialize the queues */
2724 	if (!vf->vfqs) {
2725 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2726 		return -EINVAL;
2727 	}
2728 
2729 	for_each_vfq(vf, i) {
2730 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
2731 
2732 		if (!q) {
2733 			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
2734 			return -EINVAL;
2735 		}
2736 
2737 		q->index = i;
2738 		q->cxt = &((base_cxt + i)->eth);
2739 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2740 
2741 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2742 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2743 
2744 		/* init SP objects */
2745 		bnx2x_vfq_init(bp, vf, q);
2746 	}
2747 	vf->state = VF_ACQUIRED;
2748 	return 0;
2749 }
2750 
2751 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2752 {
2753 	struct bnx2x_func_init_params func_init = {0};
2754 	u16 flags = 0;
2755 	int i;
2756 
2757 	/* the sb resources are initialized at this point, do the
2758 	 * FW/HW initializations
2759 	 */
2760 	for_each_vf_sb(vf, i)
2761 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2762 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2763 
2764 	/* Sanity checks */
2765 	if (vf->state != VF_ACQUIRED) {
2766 		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2767 		   vf->abs_vfid, vf->state);
2768 		return -EINVAL;
2769 	}
2770 	/* FLR cleanup epilogue */
2771 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2772 		return -EBUSY;
2773 
2774 	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2776 
2777 	/* vf init */
2778 	if (vf->cfg_flags & VF_CFG_STATS)
2779 		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2780 
2781 	if (vf->cfg_flags & VF_CFG_TPA)
2782 		flags |= FUNC_FLG_TPA;
2783 
2784 	if (is_vf_multi(vf))
2785 		flags |= FUNC_FLG_RSS;
2786 
2787 	/* function setup */
2788 	func_init.func_flgs = flags;
2789 	func_init.pf_id = BP_FUNC(bp);
2790 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2791 	func_init.fw_stat_map = vf->fw_stat_map;
2792 	func_init.spq_map = vf->spq_map;
2793 	func_init.spq_prod = 0;
2794 	bnx2x_func_init(bp, &func_init);
2795 
2796 	/* Enable the vf */
2797 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
2798 	bnx2x_vf_enable_traffic(bp, vf);
2799 
2800 	/* queue protection table */
2801 	for_each_vfq(vf, i)
2802 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2803 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2804 
2805 	vf->state = VF_ENABLED;
2806 
2807 	/* update vf bulletin board */
2808 	bnx2x_post_vf_bulletin(bp, vf->index);
2809 
2810 	return 0;
2811 }
2812 
2813 /* VFOP close (teardown the queues, delete mcasts and close HW) */
2814 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2815 {
2816 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2817 	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2818 	enum bnx2x_vfop_close_state state = vfop->state;
2819 	struct bnx2x_vfop_cmd cmd = {
2820 		.done = bnx2x_vfop_close,
2821 		.block = false,
2822 	};
2823 
2824 	if (vfop->rc < 0)
2825 		goto op_err;
2826 
2827 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2828 
2829 	switch (state) {
2830 	case BNX2X_VFOP_CLOSE_QUEUES:
2831 
2832 		if (++(qx->qid) < vf_rxq_count(vf)) {
2833 			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2834 			if (vfop->rc)
2835 				goto op_err;
2836 			return;
2837 		}
2838 
2839 		/* remove multicasts */
2840 		vfop->state = BNX2X_VFOP_CLOSE_HW;
2841 		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
2842 		if (vfop->rc)
2843 			goto op_err;
2844 		return;
2845 
2846 	case BNX2X_VFOP_CLOSE_HW:
2847 
2848 		/* disable the interrupts */
2849 		DP(BNX2X_MSG_IOV, "disabling igu\n");
2850 		bnx2x_vf_igu_disable(bp, vf);
2851 
2852 		/* disable the VF */
2853 		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2854 		bnx2x_vf_clr_qtbl(bp, vf);
2855 
2856 		goto op_done;
2857 	default:
2858 		bnx2x_vfop_default(state);
2859 	}
2860 op_err:
2861 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2862 op_done:
2863 	vf->state = VF_ACQUIRED;
2864 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
2865 	bnx2x_vfop_end(bp, vf, vfop);
2866 }
2867 
2868 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2869 			 struct bnx2x_virtf *vf,
2870 			 struct bnx2x_vfop_cmd *cmd)
2871 {
2872 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2873 	if (vfop) {
2874 		vfop->args.qx.qid = -1; /* loop */
2875 		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2876 				 bnx2x_vfop_close, cmd->done);
2877 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2878 					     cmd->block);
2879 	}
2880 	return -ENOMEM;
2881 }
2882 
/* VF release can be called either when: 1. the VF was acquired but
 * not enabled, or 2. the VF was enabled or is in the process of being
 * enabled
 */
2887 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2888 {
2889 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2890 	struct bnx2x_vfop_cmd cmd = {
2891 		.done = bnx2x_vfop_release,
2892 		.block = false,
2893 	};
2894 
2895 	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2896 
2897 	if (vfop->rc < 0)
2898 		goto op_err;
2899 
2900 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2901 	   vf->state == VF_FREE ? "Free" :
2902 	   vf->state == VF_ACQUIRED ? "Acquired" :
2903 	   vf->state == VF_ENABLED ? "Enabled" :
2904 	   vf->state == VF_RESET ? "Reset" :
2905 	   "Unknown");
2906 
2907 	switch (vf->state) {
2908 	case VF_ENABLED:
2909 		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
2910 		if (vfop->rc)
2911 			goto op_err;
2912 		return;
2913 
2914 	case VF_ACQUIRED:
2915 		DP(BNX2X_MSG_IOV, "about to free resources\n");
2916 		bnx2x_vf_free_resc(bp, vf);
2917 		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2918 		goto op_done;
2919 
2920 	case VF_FREE:
2921 	case VF_RESET:
2922 		/* do nothing */
2923 		goto op_done;
2924 	default:
2925 		bnx2x_vfop_default(vf->state);
2926 	}
2927 op_err:
2928 	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
2929 op_done:
2930 	bnx2x_vfop_end(bp, vf, vfop);
2931 }
2932 
2933 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
2934 			   struct bnx2x_virtf *vf,
2935 			   struct bnx2x_vfop_cmd *cmd)
2936 {
2937 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2938 	if (vfop) {
2939 		bnx2x_vfop_opset(-1, /* use vf->state */
2940 				 bnx2x_vfop_release, cmd->done);
2941 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
2942 					     cmd->block);
2943 	}
2944 	return -ENOMEM;
2945 }
2946 
2947 /* VF release ~ VF close + VF release-resources
2948  * Release is the ultimate SW shutdown and is called whenever an
2949  * irrecoverable error is encountered.
2950  */
2951 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
2952 {
2953 	struct bnx2x_vfop_cmd cmd = {
2954 		.done = NULL,
2955 		.block = block,
2956 	};
2957 	int rc;
2958 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2959 
2960 	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
2961 	if (rc)
2962 		WARN(rc,
2963 		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
2964 		     vf->abs_vfid, rc);
2965 }
2966 
2967 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
2968 			      struct bnx2x_virtf *vf, u32 *sbdf)
2969 {
2970 	*sbdf = vf->devfn | (vf->bus << 8);
2971 }
2972 
2973 static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
2974 		       struct bnx2x_vf_bar_info *bar_info)
2975 {
2976 	int n;
2977 
2978 	bar_info->nr_bars = bp->vfdb->sriov.nres;
2979 	for (n = 0; n < bar_info->nr_bars; n++)
2980 		bar_info->bars[n] = vf->bars[n];
2981 }
2982 
2983 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2984 			      enum channel_tlvs tlv)
2985 {
2986 	/* lock the channel */
2987 	mutex_lock(&vf->op_mutex);
2988 
2989 	/* record the locking op */
2990 	vf->op_current = tlv;
2991 
2992 	/* log the lock */
2993 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2994 	   vf->abs_vfid, tlv);
2995 }
2996 
2997 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2998 				enum channel_tlvs expected_tlv)
2999 {
3000 	WARN(expected_tlv != vf->op_current,
3001 	     "lock mismatch: expected %d found %d", expected_tlv,
3002 	     vf->op_current);
3003 
	/* unlock the channel */
3005 	mutex_unlock(&vf->op_mutex);
3006 
3007 	/* log the unlock */
3008 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
3009 	   vf->abs_vfid, vf->op_current);
3010 
	/* clear the locking op */
3012 	vf->op_current = CHANNEL_TLV_NONE;
3013 }
3014 
3015 void bnx2x_enable_sriov(struct bnx2x *bp)
3016 {
3017 	int rc = 0;
3018 
	/* disable sriov in case it is still enabled */
3020 	pci_disable_sriov(bp->pdev);
3021 	DP(BNX2X_MSG_IOV, "sriov disabled\n");
3022 
3023 	/* enable sriov */
3024 	DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
3025 	rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
3026 	if (rc)
3027 		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3028 	else
3029 		DP(BNX2X_MSG_IOV, "sriov enabled\n");
3030 }
3031 
3032 /* New mac for VF. Consider these cases:
3033  * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
3034  *    supply at acquire.
3035  * 2. VF has already been acquired but has not yet initialized - store in local
3036  *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
3037  *    will configure this mac when it is ready.
3038  * 3. VF has already initialized but has not yet setup a queue - post the new
3039  *    mac on VF's bulletin board right now. VF will configure this mac when it
3040  *    is ready.
3041  * 4. VF has already set a queue - delete any macs already configured for this
3042  *    queue and manually config the new mac.
 * In any event, once this function has been called, refuse any attempts by
 * the VF to configure any mac for itself except for this mac. In case of a
 * race where the VF fails to see the new post on its bulletin board before
 * sending a mac configuration request, the PF will simply fail the request
 * and the VF can try again after consulting its bulletin board.
3048  */
3049 int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
3050 {
3051 	struct bnx2x *bp = netdev_priv(dev);
3052 	int rc, q_logical_state, vfidx = queue;
3053 	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3054 	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3055 
3056 	/* if SRIOV is disabled there is nothing to do (and somewhere, someone
3057 	 * has erred).
3058 	 */
3059 	if (!IS_SRIOV(bp)) {
3060 		BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
3061 		return -EINVAL;
3062 	}
3063 
3064 	if (!is_valid_ether_addr(mac)) {
3065 		BNX2X_ERR("mac address invalid\n");
3066 		return -EINVAL;
3067 	}
3068 
	/* update the PF's copy of the VF's bulletin. The PF will no longer
	 * accept mac configuration requests from the VF unless they match
	 * this mac.
	 */
3072 	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3073 	memcpy(bulletin->mac, mac, ETH_ALEN);
3074 
3075 	/* Post update on VF's bulletin board */
3076 	rc = bnx2x_post_vf_bulletin(bp, vfidx);
3077 	if (rc) {
3078 		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3079 		return rc;
3080 	}
3081 
3082 	/* is vf initialized and queue set up? */
3083 	q_logical_state =
3084 		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3085 	if (vf->state == VF_ENABLED &&
3086 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3087 		/* configure the mac in device on this vf's queue */
3088 		unsigned long flags = 0;
3089 		struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3090 
3091 		/* must lock vfpf channel to protect against vf flows */
3092 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3093 
		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   CHANNEL_TLV_PF_SET_MAC);
			return -EINVAL;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   CHANNEL_TLV_PF_SET_MAC);
			return -EINVAL;
		}
3107 
3108 		/* configure the new mac to device */
3109 		__set_bit(RAMROD_COMP_WAIT, &flags);
3110 		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3111 				  BNX2X_ETH_MAC, &flags);
3112 
3113 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3114 	}
3115 
3116 	return rc;
3117 }
3118 
3119 /* crc is the first field in the bulletin board. compute the crc over the
3120  * entire bulletin board excluding the crc field itself
3121  */
3122 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
3123 			  struct pf_vf_bulletin_content *bulletin)
3124 {
3125 	return crc32(BULLETIN_CRC_SEED,
3126 		 ((u8 *)bulletin) + sizeof(bulletin->crc),
3127 		 bulletin->length - sizeof(bulletin->crc));
3128 }
3129 
3130 /* Check for new posts on the bulletin board */
3131 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3132 {
3133 	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
3134 	int attempts;
3135 
3136 	/* bulletin board hasn't changed since last sample */
3137 	if (bp->old_bulletin.version == bulletin.version)
3138 		return PFVF_BULLETIN_UNCHANGED;
3139 
3140 	/* validate crc of new bulletin board */
3141 	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling the structure mid-post may result in corrupted
		 * data; validate the crc to ensure coherency.
		 */
3145 		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3146 			bulletin = bp->pf2vf_bulletin->content;
3147 			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
3148 								  &bulletin))
3149 				break;
3150 			BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n",
3151 				  bulletin.crc,
3152 				  bnx2x_crc_vf_bulletin(bp, &bulletin));
3153 		}
3154 		if (attempts >= BULLETIN_ATTEMPTS) {
3155 			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3156 				  attempts);
3157 			return PFVF_BULLETIN_CRC_ERR;
3158 		}
3159 	}
3160 
3161 	/* the mac address in bulletin board is valid and is new */
3162 	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
3163 	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
3164 		/* update new mac to net device */
3165 		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3166 	}
3167 
3168 	/* copy new bulletin board to bp */
3169 	bp->old_bulletin = bulletin;
3170 
3171 	return PFVF_BULLETIN_UPDATED;
3172 }
3173 
3174 void bnx2x_vf_map_doorbells(struct bnx2x *bp)
3175 {
3176 	/* vf doorbells are embedded within the regview */
3177 	bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
3178 }
3179 
3180 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3181 {
3182 	/* allocate vf2pf mailbox for vf to pf channel */
3183 	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
3184 			sizeof(struct bnx2x_vf_mbx_msg));
3185 
3186 	/* allocate pf 2 vf bulletin board */
3187 	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
3188 			sizeof(union pf_vf_bulletin));
3189 
3190 	return 0;
3191 
3192 alloc_mem_err:
3193 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3194 		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
3197 	return -ENOMEM;
3198 }
3199