1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2012 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include "bnx2x_sriov.h"
24 
25 /* General service functions */
26 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
27 					 u16 pf_id)
28 {
29 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
30 		pf_id);
31 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
32 		pf_id);
33 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
34 		pf_id);
35 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
36 		pf_id);
37 }
38 
39 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
40 					u8 enable)
41 {
42 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
43 		enable);
44 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
45 		enable);
46 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
47 		enable);
48 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
49 		enable);
50 }
51 
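/* return the index of the VF whose abs_vfid matches, or BNX2X_NR_VIRTFN(bp)
 * if no such VF exists
 */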
52 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
53 {
54 	int idx;
55 
56 	for_each_vf(bp, idx)
57 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
58 			break;
59 	return idx;
60 }
61 
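/* return the VF structure for abs_vfid, or NULL if it does not exist */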
62 static
63 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
64 {
65 	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
66 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
67 }
68 
69 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
70 				u8 igu_sb_id, u8 segment, u16 index, u8 op,
71 				u8 update)
72 {
73 	/* acking a VF sb through the PF - use the GRC */
74 	u32 ctl;
75 	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
76 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
77 	u32 func_encode = vf->abs_vfid;
78 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
79 	struct igu_regular cmd_data = {0};
80 
81 	cmd_data.sb_id_and_flags =
82 			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
83 			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
84 			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
85 			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
86 
87 	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
88 	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
89 	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
90 
91 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
92 	   cmd_data.sb_id_and_flags, igu_addr_data);
93 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
94 	mmiowb();
95 	barrier();
96 
97 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
98 	   ctl, igu_addr_ctl);
99 	REG_WR(bp, igu_addr_ctl, ctl);
100 	mmiowb();
101 	barrier();
102 }
103 /* VFOP - VF slow-path operation support */
104 
105 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
106 
107 /* VFOP operations states */
108 enum bnx2x_vfop_qctor_state {
109 	   BNX2X_VFOP_QCTOR_INIT,
110 	   BNX2X_VFOP_QCTOR_SETUP,
111 	   BNX2X_VFOP_QCTOR_INT_EN
112 };
113 
114 enum bnx2x_vfop_qdtor_state {
115 	   BNX2X_VFOP_QDTOR_HALT,
116 	   BNX2X_VFOP_QDTOR_TERMINATE,
117 	   BNX2X_VFOP_QDTOR_CFCDEL,
118 	   BNX2X_VFOP_QDTOR_DONE
119 };
120 
121 enum bnx2x_vfop_vlan_mac_state {
122 	   BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
123 	   BNX2X_VFOP_VLAN_MAC_CLEAR,
124 	   BNX2X_VFOP_VLAN_MAC_CHK_DONE,
125 	   BNX2X_VFOP_MAC_CONFIG_LIST,
126 	   BNX2X_VFOP_VLAN_CONFIG_LIST,
127 	   BNX2X_VFOP_VLAN_CONFIG_LIST_0
128 };
129 
130 enum bnx2x_vfop_qsetup_state {
131 	   BNX2X_VFOP_QSETUP_CTOR,
132 	   BNX2X_VFOP_QSETUP_VLAN0,
133 	   BNX2X_VFOP_QSETUP_DONE
134 };
135 
136 enum bnx2x_vfop_mcast_state {
137 	   BNX2X_VFOP_MCAST_DEL,
138 	   BNX2X_VFOP_MCAST_ADD,
139 	   BNX2X_VFOP_MCAST_CHK_DONE
140 };
141 
142 enum bnx2x_vfop_close_state {
143 	   BNX2X_VFOP_CLOSE_QUEUES,
144 	   BNX2X_VFOP_CLOSE_HW
145 };
146 
147 enum bnx2x_vfop_rxmode_state {
148 	   BNX2X_VFOP_RXMODE_CONFIG,
149 	   BNX2X_VFOP_RXMODE_DONE
150 };
151 
152 enum bnx2x_vfop_qteardown_state {
153 	   BNX2X_VFOP_QTEARDOWN_RXMODE,
154 	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
155 	   BNX2X_VFOP_QTEARDOWN_CLR_MAC,
156 	   BNX2X_VFOP_QTEARDOWN_QDTOR,
157 	   BNX2X_VFOP_QTEARDOWN_DONE
158 };
159 
160 #define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
161 
162 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
163 			      struct bnx2x_queue_init_params *init_params,
164 			      struct bnx2x_queue_setup_params *setup_params,
165 			      u16 q_idx, u16 sb_idx)
166 {
167 	DP(BNX2X_MSG_IOV,
168 	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
169 	   vf->abs_vfid,
170 	   q_idx,
171 	   sb_idx,
172 	   init_params->tx.sb_cq_index,
173 	   init_params->tx.hc_rate,
174 	   setup_params->flags,
175 	   setup_params->txq_params.traffic_type);
176 }
177 
178 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
179 			    struct bnx2x_queue_init_params *init_params,
180 			    struct bnx2x_queue_setup_params *setup_params,
181 			    u16 q_idx, u16 sb_idx)
182 {
183 	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
184 
185 	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
186 	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
187 	   vf->abs_vfid,
188 	   q_idx,
189 	   sb_idx,
190 	   init_params->rx.sb_cq_index,
191 	   init_params->rx.hc_rate,
192 	   setup_params->gen_params.mtu,
193 	   rxq_params->buf_sz,
194 	   rxq_params->sge_buf_sz,
195 	   rxq_params->max_sges_pkt,
196 	   rxq_params->tpa_agg_sz,
197 	   setup_params->flags,
198 	   rxq_params->drop_flags,
199 	   rxq_params->cache_line_log);
200 }
201 
202 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
203 			   struct bnx2x_virtf *vf,
204 			   struct bnx2x_vf_queue *q,
205 			   struct bnx2x_vfop_qctor_params *p,
206 			   unsigned long q_type)
207 {
208 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
209 	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
210 
211 	/* INIT */
212 
213 	/* Enable host coalescing in the transition to INIT state */
214 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
215 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
216 
217 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
218 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
219 
220 	/* FW SB ID */
221 	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
222 	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
223 
224 	/* context */
225 	init_p->cxts[0] = q->cxt;
226 
227 	/* SETUP */
228 
229 	/* Setup-op general parameters */
230 	setup_p->gen_params.spcl_id = vf->sp_cl_id;
231 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
232 
233 	/* Setup-op pause params:
234 	 * Nothing to do, the pause thresholds are set by default to 0 which
235 	 * effectively turns off the feature for this queue. We don't want
236 	 * one queue (VF) to interfere with another queue (another VF)
237 	 */
238 	if (vf->cfg_flags & VF_CFG_FW_FC)
239 		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
240 			  vf->abs_vfid);
241 	/* Setup-op flags:
242 	 * collect statistics, zero statistics, local-switching, security,
243 	 * OV for Flex10, RSS and MCAST for leading
244 	 */
245 	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
246 		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
247 
248 	/* for VFs, enable tx switching, bd coherency, and mac address
249 	 * anti-spoofing
250 	 */
251 	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
252 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
253 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
254 
255 	if (vfq_is_leading(q)) {
256 		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
257 		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
258 	}
259 
260 	/* Setup-op rx parameters */
261 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
262 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
263 
264 		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
265 		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
266 		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
267 
268 		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
269 			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
270 	}
271 
272 	/* Setup-op tx parameters */
273 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
274 		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
275 		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
276 	}
277 }
278 
279 /* VFOP queue construction */
280 static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
281 {
282 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
283 	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
284 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
285 	enum bnx2x_vfop_qctor_state state = vfop->state;
286 
287 	bnx2x_vfop_reset_wq(vf);
288 
289 	if (vfop->rc < 0)
290 		goto op_err;
291 
292 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
293 
294 	switch (state) {
295 	case BNX2X_VFOP_QCTOR_INIT:
296 
297 		/* has this queue already been opened? */
298 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
299 		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
300 			DP(BNX2X_MSG_IOV,
301 			   "Entered qctor but queue was already up. Aborting gracefully\n");
302 			goto op_done;
303 		}
304 
305 		/* next state */
306 		vfop->state = BNX2X_VFOP_QCTOR_SETUP;
307 
308 		q_params->cmd = BNX2X_Q_CMD_INIT;
309 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
310 
311 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
312 
313 	case BNX2X_VFOP_QCTOR_SETUP:
314 		/* next state */
315 		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
316 
317 		/* copy pre-prepared setup params to the queue-state params */
318 		vfop->op_p->qctor.qstate.params.setup =
319 			vfop->op_p->qctor.prep_qsetup;
320 
321 		q_params->cmd = BNX2X_Q_CMD_SETUP;
322 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
323 
324 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
325 
326 	case BNX2X_VFOP_QCTOR_INT_EN:
327 
328 		/* enable interrupts */
329 		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
330 				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
331 		goto op_done;
332 	default:
333 		bnx2x_vfop_default(state);
334 	}
335 op_err:
336 	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
337 		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
338 op_done:
339 	bnx2x_vfop_end(bp, vf, vfop);
340 op_pending:
341 	return;
342 }
343 
344 static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
345 				struct bnx2x_virtf *vf,
346 				struct bnx2x_vfop_cmd *cmd,
347 				int qid)
348 {
349 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
350 
351 	if (vfop) {
352 		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
353 
354 		vfop->args.qctor.qid = qid;
355 		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
356 
357 		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
358 				 bnx2x_vfop_qctor, cmd->done);
359 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
360 					     cmd->block);
361 	}
362 	return -ENOMEM;
363 }
364 
365 /* VFOP queue destruction */
366 static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
367 {
368 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
369 	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
370 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
371 	enum bnx2x_vfop_qdtor_state state = vfop->state;
372 
373 	bnx2x_vfop_reset_wq(vf);
374 
375 	if (vfop->rc < 0)
376 		goto op_err;
377 
378 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
379 
380 	switch (state) {
381 	case BNX2X_VFOP_QDTOR_HALT:
382 
383 		/* has this queue already been stopped? */
384 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
385 		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
386 			DP(BNX2X_MSG_IOV,
387 			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
388 			goto op_done;
389 		}
390 
391 		/* next state */
392 		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
393 
394 		q_params->cmd = BNX2X_Q_CMD_HALT;
395 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
396 
397 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
398 
399 	case BNX2X_VFOP_QDTOR_TERMINATE:
400 		/* next state */
401 		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
402 
403 		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
404 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
405 
406 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
407 
408 	case BNX2X_VFOP_QDTOR_CFCDEL:
409 		/* next state */
410 		vfop->state = BNX2X_VFOP_QDTOR_DONE;
411 
412 		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
413 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
414 
415 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
416 op_err:
417 	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
418 		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
419 op_done:
420 	case BNX2X_VFOP_QDTOR_DONE:
421 		/* invalidate the context */
422 		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
423 		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
424 		bnx2x_vfop_end(bp, vf, vfop);
425 		return;
426 	default:
427 		bnx2x_vfop_default(state);
428 	}
429 op_pending:
430 	return;
431 }
432 
433 static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
434 				struct bnx2x_virtf *vf,
435 				struct bnx2x_vfop_cmd *cmd,
436 				int qid)
437 {
438 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
439 
440 	if (vfop) {
441 		struct bnx2x_queue_state_params *qstate =
442 			&vf->op_params.qctor.qstate;
443 
444 		memset(qstate, 0, sizeof(*qstate));
445 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
446 
447 		vfop->args.qdtor.qid = qid;
448 		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
449 
450 		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
451 				 bnx2x_vfop_qdtor, cmd->done);
452 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
453 					     cmd->block);
454 	}
455 	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop\n",
456 	   vf->abs_vfid);
457 	return -ENOMEM;
458 }
459 
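/* register an IGU status block that belongs to a VF: record the first SB as
 * the VF's IGU base id and count the SBs assigned to it
 */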
460 static void
461 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
462 {
463 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
464 	if (vf) {
465 		if (!vf_sb_count(vf))
466 			vf->igu_base_id = igu_sb_id;
467 		++vf_sb_count(vf);
468 	}
469 }
470 
471 /* VFOP MAC/VLAN helpers */
472 static inline void bnx2x_vfop_credit(struct bnx2x *bp,
473 				     struct bnx2x_vfop *vfop,
474 				     struct bnx2x_vlan_mac_obj *obj)
475 {
476 	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
477 
478 	/* update credit only if there is no error
479 	 * and a valid credit counter
480 	 */
481 	if (!vfop->rc && args->credit) {
482 		int cnt = 0;
483 		struct list_head *pos;
484 
485 		list_for_each(pos, &obj->head)
486 			cnt++;
487 
488 		atomic_set(args->credit, cnt);
489 	}
490 }
491 
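/* translate a vfop filter entry (MAC or VLAN, add or delete) into a vlan_mac
 * user request; returns non-zero for an unknown filter type
 */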
492 static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
493 				    struct bnx2x_vfop_filter *pos,
494 				    struct bnx2x_vlan_mac_data *user_req)
495 {
496 	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
497 		BNX2X_VLAN_MAC_DEL;
498 
499 	switch (pos->type) {
500 	case BNX2X_VFOP_FILTER_MAC:
501 		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
502 		break;
503 	case BNX2X_VFOP_FILTER_VLAN:
504 		user_req->u.vlan.vlan = pos->vid;
505 		break;
506 	default:
507 		BNX2X_ERR("Invalid filter type, skipping\n");
508 		return 1;
509 	}
510 	return 0;
511 }
512 
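/* add or remove the default vlan 0 entry on a queue; -EEXIST is not treated
 * as an error
 */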
513 static int
514 bnx2x_vfop_config_vlan0(struct bnx2x *bp,
515 			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
516 			bool add)
517 {
518 	int rc;
519 
520 	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
521 		BNX2X_VLAN_MAC_DEL;
522 	vlan_mac->user_req.u.vlan.vlan = 0;
523 
524 	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
525 	if (rc == -EEXIST)
526 		rc = 0;
527 	return rc;
528 }
529 
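/* apply a list of MAC/VLAN filters. Entries that were configured successfully
 * are moved to a rollback list so they can be reverted if a later entry fails
 * or if more rules than allowed end up being added.
 */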
530 static int bnx2x_vfop_config_list(struct bnx2x *bp,
531 				  struct bnx2x_vfop_filters *filters,
532 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
533 {
534 	struct bnx2x_vfop_filter *pos, *tmp;
535 	struct list_head rollback_list, *filters_list = &filters->head;
536 	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
537 	int rc = 0, cnt = 0;
538 
539 	INIT_LIST_HEAD(&rollback_list);
540 
541 	list_for_each_entry_safe(pos, tmp, filters_list, link) {
542 		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
543 			continue;
544 
545 		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
546 		if (rc >= 0) {
547 			cnt += pos->add ? 1 : -1;
548 			list_del(&pos->link);
549 			list_add(&pos->link, &rollback_list);
550 			rc = 0;
551 		} else if (rc == -EEXIST) {
552 			rc = 0;
553 		} else {
554 			BNX2X_ERR("Failed to add a new vlan_mac command\n");
555 			break;
556 		}
557 	}
558 
559 	/* rollback if error or too many rules added */
560 	if (rc || cnt > filters->add_cnt) {
561 		BNX2X_ERR("error or too many rules added. Performing rollback\n");
562 		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
563 			pos->add = !pos->add;	/* reverse op */
564 			bnx2x_vfop_set_user_req(bp, pos, user_req);
565 			bnx2x_config_vlan_mac(bp, vlan_mac);
566 			list_del(&pos->link);
567 		}
568 		cnt = 0;
569 		if (!rc)
570 			rc = -EINVAL;
571 	}
572 	filters->add_cnt = cnt;
573 	return rc;
574 }
575 
576 /* VFOP set VLAN/MAC */
577 static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
578 {
579 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
580 	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
581 	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
582 	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
583 
584 	enum bnx2x_vfop_vlan_mac_state state = vfop->state;
585 
586 	if (vfop->rc < 0)
587 		goto op_err;
588 
589 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
590 
591 	bnx2x_vfop_reset_wq(vf);
592 
593 	switch (state) {
594 	case BNX2X_VFOP_VLAN_MAC_CLEAR:
595 		/* next state */
596 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
597 
598 		/* do delete */
599 		vfop->rc = obj->delete_all(bp, obj,
600 					   &vlan_mac->user_req.vlan_mac_flags,
601 					   &vlan_mac->ramrod_flags);
602 
603 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
604 
605 	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
606 		/* next state */
607 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
608 
609 		/* do config */
610 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
611 		if (vfop->rc == -EEXIST)
612 			vfop->rc = 0;
613 
614 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
615 
616 	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
617 		vfop->rc = !!obj->raw.check_pending(&obj->raw);
618 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
619 
620 	case BNX2X_VFOP_MAC_CONFIG_LIST:
621 		/* next state */
622 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
623 
624 		/* do list config */
625 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
626 		if (vfop->rc)
627 			goto op_err;
628 
629 		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
630 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
631 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
632 
633 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
634 		/* next state */
635 		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
636 
637 		/* remove vlan0 - could be no-op */
638 		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
639 		if (vfop->rc)
640 			goto op_err;
641 
642 		/* Do vlan list config. If this operation fails we try to
643 		 * restore vlan0 to keep the queue in working order
644 		 */
645 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
646 		if (!vfop->rc) {
647 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
648 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
649 		}
650 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
651 
652 	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
653 		/* next state */
654 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
655 
656 		if (list_empty(&obj->head))
657 			/* add vlan0 */
658 			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
659 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
660 
661 	default:
662 		bnx2x_vfop_default(state);
663 	}
664 op_err:
665 	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
666 op_done:
667 	kfree(filters);
668 	bnx2x_vfop_credit(bp, vfop, obj);
669 	bnx2x_vfop_end(bp, vf, vfop);
670 op_pending:
671 	return;
672 }
673 
674 struct bnx2x_vfop_vlan_mac_flags {
675 	bool drv_only;
676 	bool dont_consume;
677 	bool single_cmd;
678 	bool add;
679 };
680 
681 static void
682 bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
683 				struct bnx2x_vfop_vlan_mac_flags *flags)
684 {
685 	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
686 
687 	memset(ramrod, 0, sizeof(*ramrod));
688 
689 	/* ramrod flags */
690 	if (flags->drv_only)
691 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
692 	if (flags->single_cmd)
693 		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
694 
695 	/* mac_vlan flags */
696 	if (flags->dont_consume)
697 		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
698 
699 	/* cmd */
700 	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
701 }
702 
703 static inline void
704 bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
705 			   struct bnx2x_vfop_vlan_mac_flags *flags)
706 {
707 	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
708 	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
709 }
710 
711 static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
712 				     struct bnx2x_virtf *vf,
713 				     struct bnx2x_vfop_cmd *cmd,
714 				     int qid, bool drv_only)
715 {
716 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
717 
718 	if (vfop) {
719 		struct bnx2x_vfop_args_filters filters = {
720 			.multi_filter = NULL,	/* single */
721 			.credit = NULL,		/* consume credit */
722 		};
723 		struct bnx2x_vfop_vlan_mac_flags flags = {
724 			.drv_only = drv_only,
725 			.dont_consume = (filters.credit != NULL),
726 			.single_cmd = true,
727 			.add = false /* don't care */,
728 		};
729 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
730 			&vf->op_params.vlan_mac;
731 
732 		/* set ramrod params */
733 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
734 
735 		/* set object */
736 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
737 
738 		/* set extra args */
739 		vfop->args.filters = filters;
740 
741 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
742 				 bnx2x_vfop_vlan_mac, cmd->done);
743 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
744 					     cmd->block);
745 	}
746 	return -ENOMEM;
747 }
748 
749 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
750 			    struct bnx2x_virtf *vf,
751 			    struct bnx2x_vfop_cmd *cmd,
752 			    struct bnx2x_vfop_filters *macs,
753 			    int qid, bool drv_only)
754 {
755 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
756 
757 	if (vfop) {
758 		struct bnx2x_vfop_args_filters filters = {
759 			.multi_filter = macs,
760 			.credit = NULL,		/* consume credit */
761 		};
762 		struct bnx2x_vfop_vlan_mac_flags flags = {
763 			.drv_only = drv_only,
764 			.dont_consume = (filters.credit != NULL),
765 			.single_cmd = false,
766 			.add = false, /* don't care since only the items in the
767 				       * filters list affect the sp operation,
768 				       * not the list itself
769 				       */
770 		};
771 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
772 			&vf->op_params.vlan_mac;
773 
774 		/* set ramrod params */
775 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
776 
777 		/* set object */
778 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
779 
780 		/* set extra args */
781 		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
782 		vfop->args.filters = filters;
783 
784 		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
785 				 bnx2x_vfop_vlan_mac, cmd->done);
786 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
787 					     cmd->block);
788 	}
789 	return -ENOMEM;
790 }
791 
792 int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
793 			    struct bnx2x_virtf *vf,
794 			    struct bnx2x_vfop_cmd *cmd,
795 			    int qid, u16 vid, bool add)
796 {
797 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
798 
799 	if (vfop) {
800 		struct bnx2x_vfop_args_filters filters = {
801 			.multi_filter = NULL, /* single command */
802 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
803 		};
804 		struct bnx2x_vfop_vlan_mac_flags flags = {
805 			.drv_only = false,
806 			.dont_consume = (filters.credit != NULL),
807 			.single_cmd = true,
808 			.add = add,
809 		};
810 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
811 			&vf->op_params.vlan_mac;
812 
813 		/* set ramrod params */
814 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
815 		ramrod->user_req.u.vlan.vlan = vid;
816 
817 		/* set object */
818 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
819 
820 		/* set extra args */
821 		vfop->args.filters = filters;
822 
823 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
824 				 bnx2x_vfop_vlan_mac, cmd->done);
825 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
826 					     cmd->block);
827 	}
828 	return -ENOMEM;
829 }
830 
831 static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
832 			       struct bnx2x_virtf *vf,
833 			       struct bnx2x_vfop_cmd *cmd,
834 			       int qid, bool drv_only)
835 {
836 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
837 
838 	if (vfop) {
839 		struct bnx2x_vfop_args_filters filters = {
840 			.multi_filter = NULL, /* single command */
841 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
842 		};
843 		struct bnx2x_vfop_vlan_mac_flags flags = {
844 			.drv_only = drv_only,
845 			.dont_consume = (filters.credit != NULL),
846 			.single_cmd = true,
847 			.add = false, /* don't care */
848 		};
849 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
850 			&vf->op_params.vlan_mac;
851 
852 		/* set ramrod params */
853 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
854 
855 		/* set object */
856 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
857 
858 		/* set extra args */
859 		vfop->args.filters = filters;
860 
861 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
862 				 bnx2x_vfop_vlan_mac, cmd->done);
863 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
864 					     cmd->block);
865 	}
866 	return -ENOMEM;
867 }
868 
869 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
870 			     struct bnx2x_virtf *vf,
871 			     struct bnx2x_vfop_cmd *cmd,
872 			     struct bnx2x_vfop_filters *vlans,
873 			     int qid, bool drv_only)
874 {
875 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
876 
877 	if (vfop) {
878 		struct bnx2x_vfop_args_filters filters = {
879 			.multi_filter = vlans,
880 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
881 		};
882 		struct bnx2x_vfop_vlan_mac_flags flags = {
883 			.drv_only = drv_only,
884 			.dont_consume = (filters.credit != NULL),
885 			.single_cmd = false,
886 			.add = false, /* don't care */
887 		};
888 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
889 			&vf->op_params.vlan_mac;
890 
891 		/* set ramrod params */
892 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
893 
894 		/* set object */
895 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
896 
897 		/* set extra args */
898 		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
899 			atomic_read(filters.credit);
900 
901 		vfop->args.filters = filters;
902 
903 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
904 				 bnx2x_vfop_vlan_mac, cmd->done);
905 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
906 					     cmd->block);
907 	}
908 	return -ENOMEM;
909 }
910 
911 /* VFOP queue setup (queue constructor + set vlan 0) */
912 static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
913 {
914 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
915 	int qid = vfop->args.qctor.qid;
916 	enum bnx2x_vfop_qsetup_state state = vfop->state;
917 	struct bnx2x_vfop_cmd cmd = {
918 		.done = bnx2x_vfop_qsetup,
919 		.block = false,
920 	};
921 
922 	if (vfop->rc < 0)
923 		goto op_err;
924 
925 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
926 
927 	switch (state) {
928 	case BNX2X_VFOP_QSETUP_CTOR:
929 		/* init the queue ctor command */
930 		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
931 		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
932 		if (vfop->rc)
933 			goto op_err;
934 		return;
935 
936 	case BNX2X_VFOP_QSETUP_VLAN0:
937 		/* skip if non-leading or FPGA/EMU */
938 		if (qid)
939 			goto op_done;
940 
941 		/* init the queue set-vlan command (for vlan 0) */
942 		vfop->state = BNX2X_VFOP_QSETUP_DONE;
943 		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
944 		if (vfop->rc)
945 			goto op_err;
946 		return;
947 op_err:
948 	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
949 op_done:
950 	case BNX2X_VFOP_QSETUP_DONE:
951 		bnx2x_vfop_end(bp, vf, vfop);
952 		return;
953 	default:
954 		bnx2x_vfop_default(state);
955 	}
956 }
957 
958 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
959 			  struct bnx2x_virtf *vf,
960 			  struct bnx2x_vfop_cmd *cmd,
961 			  int qid)
962 {
963 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
964 
965 	if (vfop) {
966 		vfop->args.qctor.qid = qid;
967 
968 		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
969 				 bnx2x_vfop_qsetup, cmd->done);
970 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
971 					     cmd->block);
972 	}
973 	return -ENOMEM;
974 }
975 
976 /* VFOP multi-casts */
977 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
978 {
979 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
980 	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
981 	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
982 	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
983 	enum bnx2x_vfop_mcast_state state = vfop->state;
984 	int i;
985 
986 	bnx2x_vfop_reset_wq(vf);
987 
988 	if (vfop->rc < 0)
989 		goto op_err;
990 
991 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
992 
993 	switch (state) {
994 	case BNX2X_VFOP_MCAST_DEL:
995 		/* clear existing mcasts */
996 		vfop->state = BNX2X_VFOP_MCAST_ADD;
997 		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
998 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
999 
1000 	case BNX2X_VFOP_MCAST_ADD:
1001 		if (raw->check_pending(raw))
1002 			goto op_pending;
1003 
1004 		if (args->mc_num) {
1005 			/* update mcast list on the ramrod params */
1006 			INIT_LIST_HEAD(&mcast->mcast_list);
1007 			for (i = 0; i < args->mc_num; i++)
1008 				list_add_tail(&(args->mc[i].link),
1009 					      &mcast->mcast_list);
1010 			/* add new mcasts */
1011 			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
1012 			vfop->rc = bnx2x_config_mcast(bp, mcast,
1013 						      BNX2X_MCAST_CMD_ADD);
1014 		}
1015 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1016 
1017 	case BNX2X_VFOP_MCAST_CHK_DONE:
1018 		vfop->rc = raw->check_pending(raw) ? 1 : 0;
1019 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1020 	default:
1021 		bnx2x_vfop_default(state);
1022 	}
1023 op_err:
1024 	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
1025 op_done:
1026 	kfree(args->mc);
1027 	bnx2x_vfop_end(bp, vf, vfop);
1028 op_pending:
1029 	return;
1030 }
1031 
1032 int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
1033 			 struct bnx2x_virtf *vf,
1034 			 struct bnx2x_vfop_cmd *cmd,
1035 			 bnx2x_mac_addr_t *mcasts,
1036 			 int mcast_num, bool drv_only)
1037 {
1038 	struct bnx2x_vfop *vfop = NULL;
1039 	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
1040 	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
1041 					   NULL;
1042 
1043 	if (!mc_sz || mc) {
1044 		vfop = bnx2x_vfop_add(bp, vf);
1045 		if (vfop) {
1046 			int i;
1047 			struct bnx2x_mcast_ramrod_params *ramrod =
1048 				&vf->op_params.mcast;
1049 
1050 			/* set ramrod params */
1051 			memset(ramrod, 0, sizeof(*ramrod));
1052 			ramrod->mcast_obj = &vf->mcast_obj;
1053 			if (drv_only)
1054 				set_bit(RAMROD_DRV_CLR_ONLY,
1055 					&ramrod->ramrod_flags);
1056 
1057 			/* copy mcasts pointers */
1058 			vfop->args.mc_list.mc_num = mcast_num;
1059 			vfop->args.mc_list.mc = mc;
1060 			for (i = 0; i < mcast_num; i++)
1061 				mc[i].mac = mcasts[i];
1062 
1063 			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
1064 					 bnx2x_vfop_mcast, cmd->done);
1065 			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
1066 						     cmd->block);
1067 		} else {
1068 			kfree(mc);
1069 		}
1070 	}
1071 	return -ENOMEM;
1072 }
1073 
1074 /* VFOP rx-mode */
1075 static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1076 {
1077 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1078 	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
1079 	enum bnx2x_vfop_rxmode_state state = vfop->state;
1080 
1081 	bnx2x_vfop_reset_wq(vf);
1082 
1083 	if (vfop->rc < 0)
1084 		goto op_err;
1085 
1086 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1087 
1088 	switch (state) {
1089 	case BNX2X_VFOP_RXMODE_CONFIG:
1090 		/* next state */
1091 		vfop->state = BNX2X_VFOP_RXMODE_DONE;
1092 
1093 		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1094 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1095 op_err:
1096 		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
1097 op_done:
1098 	case BNX2X_VFOP_RXMODE_DONE:
1099 		bnx2x_vfop_end(bp, vf, vfop);
1100 		return;
1101 	default:
1102 		bnx2x_vfop_default(state);
1103 	}
1104 op_pending:
1105 	return;
1106 }
1107 
1108 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1109 			  struct bnx2x_virtf *vf,
1110 			  struct bnx2x_vfop_cmd *cmd,
1111 			  int qid, unsigned long accept_flags)
1112 {
1113 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1114 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1115 
1116 	if (vfop) {
1117 		struct bnx2x_rx_mode_ramrod_params *ramrod =
1118 			&vf->op_params.rx_mode;
1119 
1120 		memset(ramrod, 0, sizeof(*ramrod));
1121 
1122 		/* Prepare ramrod parameters */
1123 		ramrod->cid = vfq->cid;
1124 		ramrod->cl_id = vfq_cl_id(vf, vfq);
1125 		ramrod->rx_mode_obj = &bp->rx_mode_obj;
1126 		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1127 
1128 		ramrod->rx_accept_flags = accept_flags;
1129 		ramrod->tx_accept_flags = accept_flags;
1130 		ramrod->pstate = &vf->filter_state;
1131 		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1132 
1133 		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1134 		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1135 		set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1136 
1137 		ramrod->rdata =
1138 			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1139 		ramrod->rdata_mapping =
1140 			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1141 
1142 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1143 				 bnx2x_vfop_rxmode, cmd->done);
1144 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
1145 					     cmd->block);
1146 	}
1147 	return -ENOMEM;
1148 }
1149 
1150 /* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
1151  * queue destructor)
1152  */
1153 static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
1154 {
1155 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1156 	int qid = vfop->args.qx.qid;
1157 	enum bnx2x_vfop_qteardown_state state = vfop->state;
1158 	struct bnx2x_vfop_cmd cmd;
1159 
1160 	if (vfop->rc < 0)
1161 		goto op_err;
1162 
1163 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1164 
1165 	cmd.done = bnx2x_vfop_qdown;
1166 	cmd.block = false;
1167 
1168 	switch (state) {
1169 	case BNX2X_VFOP_QTEARDOWN_RXMODE:
1170 		/* Drop all */
1171 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
1172 		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
1173 		if (vfop->rc)
1174 			goto op_err;
1175 		return;
1176 
1177 	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
1178 		/* vlan-clear-all: don't consume credit */
1179 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
1180 		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
1181 		if (vfop->rc)
1182 			goto op_err;
1183 		return;
1184 
1185 	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
1186 		/* mac-clear-all: consume credit */
1187 		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1188 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
1189 		if (vfop->rc)
1190 			goto op_err;
1191 		return;
1192 
1193 	case BNX2X_VFOP_QTEARDOWN_QDTOR:
1194 		/* run the queue destruction flow */
1195 		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
1196 		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
1197 		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
1198 		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
1199 		DP(BNX2X_MSG_IOV, "returned from cmd\n");
1200 		if (vfop->rc)
1201 			goto op_err;
1202 		return;
1203 op_err:
1204 	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
1205 		  vf->abs_vfid, qid, vfop->rc);
1206 
1207 	case BNX2X_VFOP_QTEARDOWN_DONE:
1208 		bnx2x_vfop_end(bp, vf, vfop);
1209 		return;
1210 	default:
1211 		bnx2x_vfop_default(state);
1212 	}
1213 }
1214 
1215 int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
1216 			 struct bnx2x_virtf *vf,
1217 			 struct bnx2x_vfop_cmd *cmd,
1218 			 int qid)
1219 {
1220 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1221 
1222 	if (vfop) {
1223 		vfop->args.qx.qid = qid;
1224 		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
1225 				 bnx2x_vfop_qdown, cmd->done);
1226 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
1227 					     cmd->block);
1228 	}
1229 
1230 	return -ENOMEM;
1231 }
1232 
1233 /* VF enable primitives
1234  * when pretend is required the caller is responsible
1235  * for calling pretend prior to calling these routines
1236  */
1237 
1238 /* called only on E1H or E2.
1239  * When pretending to be PF, the pretend value is the function number 0...7
1240  * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
1241  * combination
1242  */
1243 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
1244 {
1245 	u32 pretend_reg;
1246 
1247 	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
1248 		return -1;
1249 
1250 	/* get my own pretend register */
1251 	pretend_reg = bnx2x_get_pretend_reg(bp);
1252 	REG_WR(bp, pretend_reg, pretend_func_val);
1253 	REG_RD(bp, pretend_reg);
1254 	return 0;
1255 }
1256 
1257 /* internal vf enable - until vf is enabled internally all transactions
1258  * are blocked. this routine should always be called last with pretend.
1259  */
1260 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
1261 {
1262 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
1263 }
1264 
1265 /* clears vf error in all semi blocks */
1266 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
1267 {
1268 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
1269 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
1270 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
1271 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
1272 }
1273 
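/* clear the VF's 'was error' indication in the PGLUE-B block */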
1274 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
1275 {
1276 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
1277 	u32 was_err_reg = 0;
1278 
1279 	switch (was_err_group) {
1280 	case 0:
1281 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
1282 	    break;
1283 	case 1:
1284 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
1285 	    break;
1286 	case 2:
1287 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
1288 	    break;
1289 	case 3:
1290 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
1291 	    break;
1292 	}
1293 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
1294 }
1295 
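/* reset the VF in the IGU: pretend as the VF to clear its interrupt masks,
 * enable the function and set its parent PF, then clear the state of each of
 * its status blocks
 */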
1296 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1297 {
1298 	int i;
1299 	u32 val;
1300 
1301 	/* Set VF masks and configuration - pretend */
1302 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1303 
1304 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
1305 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
1306 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
1307 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
1308 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
1309 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
1310 
1311 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1312 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
1313 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
1314 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
1315 	val &= ~IGU_VF_CONF_PARENT_MASK;
1316 	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
1317 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1318 
1319 	DP(BNX2X_MSG_IOV,
1320 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
1321 	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
1322 
1323 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1324 
1325 	/* iterate over all queues, clear sb consumer */
1326 	for (i = 0; i < vf_sb_count(vf); i++) {
1327 		u8 igu_sb_id = vf_igu_sb(vf, i);
1328 
1329 		/* zero prod memory */
1330 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
1331 
1332 		/* clear sb state machine */
1333 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
1334 				       false /* VF */);
1335 
1336 		/* disable + update */
1337 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
1338 				    IGU_INT_DISABLE, 1);
1339 	}
1340 }
1341 
1342 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
1343 {
1344 	/* set the VF-PF association in the FW */
1345 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
1346 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
1347 
1348 	/* clear vf errors */
1349 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
1350 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
1351 
1352 	/* internal vf-enable - pretend */
1353 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
1354 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
1355 	bnx2x_vf_enable_internal(bp, true);
1356 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1357 }
1358 
1359 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
1360 {
1361 	/* Reset vf in IGU - interrupts are still disabled */
1362 	bnx2x_vf_igu_reset(bp, vf);
1363 
1364 	/* pretend to enable the vf with the PBF */
1365 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1366 	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
1367 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1368 }
1369 
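/* check whether the VF's PCI function still has pending transactions */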
1370 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1371 {
1372 	struct pci_dev *dev;
1373 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1374 
1375 	if (!vf)
1376 		goto unknown_dev;
1377 
1378 	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
1379 	if (dev)
1380 		return bnx2x_is_pcie_pending(dev);
1381 
1382 unknown_dev:
1383 	BNX2X_ERR("Unknown device\n");
1384 	return false;
1385 }
1386 
1387 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1388 {
1389 	/* Wait 100ms */
1390 	msleep(100);
1391 
1392 	/* Verify no pending pci transactions */
1393 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
1394 		BNX2X_ERR("PCIE Transactions still pending\n");
1395 
1396 	return 0;
1397 }
1398 
1399 /* must be called after the number of PF queues and the number of VFs are
1400  * both known
1401  */
1402 static void
1403 bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
1404 {
1405 	u16 vlan_count = 0;
1406 
1407 	/* will be set only during VF-ACQUIRE */
1408 	resc->num_rxqs = 0;
1409 	resc->num_txqs = 0;
1410 
1411 	/* no credit calculations for macs (just yet) */
1412 	resc->num_mac_filters = 1;
1413 
1414 	/* divvy up vlan rules */
1415 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
1416 	vlan_count = 1 << ilog2(vlan_count);
1417 	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
1418 
1419 	/* no real limitation */
1420 	resc->num_mc_filters = 0;
1421 
1422 	/* num_sbs already set */
1423 }
1424 
1425 /* FLR routines: */
1426 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1427 {
1428 	/* reset the state variables */
1429 	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
1430 	vf->state = VF_FREE;
1431 }
1432 
1433 /* IOV global initialization routines  */
1434 void bnx2x_iov_init_dq(struct bnx2x *bp)
1435 {
1436 	if (!IS_SRIOV(bp))
1437 		return;
1438 
1439 	/* Set the DQ such that the CID reflect the abs_vfid */
1440 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1441 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1442 
1443 	/* Set the VFs' starting CID. If it is > 0 the preceding CIDs belong
1444 	 * to the PF L2 queues
1445 	 */
1446 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1447 
1448 	/* The VF window size is the log2 of the max number of CIDs per VF */
1449 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1450 
1451 	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
1452 	 * the PF doorbell size although the two are independent.
1453 	 */
1454 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
1455 	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
1456 
1457 	/* No security checks for now -
1458 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
1459 	 * CID range 0 - 0x1ffff
1460 	 */
1461 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1462 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1463 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1464 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1465 
1466 	/* set the number of VF allowed doorbells to the full DQ range */
1467 	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
1468 
1469 	/* set the VF doorbell threshold */
1470 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
1471 }
1472 
1473 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1474 {
1475 	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
1476 	if (!IS_SRIOV(bp))
1477 		return;
1478 
1479 	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1480 }
1481 
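/* the VF's bus number and devfn are derived from the PF's devfn plus the
 * SR-IOV offset and stride
 */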
1482 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1483 {
1484 	struct pci_dev *dev = bp->pdev;
1485 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1486 
1487 	return dev->bus->number + ((dev->devfn + iov->offset +
1488 				    iov->stride * vfid) >> 8);
1489 }
1490 
1491 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1492 {
1493 	struct pci_dev *dev = bp->pdev;
1494 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1495 
1496 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1497 }
1498 
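/* each SR-IOV BAR of the PF is divided evenly among the VFs; record this
 * VF's share (base address and size)
 */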
1499 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1500 {
1501 	int i, n;
1502 	struct pci_dev *dev = bp->pdev;
1503 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1504 
1505 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1506 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1507 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1508 
1509 		do_div(size, iov->total);
1510 		vf->bars[n].bar = start + size * vf->abs_vfid;
1511 		vf->bars[n].size = size;
1512 	}
1513 }
1514 
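/* check whether ARI is enabled on the bridge above the device */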
1515 static int bnx2x_ari_enabled(struct pci_dev *dev)
1516 {
1517 	return dev->bus->self && dev->bus->self->ari_enabled;
1518 }
1519 
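/* scan the IGU CAM and register every valid entry that belongs to a VF
 * against that VF (base SB id and SB count)
 */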
1520 static void
1521 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1522 {
1523 	int sb_id;
1524 	u32 val;
1525 	u8 fid;
1526 
1527 	/* IGU in normal mode - read CAM */
1528 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1529 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1530 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1531 			continue;
1532 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1533 		if (!(fid & IGU_FID_ENCODE_IS_PF))
1534 			bnx2x_vf_set_igu_info(bp, sb_id,
1535 					      (fid & IGU_FID_VF_NUM_MASK));
1536 
1537 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1538 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1539 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1540 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1541 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1542 	}
1543 }
1544 
1545 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1546 {
1547 	if (bp->vfdb) {
1548 		kfree(bp->vfdb->vfqs);
1549 		kfree(bp->vfdb->vfs);
1550 		kfree(bp->vfdb);
1551 	}
1552 	bp->vfdb = NULL;
1553 }
1554 
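/* read the SR-IOV capability structure from PCI configuration space */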
1555 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1556 {
1557 	int pos;
1558 	struct pci_dev *dev = bp->pdev;
1559 
1560 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1561 	if (!pos) {
1562 		BNX2X_ERR("failed to find SRIOV capability in device\n");
1563 		return -ENODEV;
1564 	}
1565 
1566 	iov->pos = pos;
1567 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1568 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1569 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1570 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1571 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1572 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1573 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1574 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1575 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1576 
1577 	return 0;
1578 }
1579 
1580 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1581 {
1582 	u32 val;
1583 
1584 	/* read the SRIOV capability structure
1585 	 * The fields can be read via configuration read or
1586 	 * directly from the device (starting at offset PCICFG_OFFSET)
1587 	 */
1588 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
1589 		return -ENODEV;
1590 
1591 	/* get the number of SRIOV bars */
1592 	iov->nres = 0;
1593 
1594 	/* read the first_vfid */
1595 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1596 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1597 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1598 
1599 	DP(BNX2X_MSG_IOV,
1600 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1601 	   BP_FUNC(bp),
1602 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1603 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1604 
1605 	return 0;
1606 }
1607 
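/* total number of status blocks statically allocated to the VFs - an upper
 * bound on the number of VF queues
 */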
1608 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
1609 {
1610 	int i;
1611 	u8 queue_count = 0;
1612 
1613 	if (IS_SRIOV(bp))
1614 		for_each_vf(bp, i)
1615 			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1616 
1617 	return queue_count;
1618 }
1619 
1620 /* must be called after PF bars are mapped */
1621 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1622 			int num_vfs_param)
1623 {
1624 	int err, i, qcount;
1625 	struct bnx2x_sriov *iov;
1626 	struct pci_dev *dev = bp->pdev;
1627 
1628 	bp->vfdb = NULL;
1629 
1630 	/* verify this is a PF */
1631 	if (IS_VF(bp))
1632 		return 0;
1633 
1634 	/* verify sriov capability is present in configuration space */
1635 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1636 		return 0;
1637 
1638 	/* verify chip revision */
1639 	if (CHIP_IS_E1x(bp))
1640 		return 0;
1641 
1642 	/* check if SRIOV support is turned off */
1643 	if (!num_vfs_param)
1644 		return 0;
1645 
1646 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1647 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1648 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1649 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1650 		return 0;
1651 	}
1652 
1653 	/* SRIOV can be enabled only with MSIX */
1654 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
1655 	    int_mode_param == BNX2X_INT_MODE_INTX)
1656 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1657 
1658 	err = -EIO;
1659 	/* verify ari is enabled */
1660 	if (!bnx2x_ari_enabled(bp->pdev)) {
1661 		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
1662 		return err;
1663 	}
1664 
1665 	/* verify igu is in normal mode */
1666 	if (CHIP_INT_MODE_IS_BC(bp)) {
1667 		BNX2X_ERR("IGU is not in normal mode, SRIOV cannot be enabled\n");
1668 		return err;
1669 	}
1670 
1671 	/* allocate the vfs database */
1672 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1673 	if (!bp->vfdb) {
1674 		BNX2X_ERR("failed to allocate vf database\n");
1675 		err = -ENOMEM;
1676 		goto failed;
1677 	}
1678 
1679 	/* get the sriov info - Linux already collected all the pertinent
1680 	 * information, however the sriov structure is for the private use
1681 	 * of the pci module. Also we want this information regardless
1682 	 * of the hypervisor.
1683 	 */
1684 	iov = &(bp->vfdb->sriov);
1685 	err = bnx2x_sriov_info(bp, iov);
1686 	if (err)
1687 		goto failed;
1688 
1689 	/* SR-IOV capability was enabled but there are no VFs */
1690 	if (iov->total == 0)
1691 		goto failed;
1692 
1693 	/* calculate the actual number of VFs */
1694 	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
1695 
1696 	/* allocate the vf array */
1697 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
1698 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
1699 	if (!bp->vfdb->vfs) {
1700 		BNX2X_ERR("failed to allocate vf array\n");
1701 		err = -ENOMEM;
1702 		goto failed;
1703 	}
1704 
1705 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1706 	for_each_vf(bp, i) {
1707 		bnx2x_vf(bp, i, index) = i;
1708 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1709 		bnx2x_vf(bp, i, state) = VF_FREE;
1710 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
1711 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
1712 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1713 	}
1714 
1715 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1716 	bnx2x_get_vf_igu_cam_info(bp);
1717 
1718 	/* get the total queue count and allocate the global queue arrays */
1719 	qcount = bnx2x_iov_get_max_queue_count(bp);
1720 
1721 	/* allocate the queue arrays for all VFs */
1722 	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
1723 				 GFP_KERNEL);
1724 	if (!bp->vfdb->vfqs) {
1725 		BNX2X_ERR("failed to allocate vf queue array\n");
1726 		err = -ENOMEM;
1727 		goto failed;
1728 	}
1729 
1730 	return 0;
1731 failed:
1732 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
1733 	__bnx2x_iov_free_vfdb(bp);
1734 	return err;
1735 }
1736 
1737 void bnx2x_iov_remove_one(struct bnx2x *bp)
1738 {
1739 	/* if SRIOV is not enabled there's nothing to do */
1740 	if (!IS_SRIOV(bp))
1741 		return;
1742 
1743 	/* free vf database */
1744 	__bnx2x_iov_free_vfdb(bp);
1745 }
1746 
1747 void bnx2x_iov_free_mem(struct bnx2x *bp)
1748 {
1749 	int i;
1750 
1751 	if (!IS_SRIOV(bp))
1752 		return;
1753 
1754 	/* free vfs hw contexts */
1755 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1756 		struct hw_dma *cxt = &bp->vfdb->context[i];
1757 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
1758 	}
1759 
1760 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
1761 		       BP_VFDB(bp)->sp_dma.mapping,
1762 		       BP_VFDB(bp)->sp_dma.size);
1763 
1764 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
1765 		       BP_VF_MBX_DMA(bp)->mapping,
1766 		       BP_VF_MBX_DMA(bp)->size);
1767 }
1768 
1769 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
1770 {
1771 	size_t tot_size;
1772 	int i, rc = 0;
1773 
1774 	if (!IS_SRIOV(bp))
1775 		return rc;
1776 
1777 	/* allocate vfs hw contexts */
1778 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
1779 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
1780 
1781 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1782 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
1783 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
1784 
1785 		if (cxt->size) {
1786 			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
1787 		} else {
1788 			cxt->addr = NULL;
1789 			cxt->mapping = 0;
1790 		}
1791 		tot_size -= cxt->size;
1792 	}
1793 
1794 	/* allocate vfs ramrods dma memory - client_init and set_mac */
1795 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
1796 	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
1797 			tot_size);
1798 	BP_VFDB(bp)->sp_dma.size = tot_size;
1799 
1800 	/* allocate mailboxes */
1801 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
1802 	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
1803 			tot_size);
1804 	BP_VF_MBX_DMA(bp)->size = tot_size;
1805 
1806 	return 0;
1807 
1808 alloc_mem_err:
1809 	return -ENOMEM;
1810 }
1811 
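/* set up the state object of a single VF queue. The mac, vlan and mcast
 * objects are only initialized for the leading queue.
 */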
1812 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
1813 			   struct bnx2x_vf_queue *q)
1814 {
1815 	u8 cl_id = vfq_cl_id(vf, q);
1816 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
1817 	unsigned long q_type = 0;
1818 
1819 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1820 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1821 
1822 	/* Queue State object */
1823 	bnx2x_init_queue_obj(bp, &q->sp_obj,
1824 			     cl_id, &q->cid, 1, func_id,
1825 			     bnx2x_vf_sp(bp, vf, q_data),
1826 			     bnx2x_vf_sp_map(bp, vf, q_data),
1827 			     q_type);
1828 
1829 	DP(BNX2X_MSG_IOV,
1830 	   "initialized vf %d's queue object. func id set to %d\n",
1831 	   vf->abs_vfid, q->sp_obj.func_id);
1832 
1833 	/* mac/vlan objects are per queue, but only those
1834 	 * that belong to the leading queue are initialized
1835 	 */
1836 	if (vfq_is_leading(q)) {
1837 		/* mac */
1838 		bnx2x_init_mac_obj(bp, &q->mac_obj,
1839 				   cl_id, q->cid, func_id,
1840 				   bnx2x_vf_sp(bp, vf, mac_rdata),
1841 				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
1842 				   BNX2X_FILTER_MAC_PENDING,
1843 				   &vf->filter_state,
1844 				   BNX2X_OBJ_TYPE_RX_TX,
1845 				   &bp->macs_pool);
1846 		/* vlan */
1847 		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
1848 				    cl_id, q->cid, func_id,
1849 				    bnx2x_vf_sp(bp, vf, vlan_rdata),
1850 				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
1851 				    BNX2X_FILTER_VLAN_PENDING,
1852 				    &vf->filter_state,
1853 				    BNX2X_OBJ_TYPE_RX_TX,
1854 				    &bp->vlans_pool);
1855 
1856 		/* mcast */
1857 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
1858 				     q->cid, func_id, func_id,
1859 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
1860 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1861 				     BNX2X_FILTER_MCAST_PENDING,
1862 				     &vf->filter_state,
1863 				     BNX2X_OBJ_TYPE_RX_TX);
1864 
1865 		vf->leading_rss = cl_id;
1866 	}
1867 }
1868 
1869 /* called by bnx2x_nic_load */
1870 int bnx2x_iov_nic_init(struct bnx2x *bp)
1871 {
1872 	int vfid, qcount, i;
1873 
1874 	if (!IS_SRIOV(bp)) {
1875 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1876 		return 0;
1877 	}
1878 
1879 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1880 
1881 	/* initialize vf database */
1882 	for_each_vf(bp, vfid) {
1883 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1884 
1885 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1886 			BNX2X_CIDS_PER_VF;
1887 
1888 		union cdu_context *base_cxt = (union cdu_context *)
1889 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1890 			(base_vf_cid & (ILT_PAGE_CIDS-1));
1891 
1892 		DP(BNX2X_MSG_IOV,
1893 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1894 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1895 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1896 
1897 		/* init statically provisioned resources */
1898 		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
1899 
1900 		/* queues are initialized during VF-ACQUIRE */
1901 
1902 		/* reserve the vf vlan credit */
1903 		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
1904 
1905 		vf->filter_state = 0;
1906 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1907 
1908 		/*  init mcast object - This object will be re-initialized
1909 		 *  during VF-ACQUIRE with the proper cl_id and cid.
1910 		 *  It needs to be initialized here so that it can be safely
1911 		 *  handled by a subsequent FLR flow.
1912 		 */
1913 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1914 				     0xFF, 0xFF, 0xFF,
1915 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
1916 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1917 				     BNX2X_FILTER_MCAST_PENDING,
1918 				     &vf->filter_state,
1919 				     BNX2X_OBJ_TYPE_RX_TX);
1920 
1921 		/* set the mailbox message addresses */
1922 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1923 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1924 			MBX_MSG_ALIGNED_SIZE);
1925 
1926 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1927 			vfid * MBX_MSG_ALIGNED_SIZE;
1928 
1929 		/* Enable vf mailbox */
1930 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1931 	}
1932 
1933 	/* Final VF init */
1934 	qcount = 0;
1935 	for_each_vf(bp, i) {
1936 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1937 
1938 		/* fill in the BDF and bars */
1939 		vf->bus = bnx2x_vf_bus(bp, i);
1940 		vf->devfn = bnx2x_vf_devfn(bp, i);
1941 		bnx2x_vf_set_bars(bp, vf);
1942 
1943 		DP(BNX2X_MSG_IOV,
1944 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1945 		   vf->abs_vfid, vf->bus, vf->devfn,
1946 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1947 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1948 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1949 
1950 		/* set local queue arrays */
1951 		vf->vfqs = &bp->vfdb->vfqs[qcount];
1952 		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1953 	}
1954 
1955 	return 0;
1956 }
1957 
1958 /* called by bnx2x_chip_cleanup */
1959 int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1960 {
1961 	int i;
1962 
1963 	if (!IS_SRIOV(bp))
1964 		return 0;
1965 
1966 	/* release all the VFs */
1967 	for_each_vf(bp, i)
1968 		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
1969 
1970 	return 0;
1971 }
1972 
1973 /* called by bnx2x_init_hw_func, returns the next ilt line */
1974 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1975 {
1976 	int i;
1977 	struct bnx2x_ilt *ilt = BP_ILT(bp);
1978 
1979 	if (!IS_SRIOV(bp))
1980 		return line;
1981 
1982 	/* set vfs ilt lines */
1983 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1984 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1985 
1986 		ilt->lines[line+i].page = hw_cxt->addr;
1987 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1988 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1989 	}
1990 	return line + i;
1991 }
1992 
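/* true iff the cid lies within the VF cid window
 * [BNX2X_FIRST_VF_CID, BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
 */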
1993 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1994 {
1995 	return ((cid >= BNX2X_FIRST_VF_CID) &&
1996 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1997 }
1998 
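/* a mac/vlan classification ramrod completed on a VF queue - let the
 * matching object complete the command and schedule any pending ones
 */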
1999 static
2000 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
2001 					struct bnx2x_vf_queue *vfq,
2002 					union event_ring_elem *elem)
2003 {
2004 	unsigned long ramrod_flags = 0;
2005 	int rc = 0;
2006 
2007 	/* Always push next commands out, don't wait here */
2008 	set_bit(RAMROD_CONT, &ramrod_flags);
2009 
2010 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
2011 	case BNX2X_FILTER_MAC_PENDING:
2012 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
2013 					   &ramrod_flags);
2014 		break;
2015 	case BNX2X_FILTER_VLAN_PENDING:
2016 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
2017 					    &ramrod_flags);
2018 		break;
2019 	default:
2020 		BNX2X_ERR("Unsupported classification command: %d\n",
2021 			  elem->message.data.eth_event.echo);
2022 		return;
2023 	}
2024 	if (rc < 0)
2025 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
2026 	else if (rc > 0)
2027 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
2028 }
2029 
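/* a mcast ramrod completed on this VF - clear the pending bit and, if
 * more mcast commands are queued, send the next one
 */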
2030 static
2031 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
2032 			       struct bnx2x_virtf *vf)
2033 {
2034 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2035 	int rc;
2036 
2037 	rparam.mcast_obj = &vf->mcast_obj;
2038 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2039 
2040 	/* If there are pending mcast commands - send them */
2041 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2042 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2043 		if (rc < 0)
2044 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2045 				  rc);
2046 	}
2047 }
2048 
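/* an rx-mode (filters) ramrod completed on this VF - clear the pending
 * flag in the VF's filter state
 */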
2049 static
2050 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2051 				 struct bnx2x_virtf *vf)
2052 {
2053 	smp_mb__before_clear_bit();
2054 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2055 	smp_mb__after_clear_bit();
2056 }
2057 
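/* dispatch an event-queue element to the VF that owns it. Returns 1 when
 * the element is not VF related and should be handled by the PF, 0 when
 * it was consumed here.
 */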
2058 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2059 {
2060 	struct bnx2x_virtf *vf;
2061 	int qidx = 0, abs_vfid;
2062 	u8 opcode;
2063 	u16 cid = 0xffff;
2064 
2065 	if (!IS_SRIOV(bp))
2066 		return 1;
2067 
2068 	/* first get the cid - the only events we handle here are cfc-delete
2069 	 * and set-mac completion
2070 	 */
2071 	opcode = elem->message.opcode;
2072 
2073 	switch (opcode) {
2074 	case EVENT_RING_OPCODE_CFC_DEL:
2075 		cid = SW_CID((__force __le32)
2076 			     elem->message.data.cfc_del_event.cid);
2077 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2078 		break;
2079 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2080 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2081 	case EVENT_RING_OPCODE_FILTERS_RULES:
2082 		cid = (elem->message.data.eth_event.echo &
2083 		       BNX2X_SWCID_MASK);
2084 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2085 		break;
2086 	case EVENT_RING_OPCODE_VF_FLR:
2087 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
2088 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2089 		   abs_vfid);
2090 		goto get_vf;
2091 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2092 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2093 		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
2094 		   abs_vfid);
2095 		goto get_vf;
2096 	default:
2097 		return 1;
2098 	}
2099 
	/* check if the cid is in the VF range */
2101 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2102 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2103 		return 1;
2104 	}
2105 
2106 	/* extract vf and rxq index from vf_cid - relies on the following:
2107 	 * 1. vfid on cid reflects the true abs_vfid
2108 	 * 2. the max number of VFs (per path) is 64
2109 	 */
2110 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2111 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2112 get_vf:
2113 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
2114 
2115 	if (!vf) {
2116 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
2117 			  cid, abs_vfid);
2118 		return 0;
2119 	}
2120 
2121 	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL: {
		struct bnx2x_queue_sp_obj *q_obj = &vfq_get(vf, qidx)->sp_obj;

		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL);
		break;
	}
2130 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2131 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
2132 		   vf->abs_vfid, qidx);
2133 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
2134 		break;
2135 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2136 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
2137 		   vf->abs_vfid, qidx);
2138 		bnx2x_vf_handle_mcast_eqe(bp, vf);
2139 		break;
2140 	case EVENT_RING_OPCODE_FILTERS_RULES:
2141 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
2142 		   vf->abs_vfid, qidx);
2143 		bnx2x_vf_handle_filters_eqe(bp, vf);
2144 		break;
2145 	case EVENT_RING_OPCODE_VF_FLR:
2146 		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
2147 		   vf->abs_vfid);
2148 		/* Do nothing for now */
2149 		break;
2150 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2151 		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
2152 		   vf->abs_vfid);
2153 		/* Do nothing for now */
2154 		break;
2155 	}
2156 	/* SRIOV: reschedule any 'in_progress' operations */
2157 	bnx2x_iov_sp_event(bp, cid, false);
2158 
2159 	return 0;
2160 }
2161 
2162 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2163 {
2164 	/* extract the vf from vf_cid - relies on the following:
2165 	 * 1. vfid on cid reflects the true abs_vfid
2166 	 * 2. the max number of VFs (per path) is 64
2167 	 */
2168 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2169 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
2170 }
2171 
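/* resolve the queue state object that owns a VF cid; *q_obj is left
 * untouched when no VF matches
 */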
2172 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2173 				struct bnx2x_queue_sp_obj **q_obj)
2174 {
2175 	struct bnx2x_virtf *vf;
2176 
2177 	if (!IS_SRIOV(bp))
2178 		return;
2179 
2180 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2181 
2182 	if (vf) {
2183 		/* extract queue index from vf_cid - relies on the following:
2184 		 * 1. vfid on cid reflects the true abs_vfid
2185 		 * 2. the max number of VFs (per path) is 64
2186 		 */
2187 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2188 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2189 	} else {
2190 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2191 	}
2192 }
2193 
2194 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2195 {
2196 	struct bnx2x_virtf *vf;
2197 
	/* check if the cid is in the VF range */
2199 	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2200 		return;
2201 
2202 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2203 	if (vf) {
2204 		/* set in_progress flag */
2205 		atomic_set(&vf->op_in_progress, 1);
2206 		if (queue_work)
2207 			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2208 	}
2209 }
2210 
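/* append per-queue statistics query entries for every enabled VF after
 * the PF (and FCoE) entries in the firmware statistics request
 */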
2211 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2212 {
2213 	int i;
2214 	int first_queue_query_index, num_queues_req;
2215 	dma_addr_t cur_data_offset;
2216 	struct stats_query_entry *cur_query_entry;
2217 	u8 stats_count = 0;
2218 	bool is_fcoe = false;
2219 
2220 	if (!IS_SRIOV(bp))
2221 		return;
2222 
2223 	if (!NO_FCOE(bp))
2224 		is_fcoe = true;
2225 
2226 	/* fcoe adds one global request and one queue request */
2227 	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2228 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2229 		(is_fcoe ? 0 : 1);
2230 
2231 	DP(BNX2X_MSG_IOV,
2232 	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2233 	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2234 	   first_queue_query_index + num_queues_req);
2235 
2236 	cur_data_offset = bp->fw_stats_data_mapping +
2237 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2238 		num_queues_req * sizeof(struct per_queue_stats);
2239 
2240 	cur_query_entry = &bp->fw_stats_req->
2241 		query[first_queue_query_index + num_queues_req];
2242 
2243 	for_each_vf(bp, i) {
2244 		int j;
2245 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2246 
2247 		if (vf->state != VF_ENABLED) {
2248 			DP(BNX2X_MSG_IOV,
2249 			   "vf %d not enabled so no stats for it\n",
2250 			   vf->abs_vfid);
2251 			continue;
2252 		}
2253 
2254 		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2255 		for_each_vfq(vf, j) {
2256 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2257 
			/* collect stats from active queues only */
2259 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2260 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
2261 				continue;
2262 
2263 			/* create stats query entry for this queue */
2264 			cur_query_entry->kind = STATS_TYPE_QUEUE;
2265 			cur_query_entry->index = vfq_cl_id(vf, rxq);
2266 			cur_query_entry->funcID =
2267 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2268 			cur_query_entry->address.hi =
2269 				cpu_to_le32(U64_HI(vf->fw_stat_map));
2270 			cur_query_entry->address.lo =
2271 				cpu_to_le32(U64_LO(vf->fw_stat_map));
2272 			DP(BNX2X_MSG_IOV,
2273 			   "added address %x %x for vf %d queue %d client %d\n",
2274 			   cur_query_entry->address.hi,
2275 			   cur_query_entry->address.lo, cur_query_entry->funcID,
2276 			   j, cur_query_entry->index);
2277 			cur_query_entry++;
2278 			cur_data_offset += sizeof(struct per_queue_stats);
2279 			stats_count++;
2280 		}
2281 	}
2282 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2283 }
2284 
2285 void bnx2x_iov_sp_task(struct bnx2x *bp)
2286 {
2287 	int i;
2288 
2289 	if (!IS_SRIOV(bp))
2290 		return;
2291 	/* Iterate over all VFs and invoke state transition for VFs with
2292 	 * 'in-progress' slow-path operations
2293 	 */
2294 	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
2295 	for_each_vf(bp, i) {
2296 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2297 
2298 		if (!list_empty(&vf->op_list_head) &&
2299 		    atomic_read(&vf->op_in_progress)) {
2300 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2301 			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2302 		}
2303 	}
2304 }
2305 
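/* map a statistics (IGU SB) id to the VF whose IGU range contains it.
 * Note that the last VF is returned when no range matches.
 */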
2306 static inline
2307 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2308 {
2309 	int i;
2310 	struct bnx2x_virtf *vf = NULL;
2311 
2312 	for_each_vf(bp, i) {
2313 		vf = BP_VF(bp, i);
2314 		if (stat_id >= vf->igu_base_id &&
2315 		    stat_id < vf->igu_base_id + vf_sb_count(vf))
2316 			break;
2317 	}
2318 	return vf;
2319 }
2320 
2321 /* VF API helpers */
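/* grant or revoke a VF's access to queue-zone @qid in the PXP host zone
 * permission table. The set bit above the vfid presumably marks the
 * entry as valid.
 */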
2322 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2323 				u8 enable)
2324 {
2325 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2326 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2327 
2328 	REG_WR(bp, reg, val);
2329 }
2330 
2331 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2332 {
2333 	int i;
2334 
2335 	for_each_vfq(vf, i)
2336 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2337 				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
2338 }
2339 
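/* clear the VF's IGU configuration. The registers are accessed while
 * pretending to be the VF, then the PF's own function is restored.
 */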
2340 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2341 {
2342 	u32 val;
2343 
2344 	/* clear the VF configuration - pretend */
2345 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2346 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2347 	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2348 		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2349 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2350 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2351 }
2352 
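/* a VF may use no more queues than it has status blocks, CIDs or the
 * per-VF HW queue limit allows
 */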
2353 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2354 {
2355 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2356 		     BNX2X_VF_MAX_QUEUES);
2357 }
2358 
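/* check a resource request against what the VF may use; a current queue
 * count of zero means the static maximum is still available
 */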
2359 static
2360 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2361 			    struct vf_pf_resc_request *req_resc)
2362 {
2363 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2364 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2365 
2366 	return ((req_resc->num_rxqs <= rxq_cnt) &&
2367 		(req_resc->num_txqs <= txq_cnt) &&
2368 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
2369 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2370 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2371 }
2372 
2373 /* CORE VF API */
2374 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2375 		     struct vf_pf_resc_request *resc)
2376 {
2377 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2378 		BNX2X_CIDS_PER_VF;
2379 
2380 	union cdu_context *base_cxt = (union cdu_context *)
2381 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2382 		(base_vf_cid & (ILT_PAGE_CIDS-1));
2383 	int i;
2384 
	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed
	 * the already acquired numbers.
	 */
2390 	if (vf->state == VF_ACQUIRED) {
2391 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2392 		   vf->abs_vfid);
2393 
2394 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must not exceed the previously acquired numbers\n",
2396 				  vf->abs_vfid);
2397 			return -EINVAL;
2398 		}
2399 		return 0;
2400 	}
2401 
2402 	/* Otherwise vf state must be 'free' or 'reset' */
2403 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Cannot acquire a VF in state %d\n",
2405 			  vf->abs_vfid, vf->state);
2406 		return -EINVAL;
2407 	}
2408 
	/* static allocation:
	 * the global maximum numbers are fixed per VF; fail the request if
	 * the requested numbers exceed these globals
	 */
2413 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2414 		DP(BNX2X_MSG_IOV,
2415 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2416 		/* set the max resource in the vf */
2417 		return -ENOMEM;
2418 	}
2419 
2420 	/* Set resources counters - 0 request means max available */
2421 	vf_sb_count(vf) = resc->num_sbs;
2422 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2423 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2424 	if (resc->num_mac_filters)
2425 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2426 	if (resc->num_vlan_filters)
2427 		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2428 
2429 	DP(BNX2X_MSG_IOV,
2430 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2431 	   vf_sb_count(vf), vf_rxq_count(vf),
2432 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
2433 	   vf_vlan_rules_cnt(vf));
2434 
2435 	/* Initialize the queues */
2436 	if (!vf->vfqs) {
2437 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2438 		return -EINVAL;
2439 	}
2440 
2441 	for_each_vfq(vf, i) {
2442 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
2443 
2444 		if (!q) {
2445 			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
2446 			return -EINVAL;
2447 		}
2448 
2449 		q->index = i;
2450 		q->cxt = &((base_cxt + i)->eth);
2451 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2452 
2453 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2454 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2455 
2456 		/* init SP objects */
2457 		bnx2x_vfq_init(bp, vf, q);
2458 	}
2459 	vf->state = VF_ACQUIRED;
2460 	return 0;
2461 }
2462 
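/* FW/HW initialization of an acquired VF: init its status blocks, run the
 * FLR cleanup epilogue, set up the function parameters, enable access and
 * traffic, and open the queue protection table
 */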
2463 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2464 {
2465 	struct bnx2x_func_init_params func_init = {0};
2466 	u16 flags = 0;
2467 	int i;
2468 
2469 	/* the sb resources are initialized at this point, do the
2470 	 * FW/HW initializations
2471 	 */
2472 	for_each_vf_sb(vf, i)
2473 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2474 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2475 
2476 	/* Sanity checks */
2477 	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED state but in state %d\n",
2479 		   vf->abs_vfid, vf->state);
2480 		return -EINVAL;
2481 	}
2482 	/* FLR cleanup epilogue */
2483 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2484 		return -EBUSY;
2485 
2486 	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2488 
2489 	/* vf init */
2490 	if (vf->cfg_flags & VF_CFG_STATS)
2491 		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2492 
2493 	if (vf->cfg_flags & VF_CFG_TPA)
2494 		flags |= FUNC_FLG_TPA;
2495 
2496 	if (is_vf_multi(vf))
2497 		flags |= FUNC_FLG_RSS;
2498 
2499 	/* function setup */
2500 	func_init.func_flgs = flags;
2501 	func_init.pf_id = BP_FUNC(bp);
2502 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2503 	func_init.fw_stat_map = vf->fw_stat_map;
2504 	func_init.spq_map = vf->spq_map;
2505 	func_init.spq_prod = 0;
2506 	bnx2x_func_init(bp, &func_init);
2507 
2508 	/* Enable the vf */
2509 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
2510 	bnx2x_vf_enable_traffic(bp, vf);
2511 
2512 	/* queue protection table */
2513 	for_each_vfq(vf, i)
2514 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2515 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2516 
2517 	vf->state = VF_ENABLED;
2518 
2519 	return 0;
2520 }
2521 
2522 /* VFOP close (teardown the queues, delete mcasts and close HW) */
2523 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2524 {
2525 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2526 	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2527 	enum bnx2x_vfop_close_state state = vfop->state;
2528 	struct bnx2x_vfop_cmd cmd = {
2529 		.done = bnx2x_vfop_close,
2530 		.block = false,
2531 	};
2532 
2533 	if (vfop->rc < 0)
2534 		goto op_err;
2535 
2536 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2537 
2538 	switch (state) {
2539 	case BNX2X_VFOP_CLOSE_QUEUES:
2540 
2541 		if (++(qx->qid) < vf_rxq_count(vf)) {
2542 			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2543 			if (vfop->rc)
2544 				goto op_err;
2545 			return;
2546 		}
2547 
2548 		/* remove multicasts */
2549 		vfop->state = BNX2X_VFOP_CLOSE_HW;
2550 		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
2551 		if (vfop->rc)
2552 			goto op_err;
2553 		return;
2554 
2555 	case BNX2X_VFOP_CLOSE_HW:
2556 
2557 		/* disable the interrupts */
2558 		DP(BNX2X_MSG_IOV, "disabling igu\n");
2559 		bnx2x_vf_igu_disable(bp, vf);
2560 
2561 		/* disable the VF */
2562 		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2563 		bnx2x_vf_clr_qtbl(bp, vf);
2564 
2565 		goto op_done;
2566 	default:
2567 		bnx2x_vfop_default(state);
2568 	}
2569 op_err:
2570 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2571 op_done:
2572 	vf->state = VF_ACQUIRED;
2573 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
2574 	bnx2x_vfop_end(bp, vf, vfop);
2575 }
2576 
2577 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2578 			 struct bnx2x_virtf *vf,
2579 			 struct bnx2x_vfop_cmd *cmd)
2580 {
2581 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2582 	if (vfop) {
		/* qid is pre-incremented, so the queue loop starts at 0 */
		vfop->args.qx.qid = -1;
2584 		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2585 				 bnx2x_vfop_close, cmd->done);
2586 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2587 					     cmd->block);
2588 	}
2589 	return -ENOMEM;
2590 }
2591 
/* VF release can be called either when: 1. the VF was acquired but
 * not enabled, or 2. the VF was enabled or is in the process of
 * being enabled
 */
2596 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2597 {
2598 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2599 	struct bnx2x_vfop_cmd cmd = {
2600 		.done = bnx2x_vfop_release,
2601 		.block = false,
2602 	};
2603 
2604 	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2605 
2606 	if (vfop->rc < 0)
2607 		goto op_err;
2608 
2609 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2610 	   vf->state == VF_FREE ? "Free" :
2611 	   vf->state == VF_ACQUIRED ? "Acquired" :
2612 	   vf->state == VF_ENABLED ? "Enabled" :
2613 	   vf->state == VF_RESET ? "Reset" :
2614 	   "Unknown");
2615 
2616 	switch (vf->state) {
2617 	case VF_ENABLED:
2618 		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
2619 		if (vfop->rc)
2620 			goto op_err;
2621 		return;
2622 
2623 	case VF_ACQUIRED:
2624 		DP(BNX2X_MSG_IOV, "about to free resources\n");
2625 		bnx2x_vf_free_resc(bp, vf);
2626 		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2627 		goto op_done;
2628 
2629 	case VF_FREE:
2630 	case VF_RESET:
2631 		/* do nothing */
2632 		goto op_done;
2633 	default:
2634 		bnx2x_vfop_default(vf->state);
2635 	}
2636 op_err:
2637 	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
2638 op_done:
2639 	bnx2x_vfop_end(bp, vf, vfop);
2640 }
2641 
2642 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
2643 			   struct bnx2x_virtf *vf,
2644 			   struct bnx2x_vfop_cmd *cmd)
2645 {
2646 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2647 	if (vfop) {
2648 		bnx2x_vfop_opset(-1, /* use vf->state */
2649 				 bnx2x_vfop_release, cmd->done);
2650 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
2651 					     cmd->block);
2652 	}
2653 	return -ENOMEM;
2654 }
2655 
2656 /* VF release ~ VF close + VF release-resources
2657  * Release is the ultimate SW shutdown and is called whenever an
2658  * irrecoverable error is encountered.
2659  */
2660 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
2661 {
2662 	struct bnx2x_vfop_cmd cmd = {
2663 		.done = NULL,
2664 		.block = block,
2665 	};
	int rc;

	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2668 
2669 	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
2670 	if (rc)
2671 		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op - rc=%d\n",
2673 		     vf->abs_vfid, rc);
2674 }
2675 
2676 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
2677 			      struct bnx2x_virtf *vf, u32 *sbdf)
2678 {
2679 	*sbdf = vf->devfn | (vf->bus << 8);
2680 }
2681 
2682 static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
2683 		       struct bnx2x_vf_bar_info *bar_info)
2684 {
2685 	int n;
2686 
2687 	bar_info->nr_bars = bp->vfdb->sriov.nres;
2688 	for (n = 0; n < bar_info->nr_bars; n++)
2689 		bar_info->bars[n] = vf->bars[n];
2690 }
2691 
2692 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2693 			      enum channel_tlvs tlv)
2694 {
2695 	/* lock the channel */
2696 	mutex_lock(&vf->op_mutex);
2697 
2698 	/* record the locking op */
2699 	vf->op_current = tlv;
2700 
2701 	/* log the lock */
2702 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2703 	   vf->abs_vfid, tlv);
2704 }
2705 
2706 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2707 				enum channel_tlvs expected_tlv)
2708 {
2709 	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d\n", expected_tlv,
2711 	     vf->op_current);
2712 
	/* unlock the channel */
2714 	mutex_unlock(&vf->op_mutex);
2715 
2716 	/* log the unlock */
2717 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2718 	   vf->abs_vfid, vf->op_current);
2719 
	/* clear the locking op */
2721 	vf->op_current = CHANNEL_TLV_NONE;
2722 }
2723