1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include "bnx2x_sp.h"
24 #include <linux/crc32.h>
25 #include <linux/if_vlan.h>
26 
27 /* General service functions */
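/* Write the VF's parent PF id into the VF-to-PF table kept in the internal
 * memory of each of the four STORM processors (X/C/T/U), keyed by the VF's
 * absolute function id.
 */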
28 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
29 					 u16 pf_id)
30 {
31 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
32 		pf_id);
33 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
34 		pf_id);
35 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
36 		pf_id);
37 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
38 		pf_id);
39 }
40 
41 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
42 					u8 enable)
43 {
44 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
45 		enable);
46 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
47 		enable);
48 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
49 		enable);
50 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
51 		enable);
52 }
53 
54 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
55 {
56 	int idx;
57 
58 	for_each_vf(bp, idx)
59 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
60 			break;
61 	return idx;
62 }
63 
64 static
65 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
66 {
67 	u16 idx =  (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
68 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
69 }
70 
71 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
72 				u8 igu_sb_id, u8 segment, u16 index, u8 op,
73 				u8 update)
74 {
75 	/* acking a VF sb through the PF - use the GRC */
76 	u32 ctl;
77 	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
78 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
79 	u32 func_encode = vf->abs_vfid;
80 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
81 	struct igu_regular cmd_data = {0};
82 
83 	cmd_data.sb_id_and_flags =
84 			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
85 			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
86 			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
87 			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
88 
89 	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
90 	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
91 	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
92 
93 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
94 	   cmd_data.sb_id_and_flags, igu_addr_data);
95 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
96 	mmiowb();
97 	barrier();
98 
99 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
100 	   ctl, igu_addr_ctl);
101 	REG_WR(bp, igu_addr_ctl, ctl);
102 	mmiowb();
103 	barrier();
104 }
105 /* VFOP - VF slow-path operation support */
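/* Each VFOP is a small per-VF state machine driving one slow-path operation
 * (queue ctor/dtor, vlan/mac configuration, rx-mode, mcast, FLR). A handler
 * switches on vfop->state, posts the relevant ramrod, and then relies on
 * bnx2x_vfop_finalize() to either fall through to the next state, return
 * while the ramrod is still pending, or branch to its op_err/op_done labels.
 */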
106 
107 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
108 
109 /* VFOP operations states */
110 enum bnx2x_vfop_qctor_state {
111 	   BNX2X_VFOP_QCTOR_INIT,
112 	   BNX2X_VFOP_QCTOR_SETUP,
113 	   BNX2X_VFOP_QCTOR_INT_EN
114 };
115 
116 enum bnx2x_vfop_qdtor_state {
117 	   BNX2X_VFOP_QDTOR_HALT,
118 	   BNX2X_VFOP_QDTOR_TERMINATE,
119 	   BNX2X_VFOP_QDTOR_CFCDEL,
120 	   BNX2X_VFOP_QDTOR_DONE
121 };
122 
123 enum bnx2x_vfop_vlan_mac_state {
124 	   BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
125 	   BNX2X_VFOP_VLAN_MAC_CLEAR,
126 	   BNX2X_VFOP_VLAN_MAC_CHK_DONE,
127 	   BNX2X_VFOP_MAC_CONFIG_LIST,
128 	   BNX2X_VFOP_VLAN_CONFIG_LIST,
129 	   BNX2X_VFOP_VLAN_CONFIG_LIST_0
130 };
131 
132 enum bnx2x_vfop_qsetup_state {
133 	   BNX2X_VFOP_QSETUP_CTOR,
134 	   BNX2X_VFOP_QSETUP_VLAN0,
135 	   BNX2X_VFOP_QSETUP_DONE
136 };
137 
138 enum bnx2x_vfop_mcast_state {
139 	   BNX2X_VFOP_MCAST_DEL,
140 	   BNX2X_VFOP_MCAST_ADD,
141 	   BNX2X_VFOP_MCAST_CHK_DONE
142 };
143 enum bnx2x_vfop_qflr_state {
144 	   BNX2X_VFOP_QFLR_CLR_VLAN,
145 	   BNX2X_VFOP_QFLR_CLR_MAC,
146 	   BNX2X_VFOP_QFLR_TERMINATE,
147 	   BNX2X_VFOP_QFLR_DONE
148 };
149 
150 enum bnx2x_vfop_flr_state {
151 	   BNX2X_VFOP_FLR_QUEUES,
152 	   BNX2X_VFOP_FLR_HW
153 };
154 
155 enum bnx2x_vfop_close_state {
156 	   BNX2X_VFOP_CLOSE_QUEUES,
157 	   BNX2X_VFOP_CLOSE_HW
158 };
159 
160 enum bnx2x_vfop_rxmode_state {
161 	   BNX2X_VFOP_RXMODE_CONFIG,
162 	   BNX2X_VFOP_RXMODE_DONE
163 };
164 
165 enum bnx2x_vfop_qteardown_state {
166 	   BNX2X_VFOP_QTEARDOWN_RXMODE,
167 	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
168 	   BNX2X_VFOP_QTEARDOWN_CLR_MAC,
169 	   BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
170 	   BNX2X_VFOP_QTEARDOWN_QDTOR,
171 	   BNX2X_VFOP_QTEARDOWN_DONE
172 };
173 
174 enum bnx2x_vfop_rss_state {
175 	   BNX2X_VFOP_RSS_CONFIG,
176 	   BNX2X_VFOP_RSS_DONE
177 };
178 
179 enum bnx2x_vfop_tpa_state {
180 	   BNX2X_VFOP_TPA_CONFIG,
181 	   BNX2X_VFOP_TPA_DONE
182 };
183 
184 #define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
185 
186 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
187 			      struct bnx2x_queue_init_params *init_params,
188 			      struct bnx2x_queue_setup_params *setup_params,
189 			      u16 q_idx, u16 sb_idx)
190 {
191 	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
193 	   vf->abs_vfid,
194 	   q_idx,
195 	   sb_idx,
196 	   init_params->tx.sb_cq_index,
197 	   init_params->tx.hc_rate,
198 	   setup_params->flags,
199 	   setup_params->txq_params.traffic_type);
200 }
201 
202 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
203 			    struct bnx2x_queue_init_params *init_params,
204 			    struct bnx2x_queue_setup_params *setup_params,
205 			    u16 q_idx, u16 sb_idx)
206 {
207 	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
208 
209 	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
210 	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
211 	   vf->abs_vfid,
212 	   q_idx,
213 	   sb_idx,
214 	   init_params->rx.sb_cq_index,
215 	   init_params->rx.hc_rate,
216 	   setup_params->gen_params.mtu,
217 	   rxq_params->buf_sz,
218 	   rxq_params->sge_buf_sz,
219 	   rxq_params->max_sges_pkt,
220 	   rxq_params->tpa_agg_sz,
221 	   setup_params->flags,
222 	   rxq_params->drop_flags,
223 	   rxq_params->cache_line_log);
224 }
225 
226 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
227 			   struct bnx2x_virtf *vf,
228 			   struct bnx2x_vf_queue *q,
229 			   struct bnx2x_vfop_qctor_params *p,
230 			   unsigned long q_type)
231 {
232 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
233 	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
234 
235 	/* INIT */
236 
237 	/* Enable host coalescing in the transition to INIT state */
238 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
239 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
240 
241 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
242 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
243 
244 	/* FW SB ID */
245 	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
246 	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
247 
248 	/* context */
249 	init_p->cxts[0] = q->cxt;
250 
251 	/* SETUP */
252 
253 	/* Setup-op general parameters */
254 	setup_p->gen_params.spcl_id = vf->sp_cl_id;
255 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
256 
257 	/* Setup-op pause params:
258 	 * Nothing to do, the pause thresholds are set by default to 0 which
259 	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
261 	 */
262 	if (vf->cfg_flags & VF_CFG_FW_FC)
263 		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
264 			  vf->abs_vfid);
265 	/* Setup-op flags:
266 	 * collect statistics, zero statistics, local-switching, security,
267 	 * OV for Flex10, RSS and MCAST for leading
268 	 */
269 	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
270 		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
271 
272 	/* for VFs, enable tx switching, bd coherency, and mac address
273 	 * anti-spoofing
274 	 */
275 	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
276 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
277 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
278 
279 	/* Setup-op rx parameters */
280 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
281 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
282 
283 		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
284 		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
285 		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
286 
287 		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
288 			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
289 	}
290 
291 	/* Setup-op tx parameters */
292 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
293 		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
294 		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
295 	}
296 }
297 
298 /* VFOP queue construction */
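/* State flow: INIT (post the init ramrod) -> SETUP (post the setup ramrod
 * using the pre-prepared parameters) -> INT_EN (ack the queue's IGU status
 * block to enable interrupts).
 */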
299 static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
300 {
301 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
302 	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
303 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
304 	enum bnx2x_vfop_qctor_state state = vfop->state;
305 
306 	bnx2x_vfop_reset_wq(vf);
307 
308 	if (vfop->rc < 0)
309 		goto op_err;
310 
311 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
312 
313 	switch (state) {
314 	case BNX2X_VFOP_QCTOR_INIT:
315 
316 		/* has this queue already been opened? */
317 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
318 		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
319 			DP(BNX2X_MSG_IOV,
320 			   "Entered qctor but queue was already up. Aborting gracefully\n");
321 			goto op_done;
322 		}
323 
324 		/* next state */
325 		vfop->state = BNX2X_VFOP_QCTOR_SETUP;
326 
327 		q_params->cmd = BNX2X_Q_CMD_INIT;
328 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
329 
330 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
331 
332 	case BNX2X_VFOP_QCTOR_SETUP:
333 		/* next state */
334 		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
335 
336 		/* copy pre-prepared setup params to the queue-state params */
337 		vfop->op_p->qctor.qstate.params.setup =
338 			vfop->op_p->qctor.prep_qsetup;
339 
340 		q_params->cmd = BNX2X_Q_CMD_SETUP;
341 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
342 
343 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
344 
345 	case BNX2X_VFOP_QCTOR_INT_EN:
346 
347 		/* enable interrupts */
348 		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
349 				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
350 		goto op_done;
351 	default:
352 		bnx2x_vfop_default(state);
353 	}
354 op_err:
355 	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
356 		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
357 op_done:
358 	bnx2x_vfop_end(bp, vf, vfop);
359 op_pending:
360 	return;
361 }
362 
363 static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
364 				struct bnx2x_virtf *vf,
365 				struct bnx2x_vfop_cmd *cmd,
366 				int qid)
367 {
368 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
369 
370 	if (vfop) {
371 		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
372 
373 		vfop->args.qctor.qid = qid;
374 		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
375 
376 		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
377 				 bnx2x_vfop_qctor, cmd->done);
378 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
379 					     cmd->block);
380 	}
381 	return -ENOMEM;
382 }
383 
384 /* VFOP queue destruction */
385 static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
386 {
387 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
388 	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
389 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
390 	enum bnx2x_vfop_qdtor_state state = vfop->state;
391 
392 	bnx2x_vfop_reset_wq(vf);
393 
394 	if (vfop->rc < 0)
395 		goto op_err;
396 
397 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
398 
399 	switch (state) {
400 	case BNX2X_VFOP_QDTOR_HALT:
401 
402 		/* has this queue already been stopped? */
403 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
404 		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
405 			DP(BNX2X_MSG_IOV,
406 			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
407 
408 			/* next state */
409 			vfop->state = BNX2X_VFOP_QDTOR_DONE;
410 
411 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
412 		}
413 
414 		/* next state */
415 		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
416 
417 		q_params->cmd = BNX2X_Q_CMD_HALT;
418 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
419 
420 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
421 
422 	case BNX2X_VFOP_QDTOR_TERMINATE:
423 		/* next state */
424 		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
425 
426 		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
427 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
428 
429 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
430 
431 	case BNX2X_VFOP_QDTOR_CFCDEL:
432 		/* next state */
433 		vfop->state = BNX2X_VFOP_QDTOR_DONE;
434 
435 		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
436 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
437 
438 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
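/* note: op_err/op_done are placed inside the switch so that an error falls
 * through into the DONE case below, which invalidates the queue context and
 * ends the vfop.
 */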
439 op_err:
440 	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
441 		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
442 op_done:
443 	case BNX2X_VFOP_QDTOR_DONE:
444 		/* invalidate the context */
445 		if (qdtor->cxt) {
446 			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
447 			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
448 		}
449 		bnx2x_vfop_end(bp, vf, vfop);
450 		return;
451 	default:
452 		bnx2x_vfop_default(state);
453 	}
454 op_pending:
455 	return;
456 }
457 
458 static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
459 				struct bnx2x_virtf *vf,
460 				struct bnx2x_vfop_cmd *cmd,
461 				int qid)
462 {
463 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
464 
465 	if (vfop) {
466 		struct bnx2x_queue_state_params *qstate =
467 			&vf->op_params.qctor.qstate;
468 
469 		memset(qstate, 0, sizeof(*qstate));
470 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
471 
472 		vfop->args.qdtor.qid = qid;
473 		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
474 
475 		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
476 				 bnx2x_vfop_qdtor, cmd->done);
477 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
478 					     cmd->block);
479 	} else {
480 		BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
481 		return -ENOMEM;
482 	}
483 }
484 
485 static void
486 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
487 {
488 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
489 	if (vf) {
490 		/* the first igu entry belonging to VFs of this PF */
491 		if (!BP_VFDB(bp)->first_vf_igu_entry)
492 			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
493 
494 		/* the first igu entry belonging to this VF */
495 		if (!vf_sb_count(vf))
496 			vf->igu_base_id = igu_sb_id;
497 
498 		++vf_sb_count(vf);
499 		++vf->sb_count;
500 	}
501 	BP_VFDB(bp)->vf_sbs_pool++;
502 }
503 
504 /* VFOP MAC/VLAN helpers */
505 static inline void bnx2x_vfop_credit(struct bnx2x *bp,
506 				     struct bnx2x_vfop *vfop,
507 				     struct bnx2x_vlan_mac_obj *obj)
508 {
509 	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
510 
511 	/* update credit only if there is no error
512 	 * and a valid credit counter
513 	 */
514 	if (!vfop->rc && args->credit) {
515 		struct list_head *pos;
516 		int read_lock;
517 		int cnt = 0;
518 
519 		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
520 		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");
522 
523 		list_for_each(pos, &obj->head)
524 			cnt++;
525 
526 		if (!read_lock)
527 			bnx2x_vlan_mac_h_read_unlock(bp, obj);
528 
529 		atomic_set(args->credit, cnt);
530 	}
531 }
532 
533 static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
534 				    struct bnx2x_vfop_filter *pos,
535 				    struct bnx2x_vlan_mac_data *user_req)
536 {
537 	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
538 		BNX2X_VLAN_MAC_DEL;
539 
540 	switch (pos->type) {
541 	case BNX2X_VFOP_FILTER_MAC:
542 		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
543 		break;
544 	case BNX2X_VFOP_FILTER_VLAN:
545 		user_req->u.vlan.vlan = pos->vid;
546 		break;
547 	default:
548 		BNX2X_ERR("Invalid filter type, skipping\n");
549 		return 1;
550 	}
551 	return 0;
552 }
553 
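/* Apply a list of vlan/mac filters one by one through the vlan_mac object.
 * Commands that were queued successfully are moved onto a rollback list; on
 * failure, or if more rules were added than filters->add_cnt allows, every
 * command on the rollback list is reversed and the operation fails.
 */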
554 static int bnx2x_vfop_config_list(struct bnx2x *bp,
555 				  struct bnx2x_vfop_filters *filters,
556 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
557 {
558 	struct bnx2x_vfop_filter *pos, *tmp;
559 	struct list_head rollback_list, *filters_list = &filters->head;
560 	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
561 	int rc = 0, cnt = 0;
562 
563 	INIT_LIST_HEAD(&rollback_list);
564 
565 	list_for_each_entry_safe(pos, tmp, filters_list, link) {
566 		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
567 			continue;
568 
569 		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
570 		if (rc >= 0) {
571 			cnt += pos->add ? 1 : -1;
572 			list_move(&pos->link, &rollback_list);
573 			rc = 0;
574 		} else if (rc == -EEXIST) {
575 			rc = 0;
576 		} else {
577 			BNX2X_ERR("Failed to add a new vlan_mac command\n");
578 			break;
579 		}
580 	}
581 
582 	/* rollback if error or too many rules added */
583 	if (rc || cnt > filters->add_cnt) {
584 		BNX2X_ERR("error or too many rules added. Performing rollback\n");
585 		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
586 			pos->add = !pos->add;	/* reverse op */
587 			bnx2x_vfop_set_user_req(bp, pos, user_req);
588 			bnx2x_config_vlan_mac(bp, vlan_mac);
589 			list_del(&pos->link);
590 		}
591 		cnt = 0;
592 		if (!rc)
593 			rc = -EINVAL;
594 	}
595 	filters->add_cnt = cnt;
596 	return rc;
597 }
598 
599 /* VFOP set VLAN/MAC */
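/* A single state machine serves all vlan/mac flavours - single add/del,
 * delete-all and list configuration - for both the mac and the vlan
 * classification objects. The initial state chosen by the *_cmd wrappers
 * below selects which flavour actually runs.
 */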
600 static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
601 {
602 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
603 	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
604 	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
605 	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
606 
607 	enum bnx2x_vfop_vlan_mac_state state = vfop->state;
608 
609 	if (vfop->rc < 0)
610 		goto op_err;
611 
612 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
613 
614 	bnx2x_vfop_reset_wq(vf);
615 
616 	switch (state) {
617 	case BNX2X_VFOP_VLAN_MAC_CLEAR:
618 		/* next state */
619 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
620 
621 		/* do delete */
622 		vfop->rc = obj->delete_all(bp, obj,
623 					   &vlan_mac->user_req.vlan_mac_flags,
624 					   &vlan_mac->ramrod_flags);
625 
626 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
627 
628 	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
629 		/* next state */
630 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
631 
632 		/* do config */
633 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
634 		if (vfop->rc == -EEXIST)
635 			vfop->rc = 0;
636 
637 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
638 
639 	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
640 		vfop->rc = !!obj->raw.check_pending(&obj->raw);
641 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
642 
643 	case BNX2X_VFOP_MAC_CONFIG_LIST:
644 		/* next state */
645 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
646 
647 		/* do list config */
648 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
649 		if (vfop->rc)
650 			goto op_err;
651 
652 		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
653 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
654 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
655 
656 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
657 		/* next state */
658 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
659 
660 		/* do list config */
661 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
662 		if (!vfop->rc) {
663 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
664 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
665 		}
666 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
667 
668 	default:
669 		bnx2x_vfop_default(state);
670 	}
671 op_err:
672 	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
673 op_done:
674 	kfree(filters);
675 	bnx2x_vfop_credit(bp, vfop, obj);
676 	bnx2x_vfop_end(bp, vf, vfop);
677 op_pending:
678 	return;
679 }
680 
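/* Flags used when preparing a vlan/mac ramrod:
 * drv_only     - update driver state only (RAMROD_DRV_CLR_ONLY)
 * dont_consume - do not consume CAM credit for this rule
 * single_cmd   - execute the command immediately (RAMROD_EXEC)
 * add          - add (true) or delete (false) the classification rule
 */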
681 struct bnx2x_vfop_vlan_mac_flags {
682 	bool drv_only;
683 	bool dont_consume;
684 	bool single_cmd;
685 	bool add;
686 };
687 
688 static void
689 bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
690 				struct bnx2x_vfop_vlan_mac_flags *flags)
691 {
692 	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
693 
694 	memset(ramrod, 0, sizeof(*ramrod));
695 
696 	/* ramrod flags */
697 	if (flags->drv_only)
698 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
699 	if (flags->single_cmd)
700 		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
701 
702 	/* mac_vlan flags */
703 	if (flags->dont_consume)
704 		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
705 
706 	/* cmd */
707 	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
708 }
709 
710 static inline void
711 bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
712 			   struct bnx2x_vfop_vlan_mac_flags *flags)
713 {
714 	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
715 	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
716 }
717 
718 static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
719 				     struct bnx2x_virtf *vf,
720 				     struct bnx2x_vfop_cmd *cmd,
721 				     int qid, bool drv_only)
722 {
723 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
724 	int rc;
725 
726 	if (vfop) {
727 		struct bnx2x_vfop_args_filters filters = {
728 			.multi_filter = NULL,	/* single */
729 			.credit = NULL,		/* consume credit */
730 		};
731 		struct bnx2x_vfop_vlan_mac_flags flags = {
732 			.drv_only = drv_only,
733 			.dont_consume = (filters.credit != NULL),
734 			.single_cmd = true,
735 			.add = false /* don't care */,
736 		};
737 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
738 			&vf->op_params.vlan_mac;
739 
740 		/* set ramrod params */
741 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
742 
743 		/* set object */
744 		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
745 		if (rc)
746 			return rc;
747 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
748 
749 		/* set extra args */
750 		vfop->args.filters = filters;
751 
752 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
753 				 bnx2x_vfop_vlan_mac, cmd->done);
754 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
755 					     cmd->block);
756 	}
757 	return -ENOMEM;
758 }
759 
760 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
761 			    struct bnx2x_virtf *vf,
762 			    struct bnx2x_vfop_cmd *cmd,
763 			    struct bnx2x_vfop_filters *macs,
764 			    int qid, bool drv_only)
765 {
766 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
767 	int rc;
768 
769 	if (vfop) {
770 		struct bnx2x_vfop_args_filters filters = {
771 			.multi_filter = macs,
772 			.credit = NULL,		/* consume credit */
773 		};
774 		struct bnx2x_vfop_vlan_mac_flags flags = {
775 			.drv_only = drv_only,
776 			.dont_consume = (filters.credit != NULL),
777 			.single_cmd = false,
778 			.add = false, /* don't care since only the items in the
779 				       * filters list affect the sp operation,
780 				       * not the list itself
781 				       */
782 		};
783 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
784 			&vf->op_params.vlan_mac;
785 
786 		/* set ramrod params */
787 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
788 
789 		/* set object */
790 		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
791 		if (rc)
792 			return rc;
793 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
794 
795 		/* set extra args */
796 		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
797 		vfop->args.filters = filters;
798 
799 		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
800 				 bnx2x_vfop_vlan_mac, cmd->done);
801 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
802 					     cmd->block);
803 	}
804 	return -ENOMEM;
805 }
806 
807 static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
808 				   struct bnx2x_virtf *vf,
809 				   struct bnx2x_vfop_cmd *cmd,
810 				   int qid, u16 vid, bool add)
811 {
812 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
813 	int rc;
814 
815 	if (vfop) {
816 		struct bnx2x_vfop_args_filters filters = {
817 			.multi_filter = NULL, /* single command */
818 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
819 		};
820 		struct bnx2x_vfop_vlan_mac_flags flags = {
821 			.drv_only = false,
822 			.dont_consume = (filters.credit != NULL),
823 			.single_cmd = true,
824 			.add = add,
825 		};
826 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
827 			&vf->op_params.vlan_mac;
828 
829 		/* set ramrod params */
830 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
831 		ramrod->user_req.u.vlan.vlan = vid;
832 
833 		/* set object */
834 		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
835 		if (rc)
836 			return rc;
837 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
838 
839 		/* set extra args */
840 		vfop->args.filters = filters;
841 
842 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
843 				 bnx2x_vfop_vlan_mac, cmd->done);
844 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
845 					     cmd->block);
846 	}
847 	return -ENOMEM;
848 }
849 
850 static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
851 			       struct bnx2x_virtf *vf,
852 			       struct bnx2x_vfop_cmd *cmd,
853 			       int qid, bool drv_only)
854 {
855 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
856 	int rc;
857 
858 	if (vfop) {
859 		struct bnx2x_vfop_args_filters filters = {
860 			.multi_filter = NULL, /* single command */
861 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
862 		};
863 		struct bnx2x_vfop_vlan_mac_flags flags = {
864 			.drv_only = drv_only,
865 			.dont_consume = (filters.credit != NULL),
866 			.single_cmd = true,
867 			.add = false, /* don't care */
868 		};
869 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
870 			&vf->op_params.vlan_mac;
871 
872 		/* set ramrod params */
873 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
874 
875 		/* set object */
876 		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
877 		if (rc)
878 			return rc;
879 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
880 
881 		/* set extra args */
882 		vfop->args.filters = filters;
883 
884 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
885 				 bnx2x_vfop_vlan_mac, cmd->done);
886 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
887 					     cmd->block);
888 	}
889 	return -ENOMEM;
890 }
891 
892 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
893 			     struct bnx2x_virtf *vf,
894 			     struct bnx2x_vfop_cmd *cmd,
895 			     struct bnx2x_vfop_filters *vlans,
896 			     int qid, bool drv_only)
897 {
898 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
899 	int rc;
900 
901 	if (vfop) {
902 		struct bnx2x_vfop_args_filters filters = {
903 			.multi_filter = vlans,
904 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
905 		};
906 		struct bnx2x_vfop_vlan_mac_flags flags = {
907 			.drv_only = drv_only,
908 			.dont_consume = (filters.credit != NULL),
909 			.single_cmd = false,
910 			.add = false, /* don't care */
911 		};
912 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
913 			&vf->op_params.vlan_mac;
914 
915 		/* set ramrod params */
916 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
917 
918 		/* set object */
919 		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
920 		if (rc)
921 			return rc;
922 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
923 
924 		/* set extra args */
925 		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
926 			atomic_read(filters.credit);
927 
928 		vfop->args.filters = filters;
929 
930 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
931 				 bnx2x_vfop_vlan_mac, cmd->done);
932 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
933 					     cmd->block);
934 	}
935 	return -ENOMEM;
936 }
937 
938 /* VFOP queue setup (queue constructor + set vlan 0) */
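/* On completion the PF also flags VF_CFG_VLAN and kicks the sp_rtnl task
 * with BNX2X_SP_RTNL_HYPERVISOR_VLAN set, deferring any hypervisor-side
 * VLAN handling for this VF to that context.
 */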
939 static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
940 {
941 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
942 	int qid = vfop->args.qctor.qid;
943 	enum bnx2x_vfop_qsetup_state state = vfop->state;
944 	struct bnx2x_vfop_cmd cmd = {
945 		.done = bnx2x_vfop_qsetup,
946 		.block = false,
947 	};
948 
949 	if (vfop->rc < 0)
950 		goto op_err;
951 
952 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
953 
954 	switch (state) {
955 	case BNX2X_VFOP_QSETUP_CTOR:
956 		/* init the queue ctor command */
957 		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
958 		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
959 		if (vfop->rc)
960 			goto op_err;
961 		return;
962 
963 	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
965 		if (qid)
966 			goto op_done;
967 
968 		/* init the queue set-vlan command (for vlan 0) */
969 		vfop->state = BNX2X_VFOP_QSETUP_DONE;
970 		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
971 		if (vfop->rc)
972 			goto op_err;
973 		return;
974 op_err:
975 	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
976 op_done:
977 	case BNX2X_VFOP_QSETUP_DONE:
978 		vf->cfg_flags |= VF_CFG_VLAN;
979 		smp_mb__before_clear_bit();
980 		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
981 			&bp->sp_rtnl_state);
982 		smp_mb__after_clear_bit();
983 		schedule_delayed_work(&bp->sp_rtnl_task, 0);
984 		bnx2x_vfop_end(bp, vf, vfop);
985 		return;
986 	default:
987 		bnx2x_vfop_default(state);
988 	}
989 }
990 
991 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
992 			  struct bnx2x_virtf *vf,
993 			  struct bnx2x_vfop_cmd *cmd,
994 			  int qid)
995 {
996 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
997 
998 	if (vfop) {
999 		vfop->args.qctor.qid = qid;
1000 
1001 		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
1002 				 bnx2x_vfop_qsetup, cmd->done);
1003 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
1004 					     cmd->block);
1005 	}
1006 	return -ENOMEM;
1007 }
1008 
1009 /* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
1010 static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1011 {
1012 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1013 	int qid = vfop->args.qx.qid;
1014 	enum bnx2x_vfop_qflr_state state = vfop->state;
1015 	struct bnx2x_queue_state_params *qstate;
1016 	struct bnx2x_vfop_cmd cmd;
1017 
1018 	bnx2x_vfop_reset_wq(vf);
1019 
1020 	if (vfop->rc < 0)
1021 		goto op_err;
1022 
1023 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
1024 
1025 	cmd.done = bnx2x_vfop_qflr;
1026 	cmd.block = false;
1027 
1028 	switch (state) {
1029 	case BNX2X_VFOP_QFLR_CLR_VLAN:
1030 		/* vlan-clear-all: driver-only, don't consume credit */
1031 		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
1032 
1033 		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
1034 			/* the vlan_mac vfop will re-schedule us */
1035 			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
1036 							      qid, true);
1037 			if (vfop->rc)
1038 				goto op_err;
1039 			return;
1040 
1041 		} else {
1042 			/* need to reschedule ourselves */
1043 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1044 		}
1045 
1046 	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver-only, consume credit */
1048 		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
1049 		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
1050 			/* the vlan_mac vfop will re-schedule us */
1051 			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
1052 							     qid, true);
1053 			if (vfop->rc)
1054 				goto op_err;
1055 			return;
1056 
1057 		} else {
1058 			/* need to reschedule ourselves */
1059 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1060 		}
1061 
1062 	case BNX2X_VFOP_QFLR_TERMINATE:
1063 		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
1065 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
1066 		vfop->state = BNX2X_VFOP_QFLR_DONE;
1067 
1068 		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
1069 		   vf->abs_vfid, qstate->q_obj->state);
1070 
1071 		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
1072 			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
1073 			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
1074 			vfop->rc = bnx2x_queue_state_change(bp, qstate);
1075 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
1076 		} else {
1077 			goto op_done;
1078 		}
1079 
1080 op_err:
1081 	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
1082 		  vf->abs_vfid, qid, vfop->rc);
1083 op_done:
1084 	case BNX2X_VFOP_QFLR_DONE:
1085 		bnx2x_vfop_end(bp, vf, vfop);
1086 		return;
1087 	default:
1088 		bnx2x_vfop_default(state);
1089 	}
1090 op_pending:
1091 	return;
1092 }
1093 
1094 static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
1095 			       struct bnx2x_virtf *vf,
1096 			       struct bnx2x_vfop_cmd *cmd,
1097 			       int qid)
1098 {
1099 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1100 
1101 	if (vfop) {
1102 		vfop->args.qx.qid = qid;
1103 		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
1104 				 bnx2x_vfop_qflr, cmd->done);
1105 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
1106 					     cmd->block);
1107 	}
1108 	return -ENOMEM;
1109 }
1110 
1111 /* VFOP multi-casts */
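/* The mcast vfop first removes the previously configured multicast list and
 * then, once no ramrod is pending on the object, installs the new list (if
 * any) taken from the vfop arguments.
 */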
1112 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
1113 {
1114 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1115 	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
1116 	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
1117 	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
1118 	enum bnx2x_vfop_mcast_state state = vfop->state;
1119 	int i;
1120 
1121 	bnx2x_vfop_reset_wq(vf);
1122 
1123 	if (vfop->rc < 0)
1124 		goto op_err;
1125 
1126 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1127 
1128 	switch (state) {
1129 	case BNX2X_VFOP_MCAST_DEL:
1130 		/* clear existing mcasts */
1131 		vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
1132 					     : BNX2X_VFOP_MCAST_CHK_DONE;
1133 		mcast->mcast_list_len = vf->mcast_list_len;
1134 		vf->mcast_list_len = args->mc_num;
1135 		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
1136 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1137 
1138 	case BNX2X_VFOP_MCAST_ADD:
1139 		if (raw->check_pending(raw))
1140 			goto op_pending;
1141 
1142 		/* update mcast list on the ramrod params */
1143 		INIT_LIST_HEAD(&mcast->mcast_list);
1144 		for (i = 0; i < args->mc_num; i++)
1145 			list_add_tail(&(args->mc[i].link),
1146 				      &mcast->mcast_list);
1147 		mcast->mcast_list_len = args->mc_num;
1148 
1149 		/* add new mcasts */
1150 		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
1151 		vfop->rc = bnx2x_config_mcast(bp, mcast,
1152 					      BNX2X_MCAST_CMD_ADD);
1153 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1154 
1155 	case BNX2X_VFOP_MCAST_CHK_DONE:
1156 		vfop->rc = raw->check_pending(raw) ? 1 : 0;
1157 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1158 	default:
1159 		bnx2x_vfop_default(state);
1160 	}
1161 op_err:
1162 	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
1163 op_done:
1164 	kfree(args->mc);
1165 	bnx2x_vfop_end(bp, vf, vfop);
1166 op_pending:
1167 	return;
1168 }
1169 
1170 int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
1171 			 struct bnx2x_virtf *vf,
1172 			 struct bnx2x_vfop_cmd *cmd,
1173 			 bnx2x_mac_addr_t *mcasts,
1174 			 int mcast_num, bool drv_only)
1175 {
1176 	struct bnx2x_vfop *vfop = NULL;
1177 	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
1178 	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
1179 					   NULL;
1180 
1181 	if (!mc_sz || mc) {
1182 		vfop = bnx2x_vfop_add(bp, vf);
1183 		if (vfop) {
1184 			int i;
1185 			struct bnx2x_mcast_ramrod_params *ramrod =
1186 				&vf->op_params.mcast;
1187 
1188 			/* set ramrod params */
1189 			memset(ramrod, 0, sizeof(*ramrod));
1190 			ramrod->mcast_obj = &vf->mcast_obj;
1191 			if (drv_only)
1192 				set_bit(RAMROD_DRV_CLR_ONLY,
1193 					&ramrod->ramrod_flags);
1194 
1195 			/* copy mcasts pointers */
1196 			vfop->args.mc_list.mc_num = mcast_num;
1197 			vfop->args.mc_list.mc = mc;
1198 			for (i = 0; i < mcast_num; i++)
1199 				mc[i].mac = mcasts[i];
1200 
1201 			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
1202 					 bnx2x_vfop_mcast, cmd->done);
1203 			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
1204 						     cmd->block);
1205 		} else {
1206 			kfree(mc);
1207 		}
1208 	}
1209 	return -ENOMEM;
1210 }
1211 
1212 /* VFOP rx-mode */
1213 static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1214 {
1215 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1216 	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
1217 	enum bnx2x_vfop_rxmode_state state = vfop->state;
1218 
1219 	bnx2x_vfop_reset_wq(vf);
1220 
1221 	if (vfop->rc < 0)
1222 		goto op_err;
1223 
1224 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1225 
1226 	switch (state) {
1227 	case BNX2X_VFOP_RXMODE_CONFIG:
1228 		/* next state */
1229 		vfop->state = BNX2X_VFOP_RXMODE_DONE;
1230 
		/* record the accept flags in vfdb so the hypervisor can
		 * modify them if necessary
		 */
1234 		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
1235 			ramrod->rx_accept_flags;
1236 		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1237 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1238 op_err:
1239 		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
1240 op_done:
1241 	case BNX2X_VFOP_RXMODE_DONE:
1242 		bnx2x_vfop_end(bp, vf, vfop);
1243 		return;
1244 	default:
1245 		bnx2x_vfop_default(state);
1246 	}
1247 op_pending:
1248 	return;
1249 }
1250 
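/* Build an rx-mode ramrod for one VF queue: cid/cl_id are taken from the vf
 * queue, the same accept flags are applied to both the Rx and Tx sides, and
 * the VF's slow-path buffer is used for the ramrod data.
 */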
1251 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
1252 				  struct bnx2x_rx_mode_ramrod_params *ramrod,
1253 				  struct bnx2x_virtf *vf,
1254 				  unsigned long accept_flags)
1255 {
1256 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1257 
1258 	memset(ramrod, 0, sizeof(*ramrod));
1259 	ramrod->cid = vfq->cid;
1260 	ramrod->cl_id = vfq_cl_id(vf, vfq);
1261 	ramrod->rx_mode_obj = &bp->rx_mode_obj;
1262 	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1263 	ramrod->rx_accept_flags = accept_flags;
1264 	ramrod->tx_accept_flags = accept_flags;
1265 	ramrod->pstate = &vf->filter_state;
1266 	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1267 
1268 	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1269 	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1270 	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1271 
1272 	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1273 	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1274 }
1275 
1276 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1277 			  struct bnx2x_virtf *vf,
1278 			  struct bnx2x_vfop_cmd *cmd,
1279 			  int qid, unsigned long accept_flags)
1280 {
1281 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1282 
1283 	if (vfop) {
1284 		struct bnx2x_rx_mode_ramrod_params *ramrod =
1285 			&vf->op_params.rx_mode;
1286 
1287 		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
1288 
1289 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1290 				 bnx2x_vfop_rxmode, cmd->done);
1291 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
1292 					     cmd->block);
1293 	}
1294 	return -ENOMEM;
1295 }
1296 
1297 /* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
1298  * queue destructor)
1299  */
1300 static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
1301 {
1302 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1303 	int qid = vfop->args.qx.qid;
1304 	enum bnx2x_vfop_qteardown_state state = vfop->state;
1305 	struct bnx2x_vfop_cmd cmd;
1306 
1307 	if (vfop->rc < 0)
1308 		goto op_err;
1309 
1310 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1311 
1312 	cmd.done = bnx2x_vfop_qdown;
1313 	cmd.block = false;
1314 
1315 	switch (state) {
1316 	case BNX2X_VFOP_QTEARDOWN_RXMODE:
1317 		/* Drop all */
1318 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
1319 		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
1320 		if (vfop->rc)
1321 			goto op_err;
1322 		return;
1323 
1324 	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
1325 		/* vlan-clear-all: don't consume credit */
1326 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
1327 		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
1328 		if (vfop->rc)
1329 			goto op_err;
1330 		return;
1331 
1332 	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
1333 		/* mac-clear-all: consume credit */
1334 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
1335 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
1336 		if (vfop->rc)
1337 			goto op_err;
1338 		return;
1339 
1340 	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
1341 		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1342 		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
1343 		if (vfop->rc)
1344 			goto op_err;
1345 		return;
1346 
1347 	case BNX2X_VFOP_QTEARDOWN_QDTOR:
1348 		/* run the queue destruction flow */
1349 		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
1350 		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
1351 		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
1352 		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
1353 		DP(BNX2X_MSG_IOV, "returned from cmd\n");
1354 		if (vfop->rc)
1355 			goto op_err;
1356 		return;
1357 op_err:
1358 	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
1359 		  vf->abs_vfid, qid, vfop->rc);
1360 
1361 	case BNX2X_VFOP_QTEARDOWN_DONE:
1362 		bnx2x_vfop_end(bp, vf, vfop);
1363 		return;
1364 	default:
1365 		bnx2x_vfop_default(state);
1366 	}
1367 }
1368 
1369 int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
1370 			 struct bnx2x_virtf *vf,
1371 			 struct bnx2x_vfop_cmd *cmd,
1372 			 int qid)
1373 {
1374 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1375 
	/* for non-leading queues skip directly to the qdtor state */
1377 	if (vfop) {
1378 		vfop->args.qx.qid = qid;
1379 		bnx2x_vfop_opset(qid == LEADING_IDX ?
1380 				 BNX2X_VFOP_QTEARDOWN_RXMODE :
1381 				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
1382 				 cmd->done);
1383 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
1384 					     cmd->block);
1385 	}
1386 
1387 	return -ENOMEM;
1388 }
1389 
/* VF enable primitives
 * When pretend is required, the caller is responsible for calling
 * pretend prior to calling these routines.
 */
1394 
1395 /* internal vf enable - until vf is enabled internally all transactions
1396  * are blocked. This routine should always be called last with pretend.
1397  */
1398 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
1399 {
1400 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
1401 }
1402 
1403 /* clears vf error in all semi blocks */
1404 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
1405 {
1406 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
1407 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
1408 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
1409 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
1410 }
1411 
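/* Clear the PGLUE 'was_error' indication for this VF. The per-VF bits are
 * spread over four 32-bit clear registers: the register is selected by
 * (2 * path + abs_vfid) / 32 and the bit within it by abs_vfid % 32.
 */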
1412 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
1413 {
1414 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
1415 	u32 was_err_reg = 0;
1416 
1417 	switch (was_err_group) {
1418 	case 0:
1419 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
1420 	    break;
1421 	case 1:
1422 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
1423 	    break;
1424 	case 2:
1425 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
1426 	    break;
1427 	case 3:
1428 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
1429 	    break;
1430 	}
1431 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
1432 }
1433 
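/* Pretend to be the VF in order to clear its IGU masks and PBA and to
 * program IGU_REG_VF_CONFIGURATION (function enable, MSI/MSI-X, optional
 * single ISR, parent PF). Afterwards clear the producer memory and state
 * machine of each of the VF's status blocks and leave them acked with
 * interrupts disabled.
 */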
1434 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1435 {
1436 	int i;
1437 	u32 val;
1438 
1439 	/* Set VF masks and configuration - pretend */
1440 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1441 
1442 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
1443 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
1444 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
1445 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
1446 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
1447 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
1448 
1449 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1450 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
1451 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
1452 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
1453 	val &= ~IGU_VF_CONF_PARENT_MASK;
1454 	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
1455 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1456 
1457 	DP(BNX2X_MSG_IOV,
1458 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
1459 	   vf->abs_vfid, val);
1460 
1461 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1462 
1463 	/* iterate over all queues, clear sb consumer */
1464 	for (i = 0; i < vf_sb_count(vf); i++) {
1465 		u8 igu_sb_id = vf_igu_sb(vf, i);
1466 
1467 		/* zero prod memory */
1468 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
1469 
1470 		/* clear sb state machine */
1471 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
1472 				       false /* VF */);
1473 
1474 		/* disable + update */
1475 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
1476 				    IGU_INT_DISABLE, 1);
1477 	}
1478 }
1479 
1480 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
1481 {
1482 	/* set the VF-PF association in the FW */
1483 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
1484 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
1485 
	/* clear vf errors */
1487 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
1488 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
1489 
1490 	/* internal vf-enable - pretend */
1491 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
1492 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
1493 	bnx2x_vf_enable_internal(bp, true);
1494 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1495 }
1496 
1497 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
1498 {
	/* Reset vf in IGU - interrupts are still disabled */
1500 	bnx2x_vf_igu_reset(bp, vf);
1501 
1502 	/* pretend to enable the vf with the PBF */
1503 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1504 	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
1505 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1506 }
1507 
1508 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1509 {
1510 	struct pci_dev *dev;
1511 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1512 
1513 	if (!vf)
1514 		return false;
1515 
1516 	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
1517 	if (dev)
1518 		return bnx2x_is_pcie_pending(dev);
1519 	return false;
1520 }
1521 
1522 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1523 {
1524 	/* Verify no pending pci transactions */
1525 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
1526 		BNX2X_ERR("PCIE Transactions still pending\n");
1527 
1528 	return 0;
1529 }
1530 
1531 /* must be called after the number of PF queues and the number of VFs are
1532  * both known
1533  */
1534 static void
1535 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1536 {
1537 	struct vf_pf_resc_request *resc = &vf->alloc_resc;
1538 	u16 vlan_count = 0;
1539 
1540 	/* will be set only during VF-ACQUIRE */
1541 	resc->num_rxqs = 0;
1542 	resc->num_txqs = 0;
1543 
1544 	/* no credit calculations for macs (just yet) */
1545 	resc->num_mac_filters = 1;
1546 
1547 	/* divvy up vlan rules */
1548 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
1549 	vlan_count = 1 << ilog2(vlan_count);
1550 	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
1551 
1552 	/* no real limitation */
1553 	resc->num_mc_filters = 0;
1554 
1555 	/* num_sbs already set */
1556 	resc->num_sbs = vf->sb_count;
1557 }
1558 
1559 /* FLR routines: */
1560 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1561 {
1562 	/* reset the state variables */
1563 	bnx2x_iov_static_resc(bp, vf);
1564 	vf->state = VF_FREE;
1565 }
1566 
1567 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
1568 {
1569 	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1570 
1571 	/* DQ usage counter */
1572 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1573 	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
1574 					"DQ VF usage counter timed out",
1575 					poll_cnt);
1576 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1577 
1578 	/* FW cleanup command - poll for the results */
1579 	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
1580 				   poll_cnt))
1581 		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
1582 
1583 	/* verify TX hw is flushed */
1584 	bnx2x_tx_hw_flushed(bp, poll_cnt);
1585 }
1586 
1587 static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1588 {
1589 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1590 	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
1591 	enum bnx2x_vfop_flr_state state = vfop->state;
1592 	struct bnx2x_vfop_cmd cmd = {
1593 		.done = bnx2x_vfop_flr,
1594 		.block = false,
1595 	};
1596 
1597 	if (vfop->rc < 0)
1598 		goto op_err;
1599 
1600 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1601 
1602 	switch (state) {
1603 	case BNX2X_VFOP_FLR_QUEUES:
1604 		/* the cleanup operations are valid if and only if the VF
1605 		 * was first acquired.
1606 		 */
1607 		if (++(qx->qid) < vf_rxq_count(vf)) {
1608 			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
1609 						       qx->qid);
1610 			if (vfop->rc)
1611 				goto op_err;
1612 			return;
1613 		}
1614 		/* remove multicasts */
1615 		vfop->state = BNX2X_VFOP_FLR_HW;
1616 		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
1617 						0, true);
1618 		if (vfop->rc)
1619 			goto op_err;
1620 		return;
1621 	case BNX2X_VFOP_FLR_HW:
1622 
1623 		/* dispatch final cleanup and wait for HW queues to flush */
1624 		bnx2x_vf_flr_clnup_hw(bp, vf);
1625 
1626 		/* release VF resources */
1627 		bnx2x_vf_free_resc(bp, vf);
1628 
1629 		/* re-open the mailbox */
1630 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1631 
1632 		goto op_done;
1633 	default:
1634 		bnx2x_vfop_default(state);
1635 	}
1636 op_err:
1637 	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
1638 op_done:
1639 	vf->flr_clnup_stage = VF_FLR_ACK;
1640 	bnx2x_vfop_end(bp, vf, vfop);
1641 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1642 }
1643 
1644 static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
1645 			      struct bnx2x_virtf *vf,
1646 			      vfop_handler_t done)
1647 {
1648 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1649 	if (vfop) {
1650 		vfop->args.qx.qid = -1; /* loop */
1651 		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
1652 				 bnx2x_vfop_flr, done);
1653 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
1654 	}
1655 	return -ENOMEM;
1656 }
1657 
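/* Walk the VF array starting after prev_vf and run the FLR state machine for
 * the next VF found in VF_RESET/VF_FLR_CLN. This function is also used as
 * the vfop completion callback, so it re-enters itself until no such VF
 * remains and then acks all the handled VFs towards the MCP.
 */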
1658 static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
1659 {
1660 	int i = prev_vf ? prev_vf->index + 1 : 0;
1661 	struct bnx2x_virtf *vf;
1662 
1663 	/* find next VF to cleanup */
1664 next_vf_to_clean:
1665 	for (;
1666 	     i < BNX2X_NR_VIRTFN(bp) &&
1667 	     (bnx2x_vf(bp, i, state) != VF_RESET ||
1668 	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
1669 	     i++)
1670 		;
1671 
1672 	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
1673 	   BNX2X_NR_VIRTFN(bp));
1674 
1675 	if (i < BNX2X_NR_VIRTFN(bp)) {
1676 		vf = BP_VF(bp, i);
1677 
1678 		/* lock the vf pf channel */
1679 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1680 
1681 		/* invoke the VF FLR SM */
1682 		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
1683 			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
1684 				  vf->abs_vfid);
1685 
1686 			/* mark the VF to be ACKED and continue */
1687 			vf->flr_clnup_stage = VF_FLR_ACK;
1688 			goto next_vf_to_clean;
1689 		}
1690 		return;
1691 	}
1692 
1693 	/* we are done, update vf records */
1694 	for_each_vf(bp, i) {
1695 		vf = BP_VF(bp, i);
1696 
1697 		if (vf->flr_clnup_stage != VF_FLR_ACK)
1698 			continue;
1699 
1700 		vf->flr_clnup_stage = VF_FLR_EPILOG;
1701 	}
1702 
	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the MCP will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM, where an 'all ones' FLR
	 * request is sometimes given by the hypervisor.
	 */
1710 	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
1711 	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1712 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1713 		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
1714 			  bp->vfdb->flrd_vfs[i]);
1715 
1716 	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
1717 
	/* clear the acked bits - it would be better if the MCP implemented
	 * write-to-clear semantics
	 */
1721 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1722 		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
1723 }
1724 
1725 void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1726 {
1727 	int i;
1728 
1729 	/* Read FLR'd VFs */
1730 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1731 		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
1732 
1733 	DP(BNX2X_MSG_MCP,
1734 	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
1735 	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1736 
1737 	for_each_vf(bp, i) {
1738 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1739 		u32 reset = 0;
1740 
1741 		if (vf->abs_vfid < 32)
1742 			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
1743 		else
1744 			reset = bp->vfdb->flrd_vfs[1] &
1745 				(1 << (vf->abs_vfid - 32));
1746 
1747 		if (reset) {
1748 			/* set as reset and ready for cleanup */
1749 			vf->state = VF_RESET;
1750 			vf->flr_clnup_stage = VF_FLR_CLN;
1751 
1752 			DP(BNX2X_MSG_IOV,
1753 			   "Initiating Final cleanup for VF %d\n",
1754 			   vf->abs_vfid);
1755 		}
1756 	}
1757 
	/* do the FLR cleanup for all marked VFs */
1759 	bnx2x_vf_flr_clnup(bp, NULL);
1760 }
1761 
1762 /* IOV global initialization routines  */
1763 void bnx2x_iov_init_dq(struct bnx2x *bp)
1764 {
1765 	if (!IS_SRIOV(bp))
1766 		return;
1767 
	/* Set the DQ such that the CID reflects the abs_vfid */
1769 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1770 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1771 
	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs
	 * belong to the PF L2 queues.
	 */
1775 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1776 
1777 	/* The VF window size is the log2 of the max number of CIDs per VF */
1778 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1779 
	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
1783 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1784 
1785 	/* No security checks for now -
1786 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
1787 	 * CID range 0 - 0x1ffff
1788 	 */
1789 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1790 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1791 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1792 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1793 
1794 	/* set the VF doorbell threshold */
1795 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
1796 }
1797 
1798 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1799 {
1800 	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1801 		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1802 }
1803 
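/* Derive a VF's PCI bus number and devfn from the PF's devfn plus the
 * SR-IOV offset/stride, i.e. VF routing id = PF routing id + offset +
 * stride * vfid, as defined by the PCIe SR-IOV spec.
 */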
1804 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1805 {
1806 	struct pci_dev *dev = bp->pdev;
1807 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1808 
1809 	return dev->bus->number + ((dev->devfn + iov->offset +
1810 				    iov->stride * vfid) >> 8);
1811 }
1812 
1813 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1814 {
1815 	struct pci_dev *dev = bp->pdev;
1816 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1817 
1818 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1819 }
1820 
1821 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1822 {
1823 	int i, n;
1824 	struct pci_dev *dev = bp->pdev;
1825 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1826 
1827 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1828 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1829 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1830 
1831 		size /= iov->total;
1832 		vf->bars[n].bar = start + size * vf->abs_vfid;
1833 		vf->bars[n].size = size;
1834 	}
1835 }
1836 
1837 static int bnx2x_ari_enabled(struct pci_dev *dev)
1838 {
1839 	return dev->bus->self && dev->bus->self->ari_enabled;
1840 }
1841 
1842 static void
1843 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1844 {
1845 	int sb_id;
1846 	u32 val;
1847 	u8 fid, current_pf = 0;
1848 
1849 	/* IGU in normal mode - read CAM */
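	/* Each valid CAM entry maps one status block to a function. The walk
	 * below assumes that VF entries follow the entry of their parent PF,
	 * so it tracks the most recently seen PF and records only those VF
	 * entries that belong to this PF.
	 */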
1850 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1851 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1852 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1853 			continue;
1854 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1855 		if (fid & IGU_FID_ENCODE_IS_PF)
1856 			current_pf = fid & IGU_FID_PF_NUM_MASK;
1857 		else if (current_pf == BP_FUNC(bp))
1858 			bnx2x_vf_set_igu_info(bp, sb_id,
1859 					      (fid & IGU_FID_VF_NUM_MASK));
1860 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1861 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1862 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1863 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1864 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1865 	}
1866 	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1867 }
1868 
1869 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1870 {
1871 	if (bp->vfdb) {
1872 		kfree(bp->vfdb->vfqs);
1873 		kfree(bp->vfdb->vfs);
1874 		kfree(bp->vfdb);
1875 	}
1876 	bp->vfdb = NULL;
1877 }
1878 
1879 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1880 {
1881 	int pos;
1882 	struct pci_dev *dev = bp->pdev;
1883 
1884 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1885 	if (!pos) {
1886 		BNX2X_ERR("failed to find SRIOV capability in device\n");
1887 		return -ENODEV;
1888 	}
1889 
1890 	iov->pos = pos;
1891 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1892 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1893 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1894 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1895 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1896 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1897 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1898 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1899 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1900 
1901 	return 0;
1902 }
1903 
1904 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1905 {
1906 	u32 val;
1907 
1908 	/* read the SRIOV capability structure
1909 	 * The fields can be read via configuration read or
1910 	 * directly from the device (starting at offset PCICFG_OFFSET)
1911 	 */
1912 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
1913 		return -ENODEV;
1914 
1915 	/* get the number of SRIOV bars */
1916 	iov->nres = 0;
1917 
1918 	/* read the first_vfid */
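	/* The register field appears to hold the device-global first VF
	 * number in units of 8; subtracting BNX2X_MAX_NUM_OF_VFS * BP_PATH()
	 * then converts it into a per-path VF number (assumption based on
	 * the arithmetic below).
	 */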
1919 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1920 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1921 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1922 
1923 	DP(BNX2X_MSG_IOV,
1924 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1925 	   BP_FUNC(bp),
1926 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1927 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1928 
1929 	return 0;
1930 }
1931 
1932 /* must be called after PF bars are mapped */
1933 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1934 		       int num_vfs_param)
1935 {
1936 	int err, i;
1937 	struct bnx2x_sriov *iov;
1938 	struct pci_dev *dev = bp->pdev;
1939 
1940 	bp->vfdb = NULL;
1941 
	/* verify this is a PF */
1943 	if (IS_VF(bp))
1944 		return 0;
1945 
1946 	/* verify sriov capability is present in configuration space */
1947 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1948 		return 0;
1949 
1950 	/* verify chip revision */
1951 	if (CHIP_IS_E1x(bp))
1952 		return 0;
1953 
1954 	/* check if SRIOV support is turned off */
1955 	if (!num_vfs_param)
1956 		return 0;
1957 
1958 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1959 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1960 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1961 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1962 		return 0;
1963 	}
1964 
1965 	/* SRIOV can be enabled only with MSIX */
1966 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
1967 	    int_mode_param == BNX2X_INT_MODE_INTX) {
1968 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1969 		return 0;
1970 	}
1971 
1972 	err = -EIO;
1973 	/* verify ari is enabled */
1974 	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV cannot be enabled\n");
1976 		return 0;
1977 	}
1978 
1979 	/* verify igu is in normal mode */
1980 	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU is not in normal mode, SRIOV cannot be enabled\n");
1982 		return 0;
1983 	}
1984 
1985 	/* allocate the vfs database */
1986 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1987 	if (!bp->vfdb) {
1988 		BNX2X_ERR("failed to allocate vf database\n");
1989 		err = -ENOMEM;
1990 		goto failed;
1991 	}
1992 
	/* get the sriov info - Linux already collected all the pertinent
	 * information, but the sriov structure is for the private use
	 * of the pci module. Also, we want this information regardless
	 * of the hypervisor.
	 */
1998 	iov = &(bp->vfdb->sriov);
1999 	err = bnx2x_sriov_info(bp, iov);
2000 	if (err)
2001 		goto failed;
2002 
	/* SR-IOV capability was enabled but there are no VFs */
2004 	if (iov->total == 0)
2005 		goto failed;
2006 
2007 	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
2008 
2009 	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
2010 	   num_vfs_param, iov->nr_virtfn);
2011 
2012 	/* allocate the vf array */
2013 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
2014 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
2015 	if (!bp->vfdb->vfs) {
2016 		BNX2X_ERR("failed to allocate vf array\n");
2017 		err = -ENOMEM;
2018 		goto failed;
2019 	}
2020 
2021 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
2022 	for_each_vf(bp, i) {
2023 		bnx2x_vf(bp, i, index) = i;
2024 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
2025 		bnx2x_vf(bp, i, state) = VF_FREE;
2026 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
2027 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
2028 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
2029 	}
2030 
2031 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
2032 	bnx2x_get_vf_igu_cam_info(bp);
2033 
2034 	/* allocate the queue arrays for all VFs */
2035 	bp->vfdb->vfqs = kzalloc(
2036 		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
2037 		GFP_KERNEL);
2038 
2039 	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
2040 
2041 	if (!bp->vfdb->vfqs) {
2042 		BNX2X_ERR("failed to allocate vf queue array\n");
2043 		err = -ENOMEM;
2044 		goto failed;
2045 	}
2046 
2047 	return 0;
2048 failed:
2049 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
2050 	__bnx2x_iov_free_vfdb(bp);
2051 	return err;
2052 }
2053 
2054 void bnx2x_iov_remove_one(struct bnx2x *bp)
2055 {
2056 	int vf_idx;
2057 
2058 	/* if SRIOV is not enabled there's nothing to do */
2059 	if (!IS_SRIOV(bp))
2060 		return;
2061 
2062 	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
2063 	pci_disable_sriov(bp->pdev);
2064 	DP(BNX2X_MSG_IOV, "sriov disabled\n");
2065 
2066 	/* disable access to all VFs */
2067 	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
2068 		bnx2x_pretend_func(bp,
2069 				   HW_VF_HANDLE(bp,
2070 						bp->vfdb->sriov.first_vf_in_pf +
2071 						vf_idx));
2072 		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
2073 		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
2074 		bnx2x_vf_enable_internal(bp, 0);
2075 		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2076 	}
2077 
2078 	/* free vf database */
2079 	__bnx2x_iov_free_vfdb(bp);
2080 }
2081 
2082 void bnx2x_iov_free_mem(struct bnx2x *bp)
2083 {
2084 	int i;
2085 
2086 	if (!IS_SRIOV(bp))
2087 		return;
2088 
2089 	/* free vfs hw contexts */
2090 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2091 		struct hw_dma *cxt = &bp->vfdb->context[i];
2092 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
2093 	}
2094 
2095 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
2096 		       BP_VFDB(bp)->sp_dma.mapping,
2097 		       BP_VFDB(bp)->sp_dma.size);
2098 
2099 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
2100 		       BP_VF_MBX_DMA(bp)->mapping,
2101 		       BP_VF_MBX_DMA(bp)->size);
2102 
2103 	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
2104 		       BP_VF_BULLETIN_DMA(bp)->mapping,
2105 		       BP_VF_BULLETIN_DMA(bp)->size);
2106 }
2107 
2108 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
2109 {
2110 	size_t tot_size;
2111 	int i, rc = 0;
2112 
2113 	if (!IS_SRIOV(bp))
2114 		return rc;
2115 
2116 	/* allocate vfs hw contexts */
2117 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
2118 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
2119 
2120 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2121 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
2122 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
2123 
2124 		if (cxt->size) {
2125 			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
2126 		} else {
2127 			cxt->addr = NULL;
2128 			cxt->mapping = 0;
2129 		}
2130 		tot_size -= cxt->size;
2131 	}
2132 
2133 	/* allocate vfs ramrods dma memory - client_init and set_mac */
2134 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
2135 	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
2136 			tot_size);
2137 	BP_VFDB(bp)->sp_dma.size = tot_size;
2138 
2139 	/* allocate mailboxes */
2140 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
2141 	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
2142 			tot_size);
2143 	BP_VF_MBX_DMA(bp)->size = tot_size;
2144 
2145 	/* allocate local bulletin boards */
2146 	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
2147 	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
2148 			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
2149 	BP_VF_BULLETIN_DMA(bp)->size = tot_size;
2150 
2151 	return 0;
2152 
2153 alloc_mem_err:
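	/* the BNX2X_PCI_ALLOC() calls above presumably jump here when an
	 * allocation fails, which is why this label has no explicit users
	 * in this function
	 */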
2154 	return -ENOMEM;
2155 }
2156 
2157 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2158 			   struct bnx2x_vf_queue *q)
2159 {
2160 	u8 cl_id = vfq_cl_id(vf, q);
2161 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
2162 	unsigned long q_type = 0;
2163 
2164 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
2165 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
2166 
2167 	/* Queue State object */
2168 	bnx2x_init_queue_obj(bp, &q->sp_obj,
2169 			     cl_id, &q->cid, 1, func_id,
2170 			     bnx2x_vf_sp(bp, vf, q_data),
2171 			     bnx2x_vf_sp_map(bp, vf, q_data),
2172 			     q_type);
2173 
2174 	DP(BNX2X_MSG_IOV,
2175 	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
2176 	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
2177 }
2178 
2179 /* called by bnx2x_nic_load */
2180 int bnx2x_iov_nic_init(struct bnx2x *bp)
2181 {
2182 	int vfid;
2183 
2184 	if (!IS_SRIOV(bp)) {
2185 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
2186 		return 0;
2187 	}
2188 
2189 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
2190 
2191 	/* let FLR complete ... */
2192 	msleep(100);
2193 
2194 	/* initialize vf database */
2195 	for_each_vf(bp, vfid) {
2196 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2197 
2198 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
2199 			BNX2X_CIDS_PER_VF;
2200 
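		/* the CDU context of the VF's first cid lives in ILT page
		 * (base_vf_cid / ILT_PAGE_CIDS), at entry
		 * (base_vf_cid % ILT_PAGE_CIDS) within that page
		 */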
2201 		union cdu_context *base_cxt = (union cdu_context *)
2202 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2203 			(base_vf_cid & (ILT_PAGE_CIDS-1));
2204 
2205 		DP(BNX2X_MSG_IOV,
2206 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
2207 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
2208 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
2209 
2210 		/* init statically provisioned resources */
2211 		bnx2x_iov_static_resc(bp, vf);
2212 
2213 		/* queues are initialized during VF-ACQUIRE */
2214 
2215 		/* reserve the vf vlan credit */
2216 		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
2217 
2218 		vf->filter_state = 0;
2219 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
2220 
2221 		/*  init mcast object - This object will be re-initialized
2222 		 *  during VF-ACQUIRE with the proper cl_id and cid.
2223 		 *  It needs to be initialized here so that it can be safely
2224 		 *  handled by a subsequent FLR flow.
2225 		 */
2226 		vf->mcast_list_len = 0;
2227 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
2228 				     0xFF, 0xFF, 0xFF,
2229 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
2230 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2231 				     BNX2X_FILTER_MCAST_PENDING,
2232 				     &vf->filter_state,
2233 				     BNX2X_OBJ_TYPE_RX_TX);
2234 
2235 		/* set the mailbox message addresses */
2236 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
2237 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
2238 			MBX_MSG_ALIGNED_SIZE);
2239 
2240 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
2241 			vfid * MBX_MSG_ALIGNED_SIZE;
2242 
2243 		/* Enable vf mailbox */
2244 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
2245 	}
2246 
2247 	/* Final VF init */
2248 	for_each_vf(bp, vfid) {
2249 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2250 
2251 		/* fill in the BDF and bars */
2252 		vf->bus = bnx2x_vf_bus(bp, vfid);
2253 		vf->devfn = bnx2x_vf_devfn(bp, vfid);
2254 		bnx2x_vf_set_bars(bp, vf);
2255 
2256 		DP(BNX2X_MSG_IOV,
2257 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
2258 		   vf->abs_vfid, vf->bus, vf->devfn,
2259 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
2260 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
2261 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
2262 	}
2263 
2264 	return 0;
2265 }
2266 
2267 /* called by bnx2x_chip_cleanup */
2268 int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
2269 {
2270 	int i;
2271 
2272 	if (!IS_SRIOV(bp))
2273 		return 0;
2274 
2275 	/* release all the VFs */
2276 	for_each_vf(bp, i)
2277 		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
2278 
2279 	return 0;
2280 }
2281 
2282 /* called by bnx2x_init_hw_func, returns the next ilt line */
2283 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
2284 {
2285 	int i;
2286 	struct bnx2x_ilt *ilt = BP_ILT(bp);
2287 
2288 	if (!IS_SRIOV(bp))
2289 		return line;
2290 
2291 	/* set vfs ilt lines */
2292 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2293 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
2294 
2295 		ilt->lines[line+i].page = hw_cxt->addr;
2296 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
2297 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
2298 	}
2299 	return line + i;
2300 }
2301 
2302 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
2303 {
2304 	return ((cid >= BNX2X_FIRST_VF_CID) &&
2305 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
2306 }
2307 
2308 static
2309 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
2310 					struct bnx2x_vf_queue *vfq,
2311 					union event_ring_elem *elem)
2312 {
2313 	unsigned long ramrod_flags = 0;
2314 	int rc = 0;
2315 
2316 	/* Always push next commands out, don't wait here */
2317 	set_bit(RAMROD_CONT, &ramrod_flags);
2318 
2319 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
2320 	case BNX2X_FILTER_MAC_PENDING:
2321 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
2322 					   &ramrod_flags);
2323 		break;
2324 	case BNX2X_FILTER_VLAN_PENDING:
2325 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
2326 					    &ramrod_flags);
2327 		break;
2328 	default:
2329 		BNX2X_ERR("Unsupported classification command: %d\n",
2330 			  elem->message.data.eth_event.echo);
2331 		return;
2332 	}
2333 	if (rc < 0)
2334 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
2335 	else if (rc > 0)
2336 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
2337 }
2338 
2339 static
2340 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
2341 			       struct bnx2x_virtf *vf)
2342 {
2343 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2344 	int rc;
2345 
2346 	rparam.mcast_obj = &vf->mcast_obj;
2347 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2348 
2349 	/* If there are pending mcast commands - send them */
2350 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2351 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2352 		if (rc < 0)
2353 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2354 				  rc);
2355 	}
2356 }
2357 
2358 static
2359 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2360 				 struct bnx2x_virtf *vf)
2361 {
2362 	smp_mb__before_clear_bit();
2363 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2364 	smp_mb__after_clear_bit();
2365 }
2366 
2367 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2368 {
2369 	struct bnx2x_virtf *vf;
2370 	int qidx = 0, abs_vfid;
2371 	u8 opcode;
2372 	u16 cid = 0xffff;
2373 
2374 	if (!IS_SRIOV(bp))
2375 		return 1;
2376 
2377 	/* first get the cid - the only events we handle here are cfc-delete
2378 	 * and set-mac completion
2379 	 */
2380 	opcode = elem->message.opcode;
2381 
2382 	switch (opcode) {
2383 	case EVENT_RING_OPCODE_CFC_DEL:
2384 		cid = SW_CID((__force __le32)
2385 			     elem->message.data.cfc_del_event.cid);
2386 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2387 		break;
2388 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2389 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2390 	case EVENT_RING_OPCODE_FILTERS_RULES:
2391 		cid = (elem->message.data.eth_event.echo &
2392 		       BNX2X_SWCID_MASK);
2393 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2394 		break;
2395 	case EVENT_RING_OPCODE_VF_FLR:
2396 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
2397 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2398 		   abs_vfid);
2399 		goto get_vf;
2400 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2401 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2402 		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
2403 			  abs_vfid,
2404 			  elem->message.data.malicious_vf_event.err_id);
2405 		goto get_vf;
2406 	default:
2407 		return 1;
2408 	}
2409 
	/* check if the cid is in the VF range */
2411 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2412 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2413 		return 1;
2414 	}
2415 
2416 	/* extract vf and rxq index from vf_cid - relies on the following:
2417 	 * 1. vfid on cid reflects the true abs_vfid
2418 	 * 2. The max number of VFs (per path) is 64
2419 	 */
2420 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2421 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
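	/* e.g. with a BNX2X_VF_CID_WND of 4, cid 0x25 decodes to rx queue 5
	 * of the VF whose abs_vfid is 2 - the low window bits select the
	 * queue and the next 6 bits (up to 64 VFs per path) select the VF
	 */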
2422 get_vf:
2423 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
2424 
2425 	if (!vf) {
2426 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
2427 			  cid, abs_vfid);
2428 		return 0;
2429 	}
2430 
2431 	switch (opcode) {
2432 	case EVENT_RING_OPCODE_CFC_DEL:
2433 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
2434 		   vf->abs_vfid, qidx);
2435 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
2436 						       &vfq_get(vf,
2437 								qidx)->sp_obj,
2438 						       BNX2X_Q_CMD_CFC_DEL);
2439 		break;
2440 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2441 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
2442 		   vf->abs_vfid, qidx);
2443 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
2444 		break;
2445 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2446 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
2447 		   vf->abs_vfid, qidx);
2448 		bnx2x_vf_handle_mcast_eqe(bp, vf);
2449 		break;
2450 	case EVENT_RING_OPCODE_FILTERS_RULES:
2451 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
2452 		   vf->abs_vfid, qidx);
2453 		bnx2x_vf_handle_filters_eqe(bp, vf);
2454 		break;
2455 	case EVENT_RING_OPCODE_VF_FLR:
2456 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2457 		/* Do nothing for now */
2458 		return 0;
2459 	}
2460 	/* SRIOV: reschedule any 'in_progress' operations */
2461 	bnx2x_iov_sp_event(bp, cid, false);
2462 
2463 	return 0;
2464 }
2465 
2466 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2467 {
2468 	/* extract the vf from vf_cid - relies on the following:
2469 	 * 1. vfid on cid reflects the true abs_vfid
2470 	 * 2. The max number of VFs (per path) is 64
2471 	 */
2472 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2473 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
2474 }
2475 
2476 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2477 				struct bnx2x_queue_sp_obj **q_obj)
2478 {
2479 	struct bnx2x_virtf *vf;
2480 
2481 	if (!IS_SRIOV(bp))
2482 		return;
2483 
2484 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2485 
2486 	if (vf) {
2487 		/* extract queue index from vf_cid - relies on the following:
2488 		 * 1. vfid on cid reflects the true abs_vfid
2489 		 * 2. The max number of VFs (per path) is 64
2490 		 */
2491 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2492 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2493 	} else {
2494 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2495 	}
2496 }
2497 
2498 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2499 {
2500 	struct bnx2x_virtf *vf;
2501 
	/* check if the cid is in the VF range */
2503 	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2504 		return;
2505 
2506 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2507 	if (vf) {
2508 		/* set in_progress flag */
2509 		atomic_set(&vf->op_in_progress, 1);
2510 		if (queue_work)
2511 			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2512 	}
2513 }
2514 
2515 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2516 {
2517 	int i;
2518 	int first_queue_query_index, num_queues_req;
2519 	dma_addr_t cur_data_offset;
2520 	struct stats_query_entry *cur_query_entry;
2521 	u8 stats_count = 0;
2522 	bool is_fcoe = false;
2523 
2524 	if (!IS_SRIOV(bp))
2525 		return;
2526 
2527 	if (!NO_FCOE(bp))
2528 		is_fcoe = true;
2529 
2530 	/* fcoe adds one global request and one queue request */
2531 	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2532 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2533 		(is_fcoe ? 0 : 1);
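	/* BNX2X_FIRST_QUEUE_QUERY_IDX apparently already accounts for the
	 * FCoE queue slot, so when FCoE is absent the first VF query index
	 * is pulled back by one
	 */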
2534 
2535 	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2536 	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2537 	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2538 	       first_queue_query_index + num_queues_req);
2539 
2540 	cur_data_offset = bp->fw_stats_data_mapping +
2541 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2542 		num_queues_req * sizeof(struct per_queue_stats);
2543 
2544 	cur_query_entry = &bp->fw_stats_req->
2545 		query[first_queue_query_index + num_queues_req];
2546 
2547 	for_each_vf(bp, i) {
2548 		int j;
2549 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2550 
2551 		if (vf->state != VF_ENABLED) {
2552 			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2553 			       "vf %d not enabled so no stats for it\n",
2554 			       vf->abs_vfid);
2555 			continue;
2556 		}
2557 
2558 		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2559 		for_each_vfq(vf, j) {
2560 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2561 
2562 			dma_addr_t q_stats_addr =
2563 				vf->fw_stat_map + j * vf->stats_stride;
2564 
			/* collect stats from active queues only */
2566 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2567 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
2568 				continue;
2569 
2570 			/* create stats query entry for this queue */
2571 			cur_query_entry->kind = STATS_TYPE_QUEUE;
2572 			cur_query_entry->index = vfq_stat_id(vf, rxq);
2573 			cur_query_entry->funcID =
2574 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2575 			cur_query_entry->address.hi =
2576 				cpu_to_le32(U64_HI(q_stats_addr));
2577 			cur_query_entry->address.lo =
2578 				cpu_to_le32(U64_LO(q_stats_addr));
2579 			DP(BNX2X_MSG_IOV,
2580 			   "added address %x %x for vf %d queue %d client %d\n",
2581 			   cur_query_entry->address.hi,
2582 			   cur_query_entry->address.lo, cur_query_entry->funcID,
2583 			   j, cur_query_entry->index);
2584 			cur_query_entry++;
2585 			cur_data_offset += sizeof(struct per_queue_stats);
2586 			stats_count++;
2587 
2588 			/* all stats are coalesced to the leading queue */
2589 			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
2590 				break;
2591 		}
2592 	}
2593 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2594 }
2595 
2596 void bnx2x_iov_sp_task(struct bnx2x *bp)
2597 {
2598 	int i;
2599 
2600 	if (!IS_SRIOV(bp))
2601 		return;
2602 	/* Iterate over all VFs and invoke state transition for VFs with
2603 	 * 'in-progress' slow-path operations
2604 	 */
2605 	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
2606 	       "searching for pending vf operations\n");
2607 	for_each_vf(bp, i) {
2608 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2609 
2610 		if (!vf) {
2611 			BNX2X_ERR("VF was null! skipping...\n");
2612 			continue;
2613 		}
2614 
2615 		if (!list_empty(&vf->op_list_head) &&
2616 		    atomic_read(&vf->op_in_progress)) {
2617 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2618 			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2619 		}
2620 	}
2621 }
2622 
2623 static inline
2624 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2625 {
2626 	int i;
2627 	struct bnx2x_virtf *vf = NULL;
2628 
2629 	for_each_vf(bp, i) {
2630 		vf = BP_VF(bp, i);
2631 		if (stat_id >= vf->igu_base_id &&
2632 		    stat_id < vf->igu_base_id + vf_sb_count(vf))
2633 			break;
2634 	}
2635 	return vf;
2636 }
2637 
2638 /* VF API helpers */
2639 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2640 				u8 enable)
2641 {
2642 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2643 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
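	/* bit 6 of the zone permission entry presumably marks it as valid;
	 * writing 0 revokes the VF's access to the queue's doorbell zone
	 */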
2644 
2645 	REG_WR(bp, reg, val);
2646 }
2647 
2648 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2649 {
2650 	int i;
2651 
2652 	for_each_vfq(vf, i)
2653 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2654 				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
2655 }
2656 
2657 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2658 {
2659 	u32 val;
2660 
2661 	/* clear the VF configuration - pretend */
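	/* bnx2x_pretend_func() makes the following register accesses execute
	 * on behalf of the VF; the pretend is undone below by pretending to
	 * be ourselves again
	 */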
2662 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2663 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2664 	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2665 		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2666 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2667 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2668 }
2669 
2670 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2671 {
2672 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2673 		     BNX2X_VF_MAX_QUEUES);
2674 }
2675 
2676 static
2677 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2678 			    struct vf_pf_resc_request *req_resc)
2679 {
2680 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2681 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2682 
2683 	return ((req_resc->num_rxqs <= rxq_cnt) &&
2684 		(req_resc->num_txqs <= txq_cnt) &&
2685 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
2686 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2687 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2688 }
2689 
2690 /* CORE VF API */
2691 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2692 		     struct vf_pf_resc_request *resc)
2693 {
2694 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2695 		BNX2X_CIDS_PER_VF;
2696 
2697 	union cdu_context *base_cxt = (union cdu_context *)
2698 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2699 		(base_vf_cid & (ILT_PAGE_CIDS-1));
2700 	int i;
2701 
	/* if the state is 'acquired' the VF was not released or FLR'd; in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
2707 	if (vf->state == VF_ACQUIRED) {
2708 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2709 		   vf->abs_vfid);
2710 
2711 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
2714 			return -EINVAL;
2715 		}
2716 		return 0;
2717 	}
2718 
2719 	/* Otherwise vf state must be 'free' or 'reset' */
2720 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
2721 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2722 			  vf->abs_vfid, vf->state);
2723 		return -EINVAL;
2724 	}
2725 
	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals.
	 */
2730 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2731 		DP(BNX2X_MSG_IOV,
2732 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2733 		/* set the max resource in the vf */
2734 		return -ENOMEM;
2735 	}
2736 
2737 	/* Set resources counters - 0 request means max available */
2738 	vf_sb_count(vf) = resc->num_sbs;
2739 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2740 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2741 	if (resc->num_mac_filters)
2742 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2743 	if (resc->num_vlan_filters)
2744 		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2745 
2746 	DP(BNX2X_MSG_IOV,
2747 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2748 	   vf_sb_count(vf), vf_rxq_count(vf),
2749 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
2750 	   vf_vlan_rules_cnt(vf));
2751 
2752 	/* Initialize the queues */
2753 	if (!vf->vfqs) {
2754 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2755 		return -EINVAL;
2756 	}
2757 
2758 	for_each_vfq(vf, i) {
2759 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
2760 
2761 		if (!q) {
2762 			BNX2X_ERR("q number %d was not allocated\n", i);
2763 			return -EINVAL;
2764 		}
2765 
2766 		q->index = i;
2767 		q->cxt = &((base_cxt + i)->eth);
2768 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2769 
2770 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2771 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2772 
2773 		/* init SP objects */
2774 		bnx2x_vfq_init(bp, vf, q);
2775 	}
2776 	vf->state = VF_ACQUIRED;
2777 	return 0;
2778 }
2779 
2780 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2781 {
2782 	struct bnx2x_func_init_params func_init = {0};
2783 	u16 flags = 0;
2784 	int i;
2785 
2786 	/* the sb resources are initialized at this point, do the
2787 	 * FW/HW initializations
2788 	 */
2789 	for_each_vf_sb(vf, i)
2790 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2791 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2792 
2793 	/* Sanity checks */
2794 	if (vf->state != VF_ACQUIRED) {
2795 		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2796 		   vf->abs_vfid, vf->state);
2797 		return -EINVAL;
2798 	}
2799 
2800 	/* let FLR complete ... */
2801 	msleep(100);
2802 
2803 	/* FLR cleanup epilogue */
2804 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2805 		return -EBUSY;
2806 
2807 	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2809 
2810 	/* vf init */
2811 	if (vf->cfg_flags & VF_CFG_STATS)
2812 		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2813 
2814 	if (vf->cfg_flags & VF_CFG_TPA)
2815 		flags |= FUNC_FLG_TPA;
2816 
2817 	if (is_vf_multi(vf))
2818 		flags |= FUNC_FLG_RSS;
2819 
2820 	/* function setup */
2821 	func_init.func_flgs = flags;
2822 	func_init.pf_id = BP_FUNC(bp);
2823 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2824 	func_init.fw_stat_map = vf->fw_stat_map;
2825 	func_init.spq_map = vf->spq_map;
2826 	func_init.spq_prod = 0;
2827 	bnx2x_func_init(bp, &func_init);
2828 
2829 	/* Enable the vf */
2830 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
2831 	bnx2x_vf_enable_traffic(bp, vf);
2832 
2833 	/* queue protection table */
2834 	for_each_vfq(vf, i)
2835 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2836 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2837 
2838 	vf->state = VF_ENABLED;
2839 
2840 	/* update vf bulletin board */
2841 	bnx2x_post_vf_bulletin(bp, vf->index);
2842 
2843 	return 0;
2844 }
2845 
2846 struct set_vf_state_cookie {
2847 	struct bnx2x_virtf *vf;
2848 	u8 state;
2849 };
2850 
2851 static void bnx2x_set_vf_state(void *cookie)
2852 {
2853 	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2854 
2855 	p->vf->state = p->state;
2856 }
2857 
2858 /* VFOP close (teardown the queues, delete mcasts and close HW) */
2859 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2860 {
2861 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2862 	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2863 	enum bnx2x_vfop_close_state state = vfop->state;
2864 	struct bnx2x_vfop_cmd cmd = {
2865 		.done = bnx2x_vfop_close,
2866 		.block = false,
2867 	};
2868 
2869 	if (vfop->rc < 0)
2870 		goto op_err;
2871 
2872 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2873 
2874 	switch (state) {
2875 	case BNX2X_VFOP_CLOSE_QUEUES:
2876 
2877 		if (++(qx->qid) < vf_rxq_count(vf)) {
2878 			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2879 			if (vfop->rc)
2880 				goto op_err;
2881 			return;
2882 		}
2883 		vfop->state = BNX2X_VFOP_CLOSE_HW;
2884 		vfop->rc = 0;
2885 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
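		/* bnx2x_vfop_finalize() presumably either re-schedules this
		 * handler - the next pass then takes the BNX2X_VFOP_CLOSE_HW
		 * case - or jumps to op_err/op_pending below, so there is no
		 * direct fall-through between the cases
		 */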
2886 
2887 	case BNX2X_VFOP_CLOSE_HW:
2888 
2889 		/* disable the interrupts */
2890 		DP(BNX2X_MSG_IOV, "disabling igu\n");
2891 		bnx2x_vf_igu_disable(bp, vf);
2892 
2893 		/* disable the VF */
2894 		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2895 		bnx2x_vf_clr_qtbl(bp, vf);
2896 
2897 		goto op_done;
2898 	default:
2899 		bnx2x_vfop_default(state);
2900 	}
2901 op_err:
2902 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2903 op_done:
2904 
	/* need to make sure there are no outstanding stats ramrods, which may
	 * cause the device to access the VF's stats buffer that it will free
	 * as soon as we return from the close flow
	 */
2909 	{
2910 		struct set_vf_state_cookie cookie;
2911 
2912 		cookie.vf = vf;
2913 		cookie.state = VF_ACQUIRED;
2914 		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2915 	}
2916 
2917 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
2918 	bnx2x_vfop_end(bp, vf, vfop);
2919 op_pending:
2920 	/* Not supported at the moment; Exists for macros only */
2921 	return;
2922 }
2923 
2924 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2925 			 struct bnx2x_virtf *vf,
2926 			 struct bnx2x_vfop_cmd *cmd)
2927 {
2928 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2929 	if (vfop) {
2930 		vfop->args.qx.qid = -1; /* loop */
2931 		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2932 				 bnx2x_vfop_close, cmd->done);
2933 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2934 					     cmd->block);
2935 	}
2936 	return -ENOMEM;
2937 }
2938 
/* VF release can be called in two cases: 1. the VF was acquired but
 * not enabled; 2. the VF was enabled or was in the process of being
 * enabled
 */
2943 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2944 {
2945 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2946 	struct bnx2x_vfop_cmd cmd = {
2947 		.done = bnx2x_vfop_release,
2948 		.block = false,
2949 	};
2950 
2951 	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2952 
2953 	if (vfop->rc < 0)
2954 		goto op_err;
2955 
2956 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2957 	   vf->state == VF_FREE ? "Free" :
2958 	   vf->state == VF_ACQUIRED ? "Acquired" :
2959 	   vf->state == VF_ENABLED ? "Enabled" :
2960 	   vf->state == VF_RESET ? "Reset" :
2961 	   "Unknown");
2962 
2963 	switch (vf->state) {
2964 	case VF_ENABLED:
2965 		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
2966 		if (vfop->rc)
2967 			goto op_err;
2968 		return;
2969 
2970 	case VF_ACQUIRED:
2971 		DP(BNX2X_MSG_IOV, "about to free resources\n");
2972 		bnx2x_vf_free_resc(bp, vf);
2973 		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2974 		goto op_done;
2975 
2976 	case VF_FREE:
2977 	case VF_RESET:
2978 		/* do nothing */
2979 		goto op_done;
2980 	default:
2981 		bnx2x_vfop_default(vf->state);
2982 	}
2983 op_err:
2984 	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
2985 op_done:
2986 	bnx2x_vfop_end(bp, vf, vfop);
2987 }
2988 
2989 static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
2990 {
2991 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2992 	enum bnx2x_vfop_rss_state state;
2993 
2994 	if (!vfop) {
2995 		BNX2X_ERR("vfop was null\n");
2996 		return;
2997 	}
2998 
2999 	state = vfop->state;
3000 	bnx2x_vfop_reset_wq(vf);
3001 
3002 	if (vfop->rc < 0)
3003 		goto op_err;
3004 
3005 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
3006 
3007 	switch (state) {
3008 	case BNX2X_VFOP_RSS_CONFIG:
3009 		/* next state */
3010 		vfop->state = BNX2X_VFOP_RSS_DONE;
3011 		bnx2x_config_rss(bp, &vfop->op_p->rss);
3012 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3013 op_err:
3014 		BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
3015 op_done:
3016 	case BNX2X_VFOP_RSS_DONE:
3017 		bnx2x_vfop_end(bp, vf, vfop);
3018 		return;
3019 	default:
3020 		bnx2x_vfop_default(state);
3021 	}
3022 op_pending:
3023 	return;
3024 }
3025 
3026 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
3027 			   struct bnx2x_virtf *vf,
3028 			   struct bnx2x_vfop_cmd *cmd)
3029 {
3030 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3031 	if (vfop) {
3032 		bnx2x_vfop_opset(-1, /* use vf->state */
3033 				 bnx2x_vfop_release, cmd->done);
3034 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
3035 					     cmd->block);
3036 	}
3037 	return -ENOMEM;
3038 }
3039 
3040 int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
3041 		       struct bnx2x_virtf *vf,
3042 		       struct bnx2x_vfop_cmd *cmd)
3043 {
3044 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3045 
3046 	if (vfop) {
3047 		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
3048 				 cmd->done);
3049 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
3050 					     cmd->block);
3051 	}
3052 	return -ENOMEM;
3053 }
3054 
3055 /* VFOP tpa update, send update on all queues */
3056 static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
3057 {
3058 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
3059 	struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
3060 	enum bnx2x_vfop_tpa_state state = vfop->state;
3061 
3062 	bnx2x_vfop_reset_wq(vf);
3063 
3064 	if (vfop->rc < 0)
3065 		goto op_err;
3066 
3067 	DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
3068 	   vf->abs_vfid, tpa_args->qid,
3069 	   state);
3070 
3071 	switch (state) {
3072 	case BNX2X_VFOP_TPA_CONFIG:
3073 
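		/* one UPDATE_TPA ramrod is sent per rx queue; tpa_args->qid is
		 * advanced each pass and this handler is re-entered (the state
		 * is left unchanged) until all of the VF's rx queues have been
		 * updated
		 */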
3074 		if (tpa_args->qid < vf_rxq_count(vf)) {
3075 			struct bnx2x_queue_state_params *qstate =
3076 				&vf->op_params.qstate;
3077 
3078 			qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
3079 
3080 			/* The only thing that changes for the ramrod params
3081 			 * between calls is the sge_map
3082 			 */
3083 			qstate->params.update_tpa.sge_map =
3084 				tpa_args->sge_map[tpa_args->qid];
3085 
3086 			DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
3087 			   tpa_args->qid,
3088 			   U64_HI(qstate->params.update_tpa.sge_map),
3089 			   U64_LO(qstate->params.update_tpa.sge_map));
3090 			qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
3091 			vfop->rc = bnx2x_queue_state_change(bp, qstate);
3092 
3093 			tpa_args->qid++;
3094 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
3095 		}
3096 		vfop->state = BNX2X_VFOP_TPA_DONE;
3097 		vfop->rc = 0;
3098 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3099 op_err:
3100 		BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
3101 op_done:
3102 	case BNX2X_VFOP_TPA_DONE:
3103 		bnx2x_vfop_end(bp, vf, vfop);
3104 		return;
3105 	default:
3106 		bnx2x_vfop_default(state);
3107 	}
3108 op_pending:
3109 	return;
3110 }
3111 
3112 int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
3113 			struct bnx2x_virtf *vf,
3114 			struct bnx2x_vfop_cmd *cmd,
3115 			struct vfpf_tpa_tlv *tpa_tlv)
3116 {
3117 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3118 
3119 	if (vfop) {
3120 		vfop->args.qx.qid = 0; /* loop */
3121 		memcpy(&vfop->args.tpa.sge_map,
3122 		       tpa_tlv->tpa_client_info.sge_addr,
3123 		       sizeof(vfop->args.tpa.sge_map));
3124 		bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
3125 				 bnx2x_vfop_tpa, cmd->done);
3126 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
3127 					     cmd->block);
3128 	}
3129 	return -ENOMEM;
3130 }
3131 
3132 /* VF release ~ VF close + VF release-resources
3133  * Release is the ultimate SW shutdown and is called whenever an
3134  * irrecoverable error is encountered.
3135  */
3136 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
3137 {
3138 	struct bnx2x_vfop_cmd cmd = {
3139 		.done = NULL,
3140 		.block = block,
3141 	};
3142 	int rc;
3143 
3144 	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
3145 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
3146 
3147 	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
3148 	if (rc)
3149 		WARN(rc,
3150 		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
3151 		     vf->abs_vfid, rc);
3152 }
3153 
3154 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
3155 			      struct bnx2x_virtf *vf, u32 *sbdf)
3156 {
3157 	*sbdf = vf->devfn | (vf->bus << 8);
3158 }
3159 
3160 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3161 			      enum channel_tlvs tlv)
3162 {
3163 	/* we don't lock the channel for unsupported tlvs */
3164 	if (!bnx2x_tlv_supported(tlv)) {
3165 		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
3166 		return;
3167 	}
3168 
3169 	/* lock the channel */
3170 	mutex_lock(&vf->op_mutex);
3171 
3172 	/* record the locking op */
3173 	vf->op_current = tlv;
3174 
3175 	/* log the lock */
3176 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
3177 	   vf->abs_vfid, tlv);
3178 }
3179 
3180 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3181 				enum channel_tlvs expected_tlv)
3182 {
3183 	enum channel_tlvs current_tlv;
3184 
3185 	if (!vf) {
3186 		BNX2X_ERR("VF was %p\n", vf);
3187 		return;
3188 	}
3189 
3190 	current_tlv = vf->op_current;
3191 
3192 	/* we don't unlock the channel for unsupported tlvs */
3193 	if (!bnx2x_tlv_supported(expected_tlv))
3194 		return;
3195 
3196 	WARN(expected_tlv != vf->op_current,
3197 	     "lock mismatch: expected %d found %d", expected_tlv,
3198 	     vf->op_current);
3199 
	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
3209 }
3210 
3211 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
3212 {
3213 	struct bnx2x_queue_state_params q_params;
3214 	u32 prev_flags;
3215 	int i, rc;
3216 
3217 	/* Verify changes are needed and record current Tx switching state */
3218 	prev_flags = bp->flags;
3219 	if (enable)
3220 		bp->flags |= TX_SWITCHING;
3221 	else
3222 		bp->flags &= ~TX_SWITCHING;
3223 	if (prev_flags == bp->flags)
3224 		return 0;
3225 
3226 	/* Verify state enables the sending of queue ramrods */
3227 	if ((bp->state != BNX2X_STATE_OPEN) ||
3228 	    (bnx2x_get_q_logical_state(bp,
3229 				      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
3230 	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
3231 		return 0;
3232 
3233 	/* send q. update ramrod to configure Tx switching */
3234 	memset(&q_params, 0, sizeof(q_params));
3235 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3236 	q_params.cmd = BNX2X_Q_CMD_UPDATE;
3237 	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
3238 		  &q_params.params.update.update_flags);
3239 	if (enable)
3240 		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
3241 			  &q_params.params.update.update_flags);
3242 	else
3243 		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
3244 			    &q_params.params.update.update_flags);
3245 
3246 	/* send the ramrod on all the queues of the PF */
3247 	for_each_eth_queue(bp, i) {
3248 		struct bnx2x_fastpath *fp = &bp->fp[i];
3249 
3250 		/* Set the appropriate Queue object */
3251 		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
3252 
3253 		/* Update the Queue state */
3254 		rc = bnx2x_queue_state_change(bp, &q_params);
3255 		if (rc) {
3256 			BNX2X_ERR("Failed to configure Tx switching\n");
3257 			return rc;
3258 		}
3259 	}
3260 
3261 	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
3262 	return 0;
3263 }
3264 
3265 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3266 {
3267 	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3268 
3269 	if (!IS_SRIOV(bp)) {
3270 		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
3271 		return -EINVAL;
3272 	}
3273 
3274 	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3275 	   num_vfs_param, BNX2X_NR_VIRTFN(bp));
3276 
3277 	/* HW channel is only operational when PF is up */
3278 	if (bp->state != BNX2X_STATE_OPEN) {
3279 		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
3280 		return -EINVAL;
3281 	}
3282 
3283 	/* we are always bound by the total_vfs in the configuration space */
3284 	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
3285 		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
3286 			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
3287 		num_vfs_param = BNX2X_NR_VIRTFN(bp);
3288 	}
3289 
3290 	bp->requested_nr_virtfn = num_vfs_param;
3291 	if (num_vfs_param == 0) {
3292 		bnx2x_set_pf_tx_switching(bp, false);
3293 		pci_disable_sriov(dev);
3294 		return 0;
3295 	} else {
3296 		return bnx2x_enable_sriov(bp);
3297 	}
3298 }
3299 
3300 #define IGU_ENTRY_SIZE 4
3301 
3302 int bnx2x_enable_sriov(struct bnx2x *bp)
3303 {
3304 	int rc = 0, req_vfs = bp->requested_nr_virtfn;
3305 	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
3306 	u32 igu_entry, address;
3307 	u16 num_vf_queues;
3308 
3309 	if (req_vfs == 0)
3310 		return 0;
3311 
3312 	first_vf = bp->vfdb->sriov.first_vf_in_pf;
3313 
3314 	/* statically distribute vf sb pool between VFs */
3315 	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
3316 			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);
3317 
3318 	/* zero previous values learned from igu cam */
3319 	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
3320 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3321 
3322 		vf->sb_count = 0;
3323 		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
3324 	}
3325 	bp->vfdb->vf_sbs_pool = 0;
3326 
3327 	/* prepare IGU cam */
3328 	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
3329 	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
3330 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3331 		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
3332 			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
3333 				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
3334 				IGU_REG_MAPPING_MEMORY_VALID;
3335 			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
3336 			   sb_idx, vf_idx);
3337 			REG_WR(bp, address, igu_entry);
3338 			sb_idx++;
3339 			address += IGU_ENTRY_SIZE;
3340 		}
3341 	}
3342 
3343 	/* Reinitialize vf database according to igu cam */
3344 	bnx2x_get_vf_igu_cam_info(bp);
3345 
3346 	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
3347 	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
3348 
3349 	qcount = 0;
3350 	for_each_vf(bp, vf_idx) {
3351 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3352 
3353 		/* set local queue arrays */
3354 		vf->vfqs = &bp->vfdb->vfqs[qcount];
3355 		qcount += vf_sb_count(vf);
3356 		bnx2x_iov_static_resc(bp, vf);
3357 	}
3358 
3359 	/* prepare msix vectors in VF configuration space - the value in the
3360 	 * PCI configuration space should be the index of the last entry,
3361 	 * namely one less than the actual size of the table
3362 	 */
3363 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3364 		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3365 		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3366 		       num_vf_queues - 1);
3367 		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3368 		   vf_idx, num_vf_queues - 1);
3369 	}
3370 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3371 
	/* enable sriov. This will probe all the VFs, and consequently cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
3375 	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
3376 	bnx2x_disable_sriov(bp);
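	/* any previous SR-IOV enablement is presumably torn down first; the
	 * actual pci_enable_sriov() call follows once Tx switching is set up
	 */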
3377 
3378 	rc = bnx2x_set_pf_tx_switching(bp, true);
3379 	if (rc)
3380 		return rc;
3381 
3382 	rc = pci_enable_sriov(bp->pdev, req_vfs);
3383 	if (rc) {
3384 		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3385 		return rc;
3386 	}
3387 	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
3388 	return req_vfs;
3389 }
3390 
3391 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
3392 {
3393 	int vfidx;
3394 	struct pf_vf_bulletin_content *bulletin;
3395 
3396 	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
3397 	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
3399 		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
3400 			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
3401 	}
3402 }
3403 
3404 void bnx2x_disable_sriov(struct bnx2x *bp)
3405 {
3406 	pci_disable_sriov(bp->pdev);
3407 }
3408 
3409 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
3410 			     struct bnx2x_virtf **vf,
3411 			     struct pf_vf_bulletin_content **bulletin)
3412 {
3413 	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called although PF is down\n");
3415 		return -EINVAL;
3416 	}
3417 
3418 	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called although sriov is disabled\n");
3420 		return -EINVAL;
3421 	}
3422 
3423 	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
3424 		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
3425 			  vfidx, BNX2X_NR_VIRTFN(bp));
3426 		return -EINVAL;
3427 	}
3428 
3429 	/* init members */
3430 	*vf = BP_VF(bp, vfidx);
3431 	*bulletin = BP_VF_BULLETIN(bp, vfidx);
3432 
3433 	if (!*vf) {
3434 		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
3435 			  vfidx);
3436 		return -EINVAL;
3437 	}
3438 
3439 	if (!(*vf)->vfqs) {
3440 		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
3441 			  vfidx);
3442 		return -EINVAL;
3443 	}
3444 
3445 	if (!*bulletin) {
3446 		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
3447 			  vfidx);
3448 		return -EINVAL;
3449 	}
3450 
3451 	return 0;
3452 }
3453 
3454 int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3455 			struct ifla_vf_info *ivi)
3456 {
3457 	struct bnx2x *bp = netdev_priv(dev);
3458 	struct bnx2x_virtf *vf = NULL;
3459 	struct pf_vf_bulletin_content *bulletin = NULL;
3460 	struct bnx2x_vlan_mac_obj *mac_obj;
3461 	struct bnx2x_vlan_mac_obj *vlan_obj;
3462 	int rc;
3463 
3464 	/* sanity and init */
3465 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3466 	if (rc)
3467 		return rc;
3468 	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3469 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3470 	if (!mac_obj || !vlan_obj) {
3471 		BNX2X_ERR("VF partially initialized\n");
3472 		return -EINVAL;
3473 	}
3474 
3475 	ivi->vf = vfidx;
3476 	ivi->qos = 0;
3477 	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->spoofchk = 1; /* always enabled */
3479 	if (vf->state == VF_ENABLED) {
3480 		/* mac and vlan are in vlan_mac objects */
3481 		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
3482 			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3483 						0, ETH_ALEN);
3484 		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
3485 			vlan_obj->get_n_elements(bp, vlan_obj, 1,
3486 						 (u8 *)&ivi->vlan, 0,
3487 						 VLAN_HLEN);
3488 	} else {
3489 		/* mac */
3490 		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
3492 			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
3493 		else
3494 			/* function has not been loaded yet. Show mac as 0s */
3495 			memset(&ivi->mac, 0, ETH_ALEN);
3496 
3497 		/* vlan */
3498 		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
3500 			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
3501 		else
3502 			/* function has not been loaded yet. Show vlans as 0s */
3503 			memset(&ivi->vlan, 0, VLAN_HLEN);
3504 	}
3505 
3506 	return 0;
3507 }
3508 
3509 /* New mac for VF. Consider these cases:
3510  * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
3511  *    supply at acquire.
3512  * 2. VF has already been acquired but has not yet initialized - store in local
3513  *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
3514  *    will configure this mac when it is ready.
3515  * 3. VF has already initialized but has not yet setup a queue - post the new
3516  *    mac on VF's bulletin board right now. VF will configure this mac when it
3517  *    is ready.
3518  * 4. VF has already set a queue - delete any macs already configured for this
3519  *    queue and manually config the new mac.
 * In any event, once this function has been called, refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and the VF can
 * try again after consulting its bulletin board.
3525  */
3526 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3527 {
3528 	struct bnx2x *bp = netdev_priv(dev);
3529 	int rc, q_logical_state;
3530 	struct bnx2x_virtf *vf = NULL;
3531 	struct pf_vf_bulletin_content *bulletin = NULL;
3532 
3533 	/* sanity and init */
3534 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3535 	if (rc)
3536 		return rc;
3537 	if (!is_valid_ether_addr(mac)) {
3538 		BNX2X_ERR("mac address invalid\n");
3539 		return -EINVAL;
3540 	}
3541 
	/* update the PF's copy of the VF's bulletin. The PF will no longer
	 * accept mac configuration requests from the VF unless they match
	 * this mac.
	 */
3545 	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3546 	memcpy(bulletin->mac, mac, ETH_ALEN);
3547 
3548 	/* Post update on VF's bulletin board */
3549 	rc = bnx2x_post_vf_bulletin(bp, vfidx);
3550 	if (rc) {
3551 		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3552 		return rc;
3553 	}
3554 
3555 	q_logical_state =
3556 		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
3557 	if (vf->state == VF_ENABLED &&
3558 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3559 		/* configure the mac in device on this vf's queue */
3560 		unsigned long ramrod_flags = 0;
3561 		struct bnx2x_vlan_mac_obj *mac_obj =
3562 			&bnx2x_leading_vfq(vf, mac_obj);
3563 
3564 		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
3565 		if (rc)
3566 			return rc;
3567 
3568 		/* must lock vfpf channel to protect against vf flows */
3569 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3570 
3571 		/* remove existing eth macs */
3572 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3573 		if (rc) {
3574 			BNX2X_ERR("failed to delete eth macs\n");
3575 			rc = -EINVAL;
3576 			goto out;
3577 		}
3578 
3579 		/* remove existing uc list macs */
3580 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3581 		if (rc) {
3582 			BNX2X_ERR("failed to delete uc_list macs\n");
3583 			rc = -EINVAL;
3584 			goto out;
3585 		}
3586 
3587 		/* configure the new mac to device */
3588 		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		rc = bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				       BNX2X_ETH_MAC, &ramrod_flags);
3591 
3592 out:
3593 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3594 	}
3595 
	return rc;
3597 }
3598 
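/* New vlan for VF; this implements the PF's .ndo_set_vf_vlan callback,
 * typically reached via "ip link set <pf-dev> vf <num> vlan <vid>". Note that
 * the qos argument is currently not supported and is ignored.
 */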
3599 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3600 {
3601 	struct bnx2x_queue_state_params q_params = {NULL};
3602 	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3603 	struct bnx2x_queue_update_params *update_params;
3604 	struct pf_vf_bulletin_content *bulletin = NULL;
3605 	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
3606 	struct bnx2x *bp = netdev_priv(dev);
3607 	struct bnx2x_vlan_mac_obj *vlan_obj;
3608 	unsigned long vlan_mac_flags = 0;
3609 	unsigned long ramrod_flags = 0;
3610 	struct bnx2x_virtf *vf = NULL;
3611 	unsigned long accept_flags;
3612 	int rc;
3613 
3614 	/* sanity and init */
3615 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3616 	if (rc)
3617 		return rc;
3618 
3619 	if (vlan > 4095) {
3620 		BNX2X_ERR("illegal vlan value %d\n", vlan);
3621 		return -EINVAL;
3622 	}
3623 
3624 	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
3625 	   vfidx, vlan, 0);
3626 
	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet, so the vlan
	 * can be configured later, when it is. Treat vlan id 0 as a request to
	 * remove the host tag.
	 */
3633 	if (vlan > 0)
3634 		bulletin->valid_bitmap |= 1 << VLAN_VALID;
3635 	else
3636 		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
3637 	bulletin->vlan = vlan;
3638 
3639 	/* is vf initialized and queue set up? */
3640 	if (vf->state != VF_ENABLED ||
3641 	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
3642 	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
3643 		return rc;
3644 
3645 	/* configure the vlan in device on this vf's queue */
3646 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	rc = validate_vlan_mac(bp, vlan_obj);
3648 	if (rc)
3649 		return rc;
3650 
3651 	/* must lock vfpf channel to protect against vf flows */
3652 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3653 
3654 	/* remove existing vlans */
3655 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3656 	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3657 				  &ramrod_flags);
3658 	if (rc) {
3659 		BNX2X_ERR("failed to delete vlans\n");
3660 		rc = -EINVAL;
3661 		goto out;
3662 	}
3663 
3664 	/* need to remove/add the VF's accept_any_vlan bit */
3665 	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
3666 	if (vlan)
3667 		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3668 	else
3669 		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3670 
3671 	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
3672 			      accept_flags);
3673 	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
3674 	bnx2x_config_rx_mode(bp, &rx_ramrod);
3675 
3676 	/* configure the new vlan to device */
3677 	memset(&ramrod_param, 0, sizeof(ramrod_param));
3678 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3679 	ramrod_param.vlan_mac_obj = vlan_obj;
3680 	ramrod_param.ramrod_flags = ramrod_flags;
3681 	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
3682 		&ramrod_param.user_req.vlan_mac_flags);
3683 	ramrod_param.user_req.u.vlan.vlan = vlan;
3684 	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3685 	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3686 	if (rc) {
3687 		BNX2X_ERR("failed to configure vlan\n");
		rc = -EINVAL;
3689 		goto out;
3690 	}
3691 
3692 	/* send queue update ramrod to configure default vlan and silent
3693 	 * vlan removal
3694 	 */
3695 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3696 	q_params.cmd = BNX2X_Q_CMD_UPDATE;
3697 	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3698 	update_params = &q_params.params.update;
3699 	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3700 		  &update_params->update_flags);
3701 	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3702 		  &update_params->update_flags);
3703 	if (vlan == 0) {
3704 		/* if vlan is 0 then we want to leave the VF traffic
3705 		 * untagged, and leave the incoming traffic untouched
3706 		 * (i.e. do not remove any vlan tags).
3707 		 */
3708 		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3709 			    &update_params->update_flags);
3710 		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3711 			    &update_params->update_flags);
3712 	} else {
3713 		/* configure default vlan to vf queue and set silent
3714 		 * vlan removal (the vf remains unaware of this vlan).
3715 		 */
3716 		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3717 			  &update_params->update_flags);
3718 		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3719 			  &update_params->update_flags);
3720 		update_params->def_vlan = vlan;
3721 		update_params->silent_removal_value =
3722 			vlan & VLAN_VID_MASK;
3723 		update_params->silent_removal_mask = VLAN_VID_MASK;
3724 	}
3725 
3726 	/* Update the Queue state */
3727 	rc = bnx2x_queue_state_change(bp, &q_params);
3728 	if (rc) {
3729 		BNX2X_ERR("Failed to configure default VLAN\n");
3730 		goto out;
	}

	/* clear the flag indicating that this VF needs its vlan
	 * (it will only be set if the HV configured the vlan before the VF
	 * was up and we were called because the VF came up later)
	 */
3738 out:
3739 	vf->cfg_flags &= ~VF_CFG_VLAN;
3740 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3741 
3742 	return rc;
3743 }
3744 
3745 /* crc is the first field in the bulletin board. Compute the crc over the
3746  * entire bulletin board excluding the crc field itself. Use the length field
3747  * as the Bulletin Board was posted by a PF with possibly a different version
3748  * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
3750  */
3751 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
3752 			  struct pf_vf_bulletin_content *bulletin)
3753 {
3754 	return crc32(BULLETIN_CRC_SEED,
3755 		 ((u8 *)bulletin) + sizeof(bulletin->crc),
3756 		 bulletin->length - sizeof(bulletin->crc));
3757 }
3758 
3759 /* Check for new posts on the bulletin board */
3760 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3761 {
3762 	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
3763 	int attempts;
3764 
3765 	/* bulletin board hasn't changed since last sample */
3766 	if (bp->old_bulletin.version == bulletin.version)
3767 		return PFVF_BULLETIN_UNCHANGED;
3768 
3769 	/* validate crc of new bulletin board */
3770 	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling the structure in mid-post may result in corrupted
		 * data; validate the crc to ensure coherency.
		 */
3774 		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3775 			bulletin = bp->pf2vf_bulletin->content;
3776 			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
3777 								  &bulletin))
3778 				break;
3779 			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3780 				  bulletin.crc,
3781 				  bnx2x_crc_vf_bulletin(bp, &bulletin));
3782 		}
3783 		if (attempts >= BULLETIN_ATTEMPTS) {
3784 			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3785 				  attempts);
3786 			return PFVF_BULLETIN_CRC_ERR;
3787 		}
3788 	}
3789 
3790 	/* the mac address in bulletin board is valid and is new */
3791 	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
3792 	    !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
3793 		/* update new mac to net device */
3794 		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3795 	}
3796 
3797 	/* the vlan in bulletin board is valid and is new */
3798 	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
3799 		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
3800 
3801 	/* copy new bulletin board to bp */
3802 	bp->old_bulletin = bulletin;
3803 
3804 	return PFVF_BULLETIN_UPDATED;
3805 }
3806 
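/* Called from the driver's periodic timer on a VF to sample the bulletin
 * board for new posts from the PF.
 */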
3807 void bnx2x_timer_sriov(struct bnx2x *bp)
3808 {
3809 	bnx2x_sample_bulletin(bp);
3810 
	/* if the channel is down we need to self-destruct */
3812 	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
3813 		smp_mb__before_clear_bit();
3814 		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3815 			&bp->sp_rtnl_state);
3816 		smp_mb__after_clear_bit();
3817 		schedule_delayed_work(&bp->sp_rtnl_task, 0);
3818 	}
3819 }
3820 
3821 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3822 {
3823 	/* vf doorbells are embedded within the regview */
3824 	return bp->regview + PXP_VF_ADDR_DB_START;
3825 }
3826 
3827 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3828 {
3829 	mutex_init(&bp->vf2pf_mutex);
3830 
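	/* BNX2X_PCI_ALLOC jumps to alloc_mem_err on allocation failure, which
	 * is why both regions are released there
	 */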
3831 	/* allocate vf2pf mailbox for vf to pf channel */
3832 	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
3833 			sizeof(struct bnx2x_vf_mbx_msg));
3834 
3835 	/* allocate pf 2 vf bulletin board */
3836 	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
3837 			sizeof(union pf_vf_bulletin));
3838 
3839 	return 0;
3840 
3841 alloc_mem_err:
3842 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3843 		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3845 		       sizeof(union pf_vf_bulletin));
3846 	return -ENOMEM;
3847 }
3848 
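/* Called on the PF (e.g. while it is being unloaded) to mark the vf-pf
 * channel as down in every VF's bulletin board, so that the VFs stop using
 * the channel and tear themselves down.
 */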
3849 void bnx2x_iov_channel_down(struct bnx2x *bp)
3850 {
3851 	int vf_idx;
3852 	struct pf_vf_bulletin_content *bulletin;
3853 
3854 	if (!IS_SRIOV(bp))
3855 		return;
3856 
3857 	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and set the channel-down
		 * bit
		 */
3861 		bulletin = BP_VF_BULLETIN(bp, vf_idx);
3862 		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3863 
3864 		/* update vf bulletin board */
3865 		bnx2x_post_vf_bulletin(bp, vf_idx);
3866 	}
3867 }
3868