1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include "bnx2x_sp.h"
24 #include <linux/crc32.h>
25 #include <linux/if_vlan.h>
26 
27 /* General service functions */
28 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
29 					 u16 pf_id)
30 {
31 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
32 		pf_id);
33 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
34 		pf_id);
35 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
36 		pf_id);
37 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
38 		pf_id);
39 }
40 
41 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
42 					u8 enable)
43 {
44 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
45 		enable);
46 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
47 		enable);
48 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
49 		enable);
50 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
51 		enable);
52 }
53 
54 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
55 {
56 	int idx;
57 
58 	for_each_vf(bp, idx)
59 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
60 			break;
61 	return idx;
62 }
63 
64 static
65 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
66 {
67 	u16 idx =  (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
68 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
69 }
70 
71 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
72 				u8 igu_sb_id, u8 segment, u16 index, u8 op,
73 				u8 update)
74 {
75 	/* acking a VF sb through the PF - use the GRC */
76 	u32 ctl;
77 	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
78 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
79 	u32 func_encode = vf->abs_vfid;
80 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
81 	struct igu_regular cmd_data = {0};
82 
83 	cmd_data.sb_id_and_flags =
84 			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
85 			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
86 			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
87 			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
88 
89 	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
90 	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
91 	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
92 
93 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
94 	   cmd_data.sb_id_and_flags, igu_addr_data);
95 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
96 	mmiowb();
97 	barrier();
98 
99 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
100 	   ctl, igu_addr_ctl);
101 	REG_WR(bp, igu_addr_ctl, ctl);
102 	mmiowb();
103 	barrier();
104 }
105 
106 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
107 				       struct bnx2x_virtf *vf,
108 				       bool print_err)
109 {
110 	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
111 		if (print_err)
112 			BNX2X_ERR("Slowpath objects not yet initialized!\n");
113 		else
114 			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
115 		return false;
116 	}
117 	return true;
118 }
119 
120 /* VFOP - VF slow-path operation support */
121 
122 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
123 
124 /* VFOP operations states */
125 enum bnx2x_vfop_qctor_state {
126 	   BNX2X_VFOP_QCTOR_INIT,
127 	   BNX2X_VFOP_QCTOR_SETUP,
128 	   BNX2X_VFOP_QCTOR_INT_EN
129 };
130 
131 enum bnx2x_vfop_qdtor_state {
132 	   BNX2X_VFOP_QDTOR_HALT,
133 	   BNX2X_VFOP_QDTOR_TERMINATE,
134 	   BNX2X_VFOP_QDTOR_CFCDEL,
135 	   BNX2X_VFOP_QDTOR_DONE
136 };
137 
138 enum bnx2x_vfop_vlan_mac_state {
139 	   BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
140 	   BNX2X_VFOP_VLAN_MAC_CLEAR,
141 	   BNX2X_VFOP_VLAN_MAC_CHK_DONE,
142 	   BNX2X_VFOP_MAC_CONFIG_LIST,
143 	   BNX2X_VFOP_VLAN_CONFIG_LIST,
144 	   BNX2X_VFOP_VLAN_CONFIG_LIST_0
145 };
146 
147 enum bnx2x_vfop_qsetup_state {
148 	   BNX2X_VFOP_QSETUP_CTOR,
149 	   BNX2X_VFOP_QSETUP_VLAN0,
150 	   BNX2X_VFOP_QSETUP_DONE
151 };
152 
153 enum bnx2x_vfop_mcast_state {
154 	   BNX2X_VFOP_MCAST_DEL,
155 	   BNX2X_VFOP_MCAST_ADD,
156 	   BNX2X_VFOP_MCAST_CHK_DONE
157 };
158 enum bnx2x_vfop_qflr_state {
159 	   BNX2X_VFOP_QFLR_CLR_VLAN,
160 	   BNX2X_VFOP_QFLR_CLR_MAC,
161 	   BNX2X_VFOP_QFLR_TERMINATE,
162 	   BNX2X_VFOP_QFLR_DONE
163 };
164 
165 enum bnx2x_vfop_flr_state {
166 	   BNX2X_VFOP_FLR_QUEUES,
167 	   BNX2X_VFOP_FLR_HW
168 };
169 
170 enum bnx2x_vfop_close_state {
171 	   BNX2X_VFOP_CLOSE_QUEUES,
172 	   BNX2X_VFOP_CLOSE_HW
173 };
174 
175 enum bnx2x_vfop_rxmode_state {
176 	   BNX2X_VFOP_RXMODE_CONFIG,
177 	   BNX2X_VFOP_RXMODE_DONE
178 };
179 
180 enum bnx2x_vfop_qteardown_state {
181 	   BNX2X_VFOP_QTEARDOWN_RXMODE,
182 	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
183 	   BNX2X_VFOP_QTEARDOWN_CLR_MAC,
184 	   BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
185 	   BNX2X_VFOP_QTEARDOWN_QDTOR,
186 	   BNX2X_VFOP_QTEARDOWN_DONE
187 };
188 
189 enum bnx2x_vfop_rss_state {
190 	   BNX2X_VFOP_RSS_CONFIG,
191 	   BNX2X_VFOP_RSS_DONE
192 };
193 
194 enum bnx2x_vfop_tpa_state {
195 	   BNX2X_VFOP_TPA_CONFIG,
196 	   BNX2X_VFOP_TPA_DONE
197 };
198 
199 #define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
200 
201 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
202 			      struct bnx2x_queue_init_params *init_params,
203 			      struct bnx2x_queue_setup_params *setup_params,
204 			      u16 q_idx, u16 sb_idx)
205 {
206 	DP(BNX2X_MSG_IOV,
207 	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
208 	   vf->abs_vfid,
209 	   q_idx,
210 	   sb_idx,
211 	   init_params->tx.sb_cq_index,
212 	   init_params->tx.hc_rate,
213 	   setup_params->flags,
214 	   setup_params->txq_params.traffic_type);
215 }
216 
217 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
218 			    struct bnx2x_queue_init_params *init_params,
219 			    struct bnx2x_queue_setup_params *setup_params,
220 			    u16 q_idx, u16 sb_idx)
221 {
222 	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
223 
224 	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
225 	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
226 	   vf->abs_vfid,
227 	   q_idx,
228 	   sb_idx,
229 	   init_params->rx.sb_cq_index,
230 	   init_params->rx.hc_rate,
231 	   setup_params->gen_params.mtu,
232 	   rxq_params->buf_sz,
233 	   rxq_params->sge_buf_sz,
234 	   rxq_params->max_sges_pkt,
235 	   rxq_params->tpa_agg_sz,
236 	   setup_params->flags,
237 	   rxq_params->drop_flags,
238 	   rxq_params->cache_line_log);
239 }
240 
241 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
242 			   struct bnx2x_virtf *vf,
243 			   struct bnx2x_vf_queue *q,
244 			   struct bnx2x_vfop_qctor_params *p,
245 			   unsigned long q_type)
246 {
247 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
248 	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
249 
250 	/* INIT */
251 
252 	/* Enable host coalescing in the transition to INIT state */
253 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
254 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
255 
256 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
257 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
258 
259 	/* FW SB ID */
260 	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
261 	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
262 
263 	/* context */
264 	init_p->cxts[0] = q->cxt;
265 
266 	/* SETUP */
267 
268 	/* Setup-op general parameters */
269 	setup_p->gen_params.spcl_id = vf->sp_cl_id;
270 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
271 
272 	/* Setup-op pause params:
273 	 * Nothing to do, the pause thresholds are set by default to 0 which
274 	 * effectively turns off the feature for this queue. We don't want
275 	 * one queue (VF) to interfering with another queue (another VF)
276 	 */
277 	if (vf->cfg_flags & VF_CFG_FW_FC)
278 		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
279 			  vf->abs_vfid);
280 	/* Setup-op flags:
281 	 * collect statistics, zero statistics, local-switching, security,
282 	 * OV for Flex10, RSS and MCAST for leading
283 	 */
284 	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
285 		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
286 
287 	/* for VFs, enable tx switching, bd coherency, and mac address
288 	 * anti-spoofing
289 	 */
290 	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
291 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
292 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
293 
294 	/* Setup-op rx parameters */
295 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
296 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
297 
298 		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
299 		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
300 		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
301 
302 		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
303 			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
304 	}
305 
306 	/* Setup-op tx parameters */
307 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
308 		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
309 		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
310 	}
311 }
312 
313 /* VFOP queue construction */
314 static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
315 {
316 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
317 	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
318 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
319 	enum bnx2x_vfop_qctor_state state = vfop->state;
320 
321 	bnx2x_vfop_reset_wq(vf);
322 
323 	if (vfop->rc < 0)
324 		goto op_err;
325 
326 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
327 
328 	switch (state) {
329 	case BNX2X_VFOP_QCTOR_INIT:
330 
331 		/* has this queue already been opened? */
332 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
333 		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
334 			DP(BNX2X_MSG_IOV,
335 			   "Entered qctor but queue was already up. Aborting gracefully\n");
336 			goto op_done;
337 		}
338 
339 		/* next state */
340 		vfop->state = BNX2X_VFOP_QCTOR_SETUP;
341 
342 		q_params->cmd = BNX2X_Q_CMD_INIT;
343 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
344 
345 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
346 
347 	case BNX2X_VFOP_QCTOR_SETUP:
348 		/* next state */
349 		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
350 
351 		/* copy pre-prepared setup params to the queue-state params */
352 		vfop->op_p->qctor.qstate.params.setup =
353 			vfop->op_p->qctor.prep_qsetup;
354 
355 		q_params->cmd = BNX2X_Q_CMD_SETUP;
356 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
357 
358 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
359 
360 	case BNX2X_VFOP_QCTOR_INT_EN:
361 
362 		/* enable interrupts */
363 		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
364 				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
365 		goto op_done;
366 	default:
367 		bnx2x_vfop_default(state);
368 	}
369 op_err:
370 	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
371 		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
372 op_done:
373 	bnx2x_vfop_end(bp, vf, vfop);
374 op_pending:
375 	return;
376 }
377 
378 static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
379 				struct bnx2x_virtf *vf,
380 				struct bnx2x_vfop_cmd *cmd,
381 				int qid)
382 {
383 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
384 
385 	if (vfop) {
386 		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
387 
388 		vfop->args.qctor.qid = qid;
389 		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
390 
391 		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
392 				 bnx2x_vfop_qctor, cmd->done);
393 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
394 					     cmd->block);
395 	}
396 	return -ENOMEM;
397 }
398 
399 /* VFOP queue destruction */
400 static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
401 {
402 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
403 	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
404 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
405 	enum bnx2x_vfop_qdtor_state state = vfop->state;
406 
407 	bnx2x_vfop_reset_wq(vf);
408 
409 	if (vfop->rc < 0)
410 		goto op_err;
411 
412 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
413 
414 	switch (state) {
415 	case BNX2X_VFOP_QDTOR_HALT:
416 
417 		/* has this queue already been stopped? */
418 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
419 		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
420 			DP(BNX2X_MSG_IOV,
421 			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
422 
423 			/* next state */
424 			vfop->state = BNX2X_VFOP_QDTOR_DONE;
425 
426 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
427 		}
428 
429 		/* next state */
430 		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
431 
432 		q_params->cmd = BNX2X_Q_CMD_HALT;
433 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
434 
435 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
436 
437 	case BNX2X_VFOP_QDTOR_TERMINATE:
438 		/* next state */
439 		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
440 
441 		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
442 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
443 
444 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
445 
446 	case BNX2X_VFOP_QDTOR_CFCDEL:
447 		/* next state */
448 		vfop->state = BNX2X_VFOP_QDTOR_DONE;
449 
450 		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
451 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
452 
453 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
454 op_err:
455 	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
456 		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
457 op_done:
458 	case BNX2X_VFOP_QDTOR_DONE:
459 		/* invalidate the context */
460 		if (qdtor->cxt) {
461 			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
462 			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
463 		}
464 		bnx2x_vfop_end(bp, vf, vfop);
465 		return;
466 	default:
467 		bnx2x_vfop_default(state);
468 	}
469 op_pending:
470 	return;
471 }
472 
473 static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
474 				struct bnx2x_virtf *vf,
475 				struct bnx2x_vfop_cmd *cmd,
476 				int qid)
477 {
478 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
479 
480 	if (vfop) {
481 		struct bnx2x_queue_state_params *qstate =
482 			&vf->op_params.qctor.qstate;
483 
484 		memset(qstate, 0, sizeof(*qstate));
485 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
486 
487 		vfop->args.qdtor.qid = qid;
488 		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
489 
490 		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
491 				 bnx2x_vfop_qdtor, cmd->done);
492 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
493 					     cmd->block);
494 	} else {
495 		BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
496 		return -ENOMEM;
497 	}
498 }
499 
500 static void
501 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
502 {
503 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
504 	if (vf) {
505 		/* the first igu entry belonging to VFs of this PF */
506 		if (!BP_VFDB(bp)->first_vf_igu_entry)
507 			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
508 
509 		/* the first igu entry belonging to this VF */
510 		if (!vf_sb_count(vf))
511 			vf->igu_base_id = igu_sb_id;
512 
513 		++vf_sb_count(vf);
514 		++vf->sb_count;
515 	}
516 	BP_VFDB(bp)->vf_sbs_pool++;
517 }
518 
519 /* VFOP MAC/VLAN helpers */
520 static inline void bnx2x_vfop_credit(struct bnx2x *bp,
521 				     struct bnx2x_vfop *vfop,
522 				     struct bnx2x_vlan_mac_obj *obj)
523 {
524 	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
525 
526 	/* update credit only if there is no error
527 	 * and a valid credit counter
528 	 */
529 	if (!vfop->rc && args->credit) {
530 		struct list_head *pos;
531 		int read_lock;
532 		int cnt = 0;
533 
534 		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
535 		if (read_lock)
536 			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
537 
538 		list_for_each(pos, &obj->head)
539 			cnt++;
540 
541 		if (!read_lock)
542 			bnx2x_vlan_mac_h_read_unlock(bp, obj);
543 
544 		atomic_set(args->credit, cnt);
545 	}
546 }
547 
548 static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
549 				    struct bnx2x_vfop_filter *pos,
550 				    struct bnx2x_vlan_mac_data *user_req)
551 {
552 	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
553 		BNX2X_VLAN_MAC_DEL;
554 
555 	switch (pos->type) {
556 	case BNX2X_VFOP_FILTER_MAC:
557 		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
558 		break;
559 	case BNX2X_VFOP_FILTER_VLAN:
560 		user_req->u.vlan.vlan = pos->vid;
561 		break;
562 	default:
563 		BNX2X_ERR("Invalid filter type, skipping\n");
564 		return 1;
565 	}
566 	return 0;
567 }
568 
569 static int bnx2x_vfop_config_list(struct bnx2x *bp,
570 				  struct bnx2x_vfop_filters *filters,
571 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
572 {
573 	struct bnx2x_vfop_filter *pos, *tmp;
574 	struct list_head rollback_list, *filters_list = &filters->head;
575 	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
576 	int rc = 0, cnt = 0;
577 
578 	INIT_LIST_HEAD(&rollback_list);
579 
580 	list_for_each_entry_safe(pos, tmp, filters_list, link) {
581 		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
582 			continue;
583 
584 		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
585 		if (rc >= 0) {
586 			cnt += pos->add ? 1 : -1;
587 			list_move(&pos->link, &rollback_list);
588 			rc = 0;
589 		} else if (rc == -EEXIST) {
590 			rc = 0;
591 		} else {
592 			BNX2X_ERR("Failed to add a new vlan_mac command\n");
593 			break;
594 		}
595 	}
596 
597 	/* rollback if error or too many rules added */
598 	if (rc || cnt > filters->add_cnt) {
599 		BNX2X_ERR("error or too many rules added. Performing rollback\n");
600 		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
601 			pos->add = !pos->add;	/* reverse op */
602 			bnx2x_vfop_set_user_req(bp, pos, user_req);
603 			bnx2x_config_vlan_mac(bp, vlan_mac);
604 			list_del(&pos->link);
605 		}
606 		cnt = 0;
607 		if (!rc)
608 			rc = -EINVAL;
609 	}
610 	filters->add_cnt = cnt;
611 	return rc;
612 }
613 
614 /* VFOP set VLAN/MAC */
615 static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
616 {
617 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
618 	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
619 	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
620 	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
621 
622 	enum bnx2x_vfop_vlan_mac_state state = vfop->state;
623 
624 	if (vfop->rc < 0)
625 		goto op_err;
626 
627 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
628 
629 	bnx2x_vfop_reset_wq(vf);
630 
631 	switch (state) {
632 	case BNX2X_VFOP_VLAN_MAC_CLEAR:
633 		/* next state */
634 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
635 
636 		/* do delete */
637 		vfop->rc = obj->delete_all(bp, obj,
638 					   &vlan_mac->user_req.vlan_mac_flags,
639 					   &vlan_mac->ramrod_flags);
640 
641 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
642 
643 	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
644 		/* next state */
645 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
646 
647 		/* do config */
648 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
649 		if (vfop->rc == -EEXIST)
650 			vfop->rc = 0;
651 
652 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
653 
654 	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
655 		vfop->rc = !!obj->raw.check_pending(&obj->raw);
656 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
657 
658 	case BNX2X_VFOP_MAC_CONFIG_LIST:
659 		/* next state */
660 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
661 
662 		/* do list config */
663 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
664 		if (vfop->rc)
665 			goto op_err;
666 
667 		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
668 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
669 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
670 
671 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
672 		/* next state */
673 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
674 
675 		/* do list config */
676 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
677 		if (!vfop->rc) {
678 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
679 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
680 		}
681 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
682 
683 	default:
684 		bnx2x_vfop_default(state);
685 	}
686 op_err:
687 	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
688 op_done:
689 	kfree(filters);
690 	bnx2x_vfop_credit(bp, vfop, obj);
691 	bnx2x_vfop_end(bp, vf, vfop);
692 op_pending:
693 	return;
694 }
695 
696 struct bnx2x_vfop_vlan_mac_flags {
697 	bool drv_only;
698 	bool dont_consume;
699 	bool single_cmd;
700 	bool add;
701 };
702 
703 static void
704 bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
705 				struct bnx2x_vfop_vlan_mac_flags *flags)
706 {
707 	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
708 
709 	memset(ramrod, 0, sizeof(*ramrod));
710 
711 	/* ramrod flags */
712 	if (flags->drv_only)
713 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
714 	if (flags->single_cmd)
715 		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
716 
717 	/* mac_vlan flags */
718 	if (flags->dont_consume)
719 		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
720 
721 	/* cmd */
722 	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
723 }
724 
725 static inline void
726 bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
727 			   struct bnx2x_vfop_vlan_mac_flags *flags)
728 {
729 	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
730 	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
731 }
732 
733 static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
734 				     struct bnx2x_virtf *vf,
735 				     struct bnx2x_vfop_cmd *cmd,
736 				     int qid, bool drv_only)
737 {
738 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
739 
740 	if (vfop) {
741 		struct bnx2x_vfop_args_filters filters = {
742 			.multi_filter = NULL,	/* single */
743 			.credit = NULL,		/* consume credit */
744 		};
745 		struct bnx2x_vfop_vlan_mac_flags flags = {
746 			.drv_only = drv_only,
747 			.dont_consume = (filters.credit != NULL),
748 			.single_cmd = true,
749 			.add = false /* don't care */,
750 		};
751 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
752 			&vf->op_params.vlan_mac;
753 
754 		/* set ramrod params */
755 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
756 
757 		/* set object */
758 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
759 
760 		/* set extra args */
761 		vfop->args.filters = filters;
762 
763 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
764 				 bnx2x_vfop_vlan_mac, cmd->done);
765 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
766 					     cmd->block);
767 	}
768 	return -ENOMEM;
769 }
770 
771 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
772 			    struct bnx2x_virtf *vf,
773 			    struct bnx2x_vfop_cmd *cmd,
774 			    struct bnx2x_vfop_filters *macs,
775 			    int qid, bool drv_only)
776 {
777 	struct bnx2x_vfop *vfop;
778 
779 	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
780 			return -EINVAL;
781 
782 	vfop  = bnx2x_vfop_add(bp, vf);
783 	if (vfop) {
784 		struct bnx2x_vfop_args_filters filters = {
785 			.multi_filter = macs,
786 			.credit = NULL,		/* consume credit */
787 		};
788 		struct bnx2x_vfop_vlan_mac_flags flags = {
789 			.drv_only = drv_only,
790 			.dont_consume = (filters.credit != NULL),
791 			.single_cmd = false,
792 			.add = false, /* don't care since only the items in the
793 				       * filters list affect the sp operation,
794 				       * not the list itself
795 				       */
796 		};
797 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
798 			&vf->op_params.vlan_mac;
799 
800 		/* set ramrod params */
801 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
802 
803 		/* set object */
804 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
805 
806 		/* set extra args */
807 		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
808 		vfop->args.filters = filters;
809 
810 		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
811 				 bnx2x_vfop_vlan_mac, cmd->done);
812 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
813 					     cmd->block);
814 	}
815 	return -ENOMEM;
816 }
817 
818 static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
819 				   struct bnx2x_virtf *vf,
820 				   struct bnx2x_vfop_cmd *cmd,
821 				   int qid, u16 vid, bool add)
822 {
823 	struct bnx2x_vfop *vfop;
824 
825 	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
826 		return -EINVAL;
827 
828 	vfop  = bnx2x_vfop_add(bp, vf);
829 	if (vfop) {
830 		struct bnx2x_vfop_args_filters filters = {
831 			.multi_filter = NULL, /* single command */
832 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
833 		};
834 		struct bnx2x_vfop_vlan_mac_flags flags = {
835 			.drv_only = false,
836 			.dont_consume = (filters.credit != NULL),
837 			.single_cmd = true,
838 			.add = add,
839 		};
840 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
841 			&vf->op_params.vlan_mac;
842 
843 		/* set ramrod params */
844 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
845 		ramrod->user_req.u.vlan.vlan = vid;
846 
847 		/* set object */
848 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
849 
850 		/* set extra args */
851 		vfop->args.filters = filters;
852 
853 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
854 				 bnx2x_vfop_vlan_mac, cmd->done);
855 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
856 					     cmd->block);
857 	}
858 	return -ENOMEM;
859 }
860 
861 static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
862 			       struct bnx2x_virtf *vf,
863 			       struct bnx2x_vfop_cmd *cmd,
864 			       int qid, bool drv_only)
865 {
866 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
867 
868 	if (vfop) {
869 		struct bnx2x_vfop_args_filters filters = {
870 			.multi_filter = NULL, /* single command */
871 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
872 		};
873 		struct bnx2x_vfop_vlan_mac_flags flags = {
874 			.drv_only = drv_only,
875 			.dont_consume = (filters.credit != NULL),
876 			.single_cmd = true,
877 			.add = false, /* don't care */
878 		};
879 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
880 			&vf->op_params.vlan_mac;
881 
882 		/* set ramrod params */
883 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
884 
885 		/* set object */
886 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
887 
888 		/* set extra args */
889 		vfop->args.filters = filters;
890 
891 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
892 				 bnx2x_vfop_vlan_mac, cmd->done);
893 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
894 					     cmd->block);
895 	}
896 	return -ENOMEM;
897 }
898 
899 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
900 			     struct bnx2x_virtf *vf,
901 			     struct bnx2x_vfop_cmd *cmd,
902 			     struct bnx2x_vfop_filters *vlans,
903 			     int qid, bool drv_only)
904 {
905 	struct bnx2x_vfop *vfop;
906 
907 	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
908 		return -EINVAL;
909 
910 	vfop = bnx2x_vfop_add(bp, vf);
911 	if (vfop) {
912 		struct bnx2x_vfop_args_filters filters = {
913 			.multi_filter = vlans,
914 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
915 		};
916 		struct bnx2x_vfop_vlan_mac_flags flags = {
917 			.drv_only = drv_only,
918 			.dont_consume = (filters.credit != NULL),
919 			.single_cmd = false,
920 			.add = false, /* don't care */
921 		};
922 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
923 			&vf->op_params.vlan_mac;
924 
925 		/* set ramrod params */
926 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
927 
928 		/* set object */
929 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
930 
931 		/* set extra args */
932 		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
933 			atomic_read(filters.credit);
934 
935 		vfop->args.filters = filters;
936 
937 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
938 				 bnx2x_vfop_vlan_mac, cmd->done);
939 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
940 					     cmd->block);
941 	}
942 	return -ENOMEM;
943 }
944 
945 /* VFOP queue setup (queue constructor + set vlan 0) */
946 static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
947 {
948 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
949 	int qid = vfop->args.qctor.qid;
950 	enum bnx2x_vfop_qsetup_state state = vfop->state;
951 	struct bnx2x_vfop_cmd cmd = {
952 		.done = bnx2x_vfop_qsetup,
953 		.block = false,
954 	};
955 
956 	if (vfop->rc < 0)
957 		goto op_err;
958 
959 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
960 
961 	switch (state) {
962 	case BNX2X_VFOP_QSETUP_CTOR:
963 		/* init the queue ctor command */
964 		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
965 		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
966 		if (vfop->rc)
967 			goto op_err;
968 		return;
969 
970 	case BNX2X_VFOP_QSETUP_VLAN0:
971 		/* skip if non-leading or FPGA/EMU*/
972 		if (qid)
973 			goto op_done;
974 
975 		/* init the queue set-vlan command (for vlan 0) */
976 		vfop->state = BNX2X_VFOP_QSETUP_DONE;
977 		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
978 		if (vfop->rc)
979 			goto op_err;
980 		return;
981 op_err:
982 	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
983 op_done:
984 	case BNX2X_VFOP_QSETUP_DONE:
985 		vf->cfg_flags |= VF_CFG_VLAN;
986 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
987 				       BNX2X_MSG_IOV);
988 		bnx2x_vfop_end(bp, vf, vfop);
989 		return;
990 	default:
991 		bnx2x_vfop_default(state);
992 	}
993 }
994 
995 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
996 			  struct bnx2x_virtf *vf,
997 			  struct bnx2x_vfop_cmd *cmd,
998 			  int qid)
999 {
1000 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1001 
1002 	if (vfop) {
1003 		vfop->args.qctor.qid = qid;
1004 
1005 		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
1006 				 bnx2x_vfop_qsetup, cmd->done);
1007 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
1008 					     cmd->block);
1009 	}
1010 	return -ENOMEM;
1011 }
1012 
1013 /* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
1014 static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1015 {
1016 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1017 	int qid = vfop->args.qx.qid;
1018 	enum bnx2x_vfop_qflr_state state = vfop->state;
1019 	struct bnx2x_queue_state_params *qstate;
1020 	struct bnx2x_vfop_cmd cmd;
1021 
1022 	bnx2x_vfop_reset_wq(vf);
1023 
1024 	if (vfop->rc < 0)
1025 		goto op_err;
1026 
1027 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
1028 
1029 	cmd.done = bnx2x_vfop_qflr;
1030 	cmd.block = false;
1031 
1032 	switch (state) {
1033 	case BNX2X_VFOP_QFLR_CLR_VLAN:
1034 		/* vlan-clear-all: driver-only, don't consume credit */
1035 		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
1036 
1037 		/* the vlan_mac vfop will re-schedule us */
1038 		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
1039 		if (vfop->rc)
1040 			goto op_err;
1041 		return;
1042 
1043 	case BNX2X_VFOP_QFLR_CLR_MAC:
1044 		/* mac-clear-all: driver only consume credit */
1045 		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
1046 		/* the vlan_mac vfop will re-schedule us */
1047 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
1048 		if (vfop->rc)
1049 			goto op_err;
1050 		return;
1051 
1052 	case BNX2X_VFOP_QFLR_TERMINATE:
1053 		qstate = &vfop->op_p->qctor.qstate;
1054 		memset(qstate , 0, sizeof(*qstate));
1055 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
1056 		vfop->state = BNX2X_VFOP_QFLR_DONE;
1057 
1058 		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
1059 		   vf->abs_vfid, qstate->q_obj->state);
1060 
1061 		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
1062 			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
1063 			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
1064 			vfop->rc = bnx2x_queue_state_change(bp, qstate);
1065 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
1066 		} else {
1067 			goto op_done;
1068 		}
1069 
1070 op_err:
1071 	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
1072 		  vf->abs_vfid, qid, vfop->rc);
1073 op_done:
1074 	case BNX2X_VFOP_QFLR_DONE:
1075 		bnx2x_vfop_end(bp, vf, vfop);
1076 		return;
1077 	default:
1078 		bnx2x_vfop_default(state);
1079 	}
1080 op_pending:
1081 	return;
1082 }
1083 
1084 static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
1085 			       struct bnx2x_virtf *vf,
1086 			       struct bnx2x_vfop_cmd *cmd,
1087 			       int qid)
1088 {
1089 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1090 
1091 	if (vfop) {
1092 		vfop->args.qx.qid = qid;
1093 		if ((qid == LEADING_IDX) &&
1094 		    bnx2x_validate_vf_sp_objs(bp, vf, false))
1095 			bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
1096 					 bnx2x_vfop_qflr, cmd->done);
1097 		else
1098 			bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
1099 					 bnx2x_vfop_qflr, cmd->done);
1100 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
1101 					     cmd->block);
1102 	}
1103 	return -ENOMEM;
1104 }
1105 
1106 /* VFOP multi-casts */
1107 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
1108 {
1109 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1110 	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
1111 	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
1112 	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
1113 	enum bnx2x_vfop_mcast_state state = vfop->state;
1114 	int i;
1115 
1116 	bnx2x_vfop_reset_wq(vf);
1117 
1118 	if (vfop->rc < 0)
1119 		goto op_err;
1120 
1121 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1122 
1123 	switch (state) {
1124 	case BNX2X_VFOP_MCAST_DEL:
1125 		/* clear existing mcasts */
1126 		vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
1127 					     : BNX2X_VFOP_MCAST_CHK_DONE;
1128 		mcast->mcast_list_len = vf->mcast_list_len;
1129 		vf->mcast_list_len = args->mc_num;
1130 		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
1131 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1132 
1133 	case BNX2X_VFOP_MCAST_ADD:
1134 		if (raw->check_pending(raw))
1135 			goto op_pending;
1136 
1137 		/* update mcast list on the ramrod params */
1138 		INIT_LIST_HEAD(&mcast->mcast_list);
1139 		for (i = 0; i < args->mc_num; i++)
1140 			list_add_tail(&(args->mc[i].link),
1141 				      &mcast->mcast_list);
1142 		mcast->mcast_list_len = args->mc_num;
1143 
1144 		/* add new mcasts */
1145 		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
1146 		vfop->rc = bnx2x_config_mcast(bp, mcast,
1147 					      BNX2X_MCAST_CMD_ADD);
1148 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1149 
1150 	case BNX2X_VFOP_MCAST_CHK_DONE:
1151 		vfop->rc = raw->check_pending(raw) ? 1 : 0;
1152 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1153 	default:
1154 		bnx2x_vfop_default(state);
1155 	}
1156 op_err:
1157 	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
1158 op_done:
1159 	kfree(args->mc);
1160 	bnx2x_vfop_end(bp, vf, vfop);
1161 op_pending:
1162 	return;
1163 }
1164 
1165 int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
1166 			 struct bnx2x_virtf *vf,
1167 			 struct bnx2x_vfop_cmd *cmd,
1168 			 bnx2x_mac_addr_t *mcasts,
1169 			 int mcast_num, bool drv_only)
1170 {
1171 	struct bnx2x_vfop *vfop = NULL;
1172 	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
1173 	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
1174 					   NULL;
1175 
1176 	if (!mc_sz || mc) {
1177 		vfop = bnx2x_vfop_add(bp, vf);
1178 		if (vfop) {
1179 			int i;
1180 			struct bnx2x_mcast_ramrod_params *ramrod =
1181 				&vf->op_params.mcast;
1182 
1183 			/* set ramrod params */
1184 			memset(ramrod, 0, sizeof(*ramrod));
1185 			ramrod->mcast_obj = &vf->mcast_obj;
1186 			if (drv_only)
1187 				set_bit(RAMROD_DRV_CLR_ONLY,
1188 					&ramrod->ramrod_flags);
1189 
1190 			/* copy mcasts pointers */
1191 			vfop->args.mc_list.mc_num = mcast_num;
1192 			vfop->args.mc_list.mc = mc;
1193 			for (i = 0; i < mcast_num; i++)
1194 				mc[i].mac = mcasts[i];
1195 
1196 			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
1197 					 bnx2x_vfop_mcast, cmd->done);
1198 			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
1199 						     cmd->block);
1200 		} else {
1201 			kfree(mc);
1202 		}
1203 	}
1204 	return -ENOMEM;
1205 }
1206 
1207 /* VFOP rx-mode */
1208 static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1209 {
1210 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1211 	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
1212 	enum bnx2x_vfop_rxmode_state state = vfop->state;
1213 
1214 	bnx2x_vfop_reset_wq(vf);
1215 
1216 	if (vfop->rc < 0)
1217 		goto op_err;
1218 
1219 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1220 
1221 	switch (state) {
1222 	case BNX2X_VFOP_RXMODE_CONFIG:
1223 		/* next state */
1224 		vfop->state = BNX2X_VFOP_RXMODE_DONE;
1225 
1226 		/* record the accept flags in vfdb so hypervisor can modify them
1227 		 * if necessary
1228 		 */
1229 		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
1230 			ramrod->rx_accept_flags;
1231 		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1232 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1233 op_err:
1234 		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
1235 op_done:
1236 	case BNX2X_VFOP_RXMODE_DONE:
1237 		bnx2x_vfop_end(bp, vf, vfop);
1238 		return;
1239 	default:
1240 		bnx2x_vfop_default(state);
1241 	}
1242 op_pending:
1243 	return;
1244 }
1245 
1246 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
1247 				  struct bnx2x_rx_mode_ramrod_params *ramrod,
1248 				  struct bnx2x_virtf *vf,
1249 				  unsigned long accept_flags)
1250 {
1251 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1252 
1253 	memset(ramrod, 0, sizeof(*ramrod));
1254 	ramrod->cid = vfq->cid;
1255 	ramrod->cl_id = vfq_cl_id(vf, vfq);
1256 	ramrod->rx_mode_obj = &bp->rx_mode_obj;
1257 	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1258 	ramrod->rx_accept_flags = accept_flags;
1259 	ramrod->tx_accept_flags = accept_flags;
1260 	ramrod->pstate = &vf->filter_state;
1261 	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1262 
1263 	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1264 	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1265 	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1266 
1267 	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1268 	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1269 }
1270 
1271 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1272 			  struct bnx2x_virtf *vf,
1273 			  struct bnx2x_vfop_cmd *cmd,
1274 			  int qid, unsigned long accept_flags)
1275 {
1276 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1277 
1278 	if (vfop) {
1279 		struct bnx2x_rx_mode_ramrod_params *ramrod =
1280 			&vf->op_params.rx_mode;
1281 
1282 		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
1283 
1284 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1285 				 bnx2x_vfop_rxmode, cmd->done);
1286 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
1287 					     cmd->block);
1288 	}
1289 	return -ENOMEM;
1290 }
1291 
1292 /* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
1293  * queue destructor)
1294  */
1295 static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
1296 {
1297 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1298 	int qid = vfop->args.qx.qid;
1299 	enum bnx2x_vfop_qteardown_state state = vfop->state;
1300 	struct bnx2x_vfop_cmd cmd;
1301 
1302 	if (vfop->rc < 0)
1303 		goto op_err;
1304 
1305 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1306 
1307 	cmd.done = bnx2x_vfop_qdown;
1308 	cmd.block = false;
1309 
1310 	switch (state) {
1311 	case BNX2X_VFOP_QTEARDOWN_RXMODE:
1312 		/* Drop all */
1313 		if (bnx2x_validate_vf_sp_objs(bp, vf, true))
1314 			vfop->state =  BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
1315 		else
1316 			vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1317 		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
1318 		if (vfop->rc)
1319 			goto op_err;
1320 		return;
1321 
1322 	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
1323 		/* vlan-clear-all: don't consume credit */
1324 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
1325 		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
1326 		if (vfop->rc)
1327 			goto op_err;
1328 		return;
1329 
1330 	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
1331 		/* mac-clear-all: consume credit */
1332 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
1333 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
1334 		if (vfop->rc)
1335 			goto op_err;
1336 		return;
1337 
1338 	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
1339 		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1340 		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
1341 		if (vfop->rc)
1342 			goto op_err;
1343 		return;
1344 
1345 	case BNX2X_VFOP_QTEARDOWN_QDTOR:
1346 		/* run the queue destruction flow */
1347 		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
1348 		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
1349 		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
1350 		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
1351 		DP(BNX2X_MSG_IOV, "returned from cmd\n");
1352 		if (vfop->rc)
1353 			goto op_err;
1354 		return;
1355 op_err:
1356 	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
1357 		  vf->abs_vfid, qid, vfop->rc);
1358 
1359 	case BNX2X_VFOP_QTEARDOWN_DONE:
1360 		bnx2x_vfop_end(bp, vf, vfop);
1361 		return;
1362 	default:
1363 		bnx2x_vfop_default(state);
1364 	}
1365 }
1366 
1367 int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
1368 			 struct bnx2x_virtf *vf,
1369 			 struct bnx2x_vfop_cmd *cmd,
1370 			 int qid)
1371 {
1372 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1373 
1374 	/* for non leading queues skip directly to qdown sate */
1375 	if (vfop) {
1376 		vfop->args.qx.qid = qid;
1377 		bnx2x_vfop_opset(qid == LEADING_IDX ?
1378 				 BNX2X_VFOP_QTEARDOWN_RXMODE :
1379 				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
1380 				 cmd->done);
1381 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
1382 					     cmd->block);
1383 	}
1384 
1385 	return -ENOMEM;
1386 }
1387 
1388 /* VF enable primitives
1389  * when pretend is required the caller is responsible
1390  * for calling pretend prior to calling these routines
1391  */
1392 
1393 /* internal vf enable - until vf is enabled internally all transactions
1394  * are blocked. This routine should always be called last with pretend.
1395  */
1396 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
1397 {
1398 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
1399 }
1400 
1401 /* clears vf error in all semi blocks */
1402 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
1403 {
1404 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
1405 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
1406 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
1407 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
1408 }
1409 
1410 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
1411 {
1412 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
1413 	u32 was_err_reg = 0;
1414 
1415 	switch (was_err_group) {
1416 	case 0:
1417 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
1418 	    break;
1419 	case 1:
1420 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
1421 	    break;
1422 	case 2:
1423 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
1424 	    break;
1425 	case 3:
1426 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
1427 	    break;
1428 	}
1429 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
1430 }
1431 
1432 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1433 {
1434 	int i;
1435 	u32 val;
1436 
1437 	/* Set VF masks and configuration - pretend */
1438 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1439 
1440 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
1441 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
1442 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
1443 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
1444 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
1445 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
1446 
1447 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1448 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
1449 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
1450 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
1451 	val &= ~IGU_VF_CONF_PARENT_MASK;
1452 	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
1453 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1454 
1455 	DP(BNX2X_MSG_IOV,
1456 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
1457 	   vf->abs_vfid, val);
1458 
1459 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1460 
1461 	/* iterate over all queues, clear sb consumer */
1462 	for (i = 0; i < vf_sb_count(vf); i++) {
1463 		u8 igu_sb_id = vf_igu_sb(vf, i);
1464 
1465 		/* zero prod memory */
1466 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
1467 
1468 		/* clear sb state machine */
1469 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
1470 				       false /* VF */);
1471 
1472 		/* disable + update */
1473 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
1474 				    IGU_INT_DISABLE, 1);
1475 	}
1476 }
1477 
1478 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
1479 {
1480 	/* set the VF-PF association in the FW */
1481 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
1482 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
1483 
1484 	/* clear vf errors*/
1485 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
1486 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
1487 
1488 	/* internal vf-enable - pretend */
1489 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
1490 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
1491 	bnx2x_vf_enable_internal(bp, true);
1492 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1493 }
1494 
1495 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
1496 {
1497 	/* Reset vf in IGU  interrupts are still disabled */
1498 	bnx2x_vf_igu_reset(bp, vf);
1499 
1500 	/* pretend to enable the vf with the PBF */
1501 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1502 	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
1503 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1504 }
1505 
1506 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1507 {
1508 	struct pci_dev *dev;
1509 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1510 
1511 	if (!vf)
1512 		return false;
1513 
1514 	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
1515 	if (dev)
1516 		return bnx2x_is_pcie_pending(dev);
1517 	return false;
1518 }
1519 
1520 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1521 {
1522 	/* Verify no pending pci transactions */
1523 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
1524 		BNX2X_ERR("PCIE Transactions still pending\n");
1525 
1526 	return 0;
1527 }
1528 
1529 /* must be called after the number of PF queues and the number of VFs are
1530  * both known
1531  */
1532 static void
1533 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1534 {
1535 	struct vf_pf_resc_request *resc = &vf->alloc_resc;
1536 	u16 vlan_count = 0;
1537 
1538 	/* will be set only during VF-ACQUIRE */
1539 	resc->num_rxqs = 0;
1540 	resc->num_txqs = 0;
1541 
1542 	/* no credit calculations for macs (just yet) */
1543 	resc->num_mac_filters = 1;
1544 
1545 	/* divvy up vlan rules */
1546 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
1547 	vlan_count = 1 << ilog2(vlan_count);
1548 	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
1549 
1550 	/* no real limitation */
1551 	resc->num_mc_filters = 0;
1552 
1553 	/* num_sbs already set */
1554 	resc->num_sbs = vf->sb_count;
1555 }
1556 
1557 /* FLR routines: */
1558 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
1559 {
1560 	/* reset the state variables */
1561 	bnx2x_iov_static_resc(bp, vf);
1562 	vf->state = VF_FREE;
1563 }
1564 
1565 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
1566 {
1567 	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1568 
1569 	/* DQ usage counter */
1570 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1571 	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
1572 					"DQ VF usage counter timed out",
1573 					poll_cnt);
1574 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1575 
1576 	/* FW cleanup command - poll for the results */
1577 	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
1578 				   poll_cnt))
1579 		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
1580 
1581 	/* verify TX hw is flushed */
1582 	bnx2x_tx_hw_flushed(bp, poll_cnt);
1583 }
1584 
1585 static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1586 {
1587 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1588 	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
1589 	enum bnx2x_vfop_flr_state state = vfop->state;
1590 	struct bnx2x_vfop_cmd cmd = {
1591 		.done = bnx2x_vfop_flr,
1592 		.block = false,
1593 	};
1594 
1595 	if (vfop->rc < 0)
1596 		goto op_err;
1597 
1598 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1599 
1600 	switch (state) {
1601 	case BNX2X_VFOP_FLR_QUEUES:
1602 		/* the cleanup operations are valid if and only if the VF
1603 		 * was first acquired.
1604 		 */
1605 		if (++(qx->qid) < vf_rxq_count(vf)) {
1606 			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
1607 						       qx->qid);
1608 			if (vfop->rc)
1609 				goto op_err;
1610 			return;
1611 		}
1612 		/* remove multicasts */
1613 		vfop->state = BNX2X_VFOP_FLR_HW;
1614 		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
1615 						0, true);
1616 		if (vfop->rc)
1617 			goto op_err;
1618 		return;
1619 	case BNX2X_VFOP_FLR_HW:
1620 
1621 		/* dispatch final cleanup and wait for HW queues to flush */
1622 		bnx2x_vf_flr_clnup_hw(bp, vf);
1623 
1624 		/* release VF resources */
1625 		bnx2x_vf_free_resc(bp, vf);
1626 
1627 		/* re-open the mailbox */
1628 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1629 
1630 		goto op_done;
1631 	default:
1632 		bnx2x_vfop_default(state);
1633 	}
1634 op_err:
1635 	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
1636 op_done:
1637 	vf->flr_clnup_stage = VF_FLR_ACK;
1638 	bnx2x_vfop_end(bp, vf, vfop);
1639 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1640 }
1641 
1642 static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
1643 			      struct bnx2x_virtf *vf,
1644 			      vfop_handler_t done)
1645 {
1646 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1647 	if (vfop) {
1648 		vfop->args.qx.qid = -1; /* loop */
1649 		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
1650 				 bnx2x_vfop_flr, done);
1651 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
1652 	}
1653 	return -ENOMEM;
1654 }
1655 
1656 static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
1657 {
1658 	int i = prev_vf ? prev_vf->index + 1 : 0;
1659 	struct bnx2x_virtf *vf;
1660 
1661 	/* find next VF to cleanup */
1662 next_vf_to_clean:
1663 	for (;
1664 	     i < BNX2X_NR_VIRTFN(bp) &&
1665 	     (bnx2x_vf(bp, i, state) != VF_RESET ||
1666 	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
1667 	     i++)
1668 		;
1669 
1670 	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
1671 	   BNX2X_NR_VIRTFN(bp));
1672 
1673 	if (i < BNX2X_NR_VIRTFN(bp)) {
1674 		vf = BP_VF(bp, i);
1675 
1676 		/* lock the vf pf channel */
1677 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
1678 
1679 		/* invoke the VF FLR SM */
1680 		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
1681 			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
1682 				  vf->abs_vfid);
1683 
1684 			/* mark the VF to be ACKED and continue */
1685 			vf->flr_clnup_stage = VF_FLR_ACK;
1686 			goto next_vf_to_clean;
1687 		}
1688 		return;
1689 	}
1690 
1691 	/* we are done, update vf records */
1692 	for_each_vf(bp, i) {
1693 		vf = BP_VF(bp, i);
1694 
1695 		if (vf->flr_clnup_stage != VF_FLR_ACK)
1696 			continue;
1697 
1698 		vf->flr_clnup_stage = VF_FLR_EPILOG;
1699 	}
1700 
1701 	/* Acknowledge the handled VFs.
1702 	 * we are acknowledge all the vfs which an flr was requested for, even
1703 	 * if amongst them there are such that we never opened, since the mcp
1704 	 * will interrupt us immediately again if we only ack some of the bits,
1705 	 * resulting in an endless loop. This can happen for example in KVM
1706 	 * where an 'all ones' flr request is sometimes given by hyper visor
1707 	 */
1708 	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
1709 	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1710 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1711 		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
1712 			  bp->vfdb->flrd_vfs[i]);
1713 
1714 	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
1715 
1716 	/* clear the acked bits - better yet if the MCP implemented
1717 	 * write to clear semantics
1718 	 */
1719 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1720 		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
1721 }
1722 
1723 void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1724 {
1725 	int i;
1726 
1727 	/* Read FLR'd VFs */
1728 	for (i = 0; i < FLRD_VFS_DWORDS; i++)
1729 		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
1730 
1731 	DP(BNX2X_MSG_MCP,
1732 	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
1733 	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1734 
1735 	for_each_vf(bp, i) {
1736 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1737 		u32 reset = 0;
1738 
1739 		if (vf->abs_vfid < 32)
1740 			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
1741 		else
1742 			reset = bp->vfdb->flrd_vfs[1] &
1743 				(1 << (vf->abs_vfid - 32));
1744 
1745 		if (reset) {
1746 			/* set as reset and ready for cleanup */
1747 			vf->state = VF_RESET;
1748 			vf->flr_clnup_stage = VF_FLR_CLN;
1749 
1750 			DP(BNX2X_MSG_IOV,
1751 			   "Initiating Final cleanup for VF %d\n",
1752 			   vf->abs_vfid);
1753 		}
1754 	}
1755 
1756 	/* do the FLR cleanup for all marked VFs*/
1757 	bnx2x_vf_flr_clnup(bp, NULL);
1758 }
1759 
1760 /* IOV global initialization routines  */
1761 void bnx2x_iov_init_dq(struct bnx2x *bp)
1762 {
1763 	if (!IS_SRIOV(bp))
1764 		return;
1765 
1766 	/* Set the DQ such that the CID reflect the abs_vfid */
1767 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1768 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1769 
1770 	/* Set VFs starting CID. If its > 0 the preceding CIDs are belong to
1771 	 * the PF L2 queues
1772 	 */
1773 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1774 
1775 	/* The VF window size is the log2 of the max number of CIDs per VF */
1776 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1777 
1778 	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
1779 	 * the Pf doorbell size although the 2 are independent.
1780 	 */
1781 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1782 
1783 	/* No security checks for now -
1784 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
1785 	 * CID range 0 - 0x1ffff
1786 	 */
1787 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1788 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1789 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1790 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1791 
1792 	/* set the VF doorbell threshold */
1793 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
1794 }
1795 
1796 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1797 {
1798 	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1799 		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1800 }
1801 
1802 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1803 {
1804 	struct pci_dev *dev = bp->pdev;
1805 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1806 
1807 	return dev->bus->number + ((dev->devfn + iov->offset +
1808 				    iov->stride * vfid) >> 8);
1809 }
1810 
1811 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1812 {
1813 	struct pci_dev *dev = bp->pdev;
1814 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1815 
1816 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1817 }
1818 
1819 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1820 {
1821 	int i, n;
1822 	struct pci_dev *dev = bp->pdev;
1823 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1824 
1825 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1826 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1827 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1828 
1829 		size /= iov->total;
1830 		vf->bars[n].bar = start + size * vf->abs_vfid;
1831 		vf->bars[n].size = size;
1832 	}
1833 }
1834 
1835 static int bnx2x_ari_enabled(struct pci_dev *dev)
1836 {
1837 	return dev->bus->self && dev->bus->self->ari_enabled;
1838 }
1839 
1840 static void
1841 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1842 {
1843 	int sb_id;
1844 	u32 val;
1845 	u8 fid, current_pf = 0;
1846 
1847 	/* IGU in normal mode - read CAM */
1848 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1849 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1850 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1851 			continue;
1852 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1853 		if (fid & IGU_FID_ENCODE_IS_PF)
1854 			current_pf = fid & IGU_FID_PF_NUM_MASK;
1855 		else if (current_pf == BP_FUNC(bp))
1856 			bnx2x_vf_set_igu_info(bp, sb_id,
1857 					      (fid & IGU_FID_VF_NUM_MASK));
1858 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1859 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1860 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1861 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1862 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1863 	}
1864 	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1865 }
1866 
1867 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1868 {
1869 	if (bp->vfdb) {
1870 		kfree(bp->vfdb->vfqs);
1871 		kfree(bp->vfdb->vfs);
1872 		kfree(bp->vfdb);
1873 	}
1874 	bp->vfdb = NULL;
1875 }
1876 
1877 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1878 {
1879 	int pos;
1880 	struct pci_dev *dev = bp->pdev;
1881 
1882 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1883 	if (!pos) {
1884 		BNX2X_ERR("failed to find SRIOV capability in device\n");
1885 		return -ENODEV;
1886 	}
1887 
1888 	iov->pos = pos;
1889 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1890 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1891 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1892 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1893 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1894 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1895 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1896 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1897 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1898 
1899 	return 0;
1900 }
1901 
1902 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1903 {
1904 	u32 val;
1905 
1906 	/* read the SRIOV capability structure
1907 	 * The fields can be read via configuration read or
1908 	 * directly from the device (starting at offset PCICFG_OFFSET)
1909 	 */
1910 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
1911 		return -ENODEV;
1912 
1913 	/* get the number of SRIOV bars */
1914 	iov->nres = 0;
1915 
1916 	/* read the first_vfid */
1917 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1918 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1919 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1920 
1921 	DP(BNX2X_MSG_IOV,
1922 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1923 	   BP_FUNC(bp),
1924 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1925 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1926 
1927 	return 0;
1928 }
1929 
1930 /* must be called after PF bars are mapped */
1931 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1932 		       int num_vfs_param)
1933 {
1934 	int err, i;
1935 	struct bnx2x_sriov *iov;
1936 	struct pci_dev *dev = bp->pdev;
1937 
1938 	bp->vfdb = NULL;
1939 
1940 	/* verify is pf */
1941 	if (IS_VF(bp))
1942 		return 0;
1943 
1944 	/* verify sriov capability is present in configuration space */
1945 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1946 		return 0;
1947 
1948 	/* verify chip revision */
1949 	if (CHIP_IS_E1x(bp))
1950 		return 0;
1951 
1952 	/* check if SRIOV support is turned off */
1953 	if (!num_vfs_param)
1954 		return 0;
1955 
1956 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1957 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1958 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1959 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1960 		return 0;
1961 	}
1962 
1963 	/* SRIOV can be enabled only with MSIX */
1964 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
1965 	    int_mode_param == BNX2X_INT_MODE_INTX) {
1966 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1967 		return 0;
1968 	}
1969 
1970 	err = -EIO;
1971 	/* verify ari is enabled */
1972 	if (!bnx2x_ari_enabled(bp->pdev)) {
1973 		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
1974 		return 0;
1975 	}
1976 
1977 	/* verify igu is in normal mode */
1978 	if (CHIP_INT_MODE_IS_BC(bp)) {
1979 		BNX2X_ERR("IGU not normal mode,  SRIOV can not be enabled\n");
1980 		return 0;
1981 	}
1982 
1983 	/* allocate the vfs database */
1984 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1985 	if (!bp->vfdb) {
1986 		BNX2X_ERR("failed to allocate vf database\n");
1987 		err = -ENOMEM;
1988 		goto failed;
1989 	}
1990 
1991 	/* get the sriov info - Linux already collected all the pertinent
1992 	 * information, however the sriov structure is for the private use
1993 	 * of the pci module. Also we want this information regardless
1994 	 * of the hyper-visor.
1995 	 */
1996 	iov = &(bp->vfdb->sriov);
1997 	err = bnx2x_sriov_info(bp, iov);
1998 	if (err)
1999 		goto failed;
2000 
2001 	/* SR-IOV capability was enabled but there are no VFs*/
2002 	if (iov->total == 0)
2003 		goto failed;
2004 
2005 	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
2006 
2007 	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
2008 	   num_vfs_param, iov->nr_virtfn);
2009 
2010 	/* allocate the vf array */
2011 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
2012 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
2013 	if (!bp->vfdb->vfs) {
2014 		BNX2X_ERR("failed to allocate vf array\n");
2015 		err = -ENOMEM;
2016 		goto failed;
2017 	}
2018 
2019 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
2020 	for_each_vf(bp, i) {
2021 		bnx2x_vf(bp, i, index) = i;
2022 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
2023 		bnx2x_vf(bp, i, state) = VF_FREE;
2024 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
2025 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
2026 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
2027 	}
2028 
2029 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
2030 	bnx2x_get_vf_igu_cam_info(bp);
2031 
2032 	/* allocate the queue arrays for all VFs */
2033 	bp->vfdb->vfqs = kzalloc(
2034 		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
2035 		GFP_KERNEL);
2036 
2037 	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
2038 
2039 	if (!bp->vfdb->vfqs) {
2040 		BNX2X_ERR("failed to allocate vf queue array\n");
2041 		err = -ENOMEM;
2042 		goto failed;
2043 	}
2044 
2045 	/* Prepare the VFs event synchronization mechanism */
2046 	mutex_init(&bp->vfdb->event_mutex);
2047 
2048 	return 0;
2049 failed:
2050 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
2051 	__bnx2x_iov_free_vfdb(bp);
2052 	return err;
2053 }
2054 
2055 void bnx2x_iov_remove_one(struct bnx2x *bp)
2056 {
2057 	int vf_idx;
2058 
2059 	/* if SRIOV is not enabled there's nothing to do */
2060 	if (!IS_SRIOV(bp))
2061 		return;
2062 
2063 	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
2064 	pci_disable_sriov(bp->pdev);
2065 	DP(BNX2X_MSG_IOV, "sriov disabled\n");
2066 
2067 	/* disable access to all VFs */
2068 	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
2069 		bnx2x_pretend_func(bp,
2070 				   HW_VF_HANDLE(bp,
2071 						bp->vfdb->sriov.first_vf_in_pf +
2072 						vf_idx));
2073 		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
2074 		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
2075 		bnx2x_vf_enable_internal(bp, 0);
2076 		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2077 	}
2078 
2079 	/* free vf database */
2080 	__bnx2x_iov_free_vfdb(bp);
2081 }
2082 
2083 void bnx2x_iov_free_mem(struct bnx2x *bp)
2084 {
2085 	int i;
2086 
2087 	if (!IS_SRIOV(bp))
2088 		return;
2089 
2090 	/* free vfs hw contexts */
2091 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2092 		struct hw_dma *cxt = &bp->vfdb->context[i];
2093 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
2094 	}
2095 
2096 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
2097 		       BP_VFDB(bp)->sp_dma.mapping,
2098 		       BP_VFDB(bp)->sp_dma.size);
2099 
2100 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
2101 		       BP_VF_MBX_DMA(bp)->mapping,
2102 		       BP_VF_MBX_DMA(bp)->size);
2103 
2104 	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
2105 		       BP_VF_BULLETIN_DMA(bp)->mapping,
2106 		       BP_VF_BULLETIN_DMA(bp)->size);
2107 }
2108 
2109 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
2110 {
2111 	size_t tot_size;
2112 	int i, rc = 0;
2113 
2114 	if (!IS_SRIOV(bp))
2115 		return rc;
2116 
2117 	/* allocate vfs hw contexts */
2118 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
2119 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
2120 
2121 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2122 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
2123 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
2124 
2125 		if (cxt->size) {
2126 			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
2127 			if (!cxt->addr)
2128 				goto alloc_mem_err;
2129 		} else {
2130 			cxt->addr = NULL;
2131 			cxt->mapping = 0;
2132 		}
2133 		tot_size -= cxt->size;
2134 	}
2135 
2136 	/* allocate vfs ramrods dma memory - client_init and set_mac */
2137 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
2138 	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
2139 						   tot_size);
2140 	if (!BP_VFDB(bp)->sp_dma.addr)
2141 		goto alloc_mem_err;
2142 	BP_VFDB(bp)->sp_dma.size = tot_size;
2143 
2144 	/* allocate mailboxes */
2145 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
2146 	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
2147 						  tot_size);
2148 	if (!BP_VF_MBX_DMA(bp)->addr)
2149 		goto alloc_mem_err;
2150 
2151 	BP_VF_MBX_DMA(bp)->size = tot_size;
2152 
2153 	/* allocate local bulletin boards */
2154 	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
2155 	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
2156 						       tot_size);
2157 	if (!BP_VF_BULLETIN_DMA(bp)->addr)
2158 		goto alloc_mem_err;
2159 
2160 	BP_VF_BULLETIN_DMA(bp)->size = tot_size;
2161 
2162 	return 0;
2163 
2164 alloc_mem_err:
2165 	return -ENOMEM;
2166 }
2167 
2168 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2169 			   struct bnx2x_vf_queue *q)
2170 {
2171 	u8 cl_id = vfq_cl_id(vf, q);
2172 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
2173 	unsigned long q_type = 0;
2174 
2175 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
2176 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
2177 
2178 	/* Queue State object */
2179 	bnx2x_init_queue_obj(bp, &q->sp_obj,
2180 			     cl_id, &q->cid, 1, func_id,
2181 			     bnx2x_vf_sp(bp, vf, q_data),
2182 			     bnx2x_vf_sp_map(bp, vf, q_data),
2183 			     q_type);
2184 
2185 	/* sp indication is set only when vlan/mac/etc. are initialized */
2186 	q->sp_initialized = false;
2187 
2188 	DP(BNX2X_MSG_IOV,
2189 	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
2190 	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
2191 }
2192 
2193 /* called by bnx2x_nic_load */
2194 int bnx2x_iov_nic_init(struct bnx2x *bp)
2195 {
2196 	int vfid;
2197 
2198 	if (!IS_SRIOV(bp)) {
2199 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
2200 		return 0;
2201 	}
2202 
2203 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
2204 
2205 	/* let FLR complete ... */
2206 	msleep(100);
2207 
2208 	/* initialize vf database */
2209 	for_each_vf(bp, vfid) {
2210 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2211 
2212 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
2213 			BNX2X_CIDS_PER_VF;
2214 
2215 		union cdu_context *base_cxt = (union cdu_context *)
2216 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2217 			(base_vf_cid & (ILT_PAGE_CIDS-1));
2218 
2219 		DP(BNX2X_MSG_IOV,
2220 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
2221 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
2222 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
2223 
2224 		/* init statically provisioned resources */
2225 		bnx2x_iov_static_resc(bp, vf);
2226 
2227 		/* queues are initialized during VF-ACQUIRE */
2228 
2229 		/* reserve the vf vlan credit */
2230 		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
2231 
2232 		vf->filter_state = 0;
2233 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
2234 
2235 		/*  init mcast object - This object will be re-initialized
2236 		 *  during VF-ACQUIRE with the proper cl_id and cid.
2237 		 *  It needs to be initialized here so that it can be safely
2238 		 *  handled by a subsequent FLR flow.
2239 		 */
2240 		vf->mcast_list_len = 0;
2241 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
2242 				     0xFF, 0xFF, 0xFF,
2243 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
2244 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
2245 				     BNX2X_FILTER_MCAST_PENDING,
2246 				     &vf->filter_state,
2247 				     BNX2X_OBJ_TYPE_RX_TX);
2248 
2249 		/* set the mailbox message addresses */
2250 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
2251 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
2252 			MBX_MSG_ALIGNED_SIZE);
2253 
2254 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
2255 			vfid * MBX_MSG_ALIGNED_SIZE;
2256 
2257 		/* Enable vf mailbox */
2258 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
2259 	}
2260 
2261 	/* Final VF init */
2262 	for_each_vf(bp, vfid) {
2263 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
2264 
2265 		/* fill in the BDF and bars */
2266 		vf->bus = bnx2x_vf_bus(bp, vfid);
2267 		vf->devfn = bnx2x_vf_devfn(bp, vfid);
2268 		bnx2x_vf_set_bars(bp, vf);
2269 
2270 		DP(BNX2X_MSG_IOV,
2271 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
2272 		   vf->abs_vfid, vf->bus, vf->devfn,
2273 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
2274 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
2275 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
2276 	}
2277 
2278 	return 0;
2279 }
2280 
2281 /* called by bnx2x_chip_cleanup */
2282 int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
2283 {
2284 	int i;
2285 
2286 	if (!IS_SRIOV(bp))
2287 		return 0;
2288 
2289 	/* release all the VFs */
2290 	for_each_vf(bp, i)
2291 		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
2292 
2293 	return 0;
2294 }
2295 
2296 /* called by bnx2x_init_hw_func, returns the next ilt line */
2297 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
2298 {
2299 	int i;
2300 	struct bnx2x_ilt *ilt = BP_ILT(bp);
2301 
2302 	if (!IS_SRIOV(bp))
2303 		return line;
2304 
2305 	/* set vfs ilt lines */
2306 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
2307 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
2308 
2309 		ilt->lines[line+i].page = hw_cxt->addr;
2310 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
2311 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
2312 	}
2313 	return line + i;
2314 }
2315 
2316 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
2317 {
2318 	return ((cid >= BNX2X_FIRST_VF_CID) &&
2319 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
2320 }
2321 
2322 static
2323 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
2324 					struct bnx2x_vf_queue *vfq,
2325 					union event_ring_elem *elem)
2326 {
2327 	unsigned long ramrod_flags = 0;
2328 	int rc = 0;
2329 
2330 	/* Always push next commands out, don't wait here */
2331 	set_bit(RAMROD_CONT, &ramrod_flags);
2332 
2333 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
2334 	case BNX2X_FILTER_MAC_PENDING:
2335 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
2336 					   &ramrod_flags);
2337 		break;
2338 	case BNX2X_FILTER_VLAN_PENDING:
2339 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
2340 					    &ramrod_flags);
2341 		break;
2342 	default:
2343 		BNX2X_ERR("Unsupported classification command: %d\n",
2344 			  elem->message.data.eth_event.echo);
2345 		return;
2346 	}
2347 	if (rc < 0)
2348 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
2349 	else if (rc > 0)
2350 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
2351 }
2352 
2353 static
2354 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
2355 			       struct bnx2x_virtf *vf)
2356 {
2357 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2358 	int rc;
2359 
2360 	rparam.mcast_obj = &vf->mcast_obj;
2361 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2362 
2363 	/* If there are pending mcast commands - send them */
2364 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2365 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2366 		if (rc < 0)
2367 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2368 				  rc);
2369 	}
2370 }
2371 
2372 static
2373 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2374 				 struct bnx2x_virtf *vf)
2375 {
2376 	smp_mb__before_clear_bit();
2377 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2378 	smp_mb__after_clear_bit();
2379 }
2380 
2381 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2382 {
2383 	struct bnx2x_virtf *vf;
2384 	int qidx = 0, abs_vfid;
2385 	u8 opcode;
2386 	u16 cid = 0xffff;
2387 
2388 	if (!IS_SRIOV(bp))
2389 		return 1;
2390 
2391 	/* first get the cid - the only events we handle here are cfc-delete
2392 	 * and set-mac completion
2393 	 */
2394 	opcode = elem->message.opcode;
2395 
2396 	switch (opcode) {
2397 	case EVENT_RING_OPCODE_CFC_DEL:
2398 		cid = SW_CID((__force __le32)
2399 			     elem->message.data.cfc_del_event.cid);
2400 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2401 		break;
2402 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2403 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2404 	case EVENT_RING_OPCODE_FILTERS_RULES:
2405 		cid = (elem->message.data.eth_event.echo &
2406 		       BNX2X_SWCID_MASK);
2407 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2408 		break;
2409 	case EVENT_RING_OPCODE_VF_FLR:
2410 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
2411 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2412 		   abs_vfid);
2413 		goto get_vf;
2414 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2415 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2416 		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
2417 			  abs_vfid,
2418 			  elem->message.data.malicious_vf_event.err_id);
2419 		goto get_vf;
2420 	default:
2421 		return 1;
2422 	}
2423 
2424 	/* check if the cid is the VF range */
2425 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2426 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2427 		return 1;
2428 	}
2429 
2430 	/* extract vf and rxq index from vf_cid - relies on the following:
2431 	 * 1. vfid on cid reflects the true abs_vfid
2432 	 * 2. The max number of VFs (per path) is 64
2433 	 */
2434 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2435 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2436 get_vf:
2437 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
2438 
2439 	if (!vf) {
2440 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
2441 			  cid, abs_vfid);
2442 		return 0;
2443 	}
2444 
2445 	switch (opcode) {
2446 	case EVENT_RING_OPCODE_CFC_DEL:
2447 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
2448 		   vf->abs_vfid, qidx);
2449 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
2450 						       &vfq_get(vf,
2451 								qidx)->sp_obj,
2452 						       BNX2X_Q_CMD_CFC_DEL);
2453 		break;
2454 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2455 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
2456 		   vf->abs_vfid, qidx);
2457 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
2458 		break;
2459 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2460 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
2461 		   vf->abs_vfid, qidx);
2462 		bnx2x_vf_handle_mcast_eqe(bp, vf);
2463 		break;
2464 	case EVENT_RING_OPCODE_FILTERS_RULES:
2465 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
2466 		   vf->abs_vfid, qidx);
2467 		bnx2x_vf_handle_filters_eqe(bp, vf);
2468 		break;
2469 	case EVENT_RING_OPCODE_VF_FLR:
2470 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2471 		/* Do nothing for now */
2472 		return 0;
2473 	}
2474 	/* SRIOV: reschedule any 'in_progress' operations */
2475 	bnx2x_iov_sp_event(bp, cid);
2476 
2477 	return 0;
2478 }
2479 
2480 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2481 {
2482 	/* extract the vf from vf_cid - relies on the following:
2483 	 * 1. vfid on cid reflects the true abs_vfid
2484 	 * 2. The max number of VFs (per path) is 64
2485 	 */
2486 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2487 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
2488 }
2489 
2490 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2491 				struct bnx2x_queue_sp_obj **q_obj)
2492 {
2493 	struct bnx2x_virtf *vf;
2494 
2495 	if (!IS_SRIOV(bp))
2496 		return;
2497 
2498 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2499 
2500 	if (vf) {
2501 		/* extract queue index from vf_cid - relies on the following:
2502 		 * 1. vfid on cid reflects the true abs_vfid
2503 		 * 2. The max number of VFs (per path) is 64
2504 		 */
2505 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2506 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2507 	} else {
2508 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2509 	}
2510 }
2511 
2512 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid)
2513 {
2514 	struct bnx2x_virtf *vf;
2515 
2516 	/* check if the cid is the VF range */
2517 	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2518 		return;
2519 
2520 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2521 	if (vf) {
2522 		/* set in_progress flag */
2523 		atomic_set(&vf->op_in_progress, 1);
2524 		bnx2x_schedule_iov_task(bp, BNX2X_IOV_CONT_VFOP);
2525 	}
2526 }
2527 
2528 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2529 {
2530 	int i;
2531 	int first_queue_query_index, num_queues_req;
2532 	dma_addr_t cur_data_offset;
2533 	struct stats_query_entry *cur_query_entry;
2534 	u8 stats_count = 0;
2535 	bool is_fcoe = false;
2536 
2537 	if (!IS_SRIOV(bp))
2538 		return;
2539 
2540 	if (!NO_FCOE(bp))
2541 		is_fcoe = true;
2542 
2543 	/* fcoe adds one global request and one queue request */
2544 	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2545 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2546 		(is_fcoe ? 0 : 1);
2547 
2548 	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2549 	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2550 	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2551 	       first_queue_query_index + num_queues_req);
2552 
2553 	cur_data_offset = bp->fw_stats_data_mapping +
2554 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2555 		num_queues_req * sizeof(struct per_queue_stats);
2556 
2557 	cur_query_entry = &bp->fw_stats_req->
2558 		query[first_queue_query_index + num_queues_req];
2559 
2560 	for_each_vf(bp, i) {
2561 		int j;
2562 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2563 
2564 		if (vf->state != VF_ENABLED) {
2565 			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2566 			       "vf %d not enabled so no stats for it\n",
2567 			       vf->abs_vfid);
2568 			continue;
2569 		}
2570 
2571 		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2572 		for_each_vfq(vf, j) {
2573 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2574 
2575 			dma_addr_t q_stats_addr =
2576 				vf->fw_stat_map + j * vf->stats_stride;
2577 
2578 			/* collect stats fro active queues only */
2579 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2580 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
2581 				continue;
2582 
2583 			/* create stats query entry for this queue */
2584 			cur_query_entry->kind = STATS_TYPE_QUEUE;
2585 			cur_query_entry->index = vfq_stat_id(vf, rxq);
2586 			cur_query_entry->funcID =
2587 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2588 			cur_query_entry->address.hi =
2589 				cpu_to_le32(U64_HI(q_stats_addr));
2590 			cur_query_entry->address.lo =
2591 				cpu_to_le32(U64_LO(q_stats_addr));
2592 			DP(BNX2X_MSG_IOV,
2593 			   "added address %x %x for vf %d queue %d client %d\n",
2594 			   cur_query_entry->address.hi,
2595 			   cur_query_entry->address.lo, cur_query_entry->funcID,
2596 			   j, cur_query_entry->index);
2597 			cur_query_entry++;
2598 			cur_data_offset += sizeof(struct per_queue_stats);
2599 			stats_count++;
2600 
2601 			/* all stats are coalesced to the leading queue */
2602 			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
2603 				break;
2604 		}
2605 	}
2606 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2607 }
2608 
2609 void bnx2x_iov_vfop_cont(struct bnx2x *bp)
2610 {
2611 	int i;
2612 
2613 	if (!IS_SRIOV(bp))
2614 		return;
2615 	/* Iterate over all VFs and invoke state transition for VFs with
2616 	 * 'in-progress' slow-path operations
2617 	 */
2618 	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
2619 	       "searching for pending vf operations\n");
2620 	for_each_vf(bp, i) {
2621 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2622 
2623 		if (!vf) {
2624 			BNX2X_ERR("VF was null! skipping...\n");
2625 			continue;
2626 		}
2627 
2628 		if (!list_empty(&vf->op_list_head) &&
2629 		    atomic_read(&vf->op_in_progress)) {
2630 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2631 			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2632 		}
2633 	}
2634 }
2635 
2636 static inline
2637 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2638 {
2639 	int i;
2640 	struct bnx2x_virtf *vf = NULL;
2641 
2642 	for_each_vf(bp, i) {
2643 		vf = BP_VF(bp, i);
2644 		if (stat_id >= vf->igu_base_id &&
2645 		    stat_id < vf->igu_base_id + vf_sb_count(vf))
2646 			break;
2647 	}
2648 	return vf;
2649 }
2650 
2651 /* VF API helpers */
2652 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2653 				u8 enable)
2654 {
2655 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2656 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2657 
2658 	REG_WR(bp, reg, val);
2659 }
2660 
2661 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2662 {
2663 	int i;
2664 
2665 	for_each_vfq(vf, i)
2666 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2667 				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
2668 }
2669 
2670 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2671 {
2672 	u32 val;
2673 
2674 	/* clear the VF configuration - pretend */
2675 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2676 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2677 	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2678 		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2679 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2680 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2681 }
2682 
2683 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2684 {
2685 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2686 		     BNX2X_VF_MAX_QUEUES);
2687 }
2688 
2689 static
2690 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2691 			    struct vf_pf_resc_request *req_resc)
2692 {
2693 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2694 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2695 
2696 	return ((req_resc->num_rxqs <= rxq_cnt) &&
2697 		(req_resc->num_txqs <= txq_cnt) &&
2698 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
2699 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2700 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2701 }
2702 
2703 /* CORE VF API */
2704 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2705 		     struct vf_pf_resc_request *resc)
2706 {
2707 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2708 		BNX2X_CIDS_PER_VF;
2709 
2710 	union cdu_context *base_cxt = (union cdu_context *)
2711 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2712 		(base_vf_cid & (ILT_PAGE_CIDS-1));
2713 	int i;
2714 
2715 	/* if state is 'acquired' the VF was not released or FLR'd, in
2716 	 * this case the returned resources match the acquired already
2717 	 * acquired resources. Verify that the requested numbers do
2718 	 * not exceed the already acquired numbers.
2719 	 */
2720 	if (vf->state == VF_ACQUIRED) {
2721 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2722 		   vf->abs_vfid);
2723 
2724 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2725 			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n",
2726 				  vf->abs_vfid);
2727 			return -EINVAL;
2728 		}
2729 		return 0;
2730 	}
2731 
2732 	/* Otherwise vf state must be 'free' or 'reset' */
2733 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
2734 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2735 			  vf->abs_vfid, vf->state);
2736 		return -EINVAL;
2737 	}
2738 
2739 	/* static allocation:
2740 	 * the global maximum number are fixed per VF. Fail the request if
2741 	 * requested number exceed these globals
2742 	 */
2743 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2744 		DP(BNX2X_MSG_IOV,
2745 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2746 		/* set the max resource in the vf */
2747 		return -ENOMEM;
2748 	}
2749 
2750 	/* Set resources counters - 0 request means max available */
2751 	vf_sb_count(vf) = resc->num_sbs;
2752 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2753 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2754 	if (resc->num_mac_filters)
2755 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2756 	if (resc->num_vlan_filters)
2757 		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2758 
2759 	DP(BNX2X_MSG_IOV,
2760 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2761 	   vf_sb_count(vf), vf_rxq_count(vf),
2762 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
2763 	   vf_vlan_rules_cnt(vf));
2764 
2765 	/* Initialize the queues */
2766 	if (!vf->vfqs) {
2767 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2768 		return -EINVAL;
2769 	}
2770 
2771 	for_each_vfq(vf, i) {
2772 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
2773 
2774 		if (!q) {
2775 			BNX2X_ERR("q number %d was not allocated\n", i);
2776 			return -EINVAL;
2777 		}
2778 
2779 		q->index = i;
2780 		q->cxt = &((base_cxt + i)->eth);
2781 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2782 
2783 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2784 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2785 
2786 		/* init SP objects */
2787 		bnx2x_vfq_init(bp, vf, q);
2788 	}
2789 	vf->state = VF_ACQUIRED;
2790 	return 0;
2791 }
2792 
2793 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2794 {
2795 	struct bnx2x_func_init_params func_init = {0};
2796 	u16 flags = 0;
2797 	int i;
2798 
2799 	/* the sb resources are initialized at this point, do the
2800 	 * FW/HW initializations
2801 	 */
2802 	for_each_vf_sb(vf, i)
2803 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2804 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2805 
2806 	/* Sanity checks */
2807 	if (vf->state != VF_ACQUIRED) {
2808 		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2809 		   vf->abs_vfid, vf->state);
2810 		return -EINVAL;
2811 	}
2812 
2813 	/* let FLR complete ... */
2814 	msleep(100);
2815 
2816 	/* FLR cleanup epilogue */
2817 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2818 		return -EBUSY;
2819 
2820 	/* reset IGU VF statistics: MSIX */
2821 	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
2822 
2823 	/* vf init */
2824 	if (vf->cfg_flags & VF_CFG_STATS)
2825 		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2826 
2827 	if (vf->cfg_flags & VF_CFG_TPA)
2828 		flags |= FUNC_FLG_TPA;
2829 
2830 	if (is_vf_multi(vf))
2831 		flags |= FUNC_FLG_RSS;
2832 
2833 	/* function setup */
2834 	func_init.func_flgs = flags;
2835 	func_init.pf_id = BP_FUNC(bp);
2836 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2837 	func_init.fw_stat_map = vf->fw_stat_map;
2838 	func_init.spq_map = vf->spq_map;
2839 	func_init.spq_prod = 0;
2840 	bnx2x_func_init(bp, &func_init);
2841 
2842 	/* Enable the vf */
2843 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
2844 	bnx2x_vf_enable_traffic(bp, vf);
2845 
2846 	/* queue protection table */
2847 	for_each_vfq(vf, i)
2848 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2849 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2850 
2851 	vf->state = VF_ENABLED;
2852 
2853 	/* update vf bulletin board */
2854 	bnx2x_post_vf_bulletin(bp, vf->index);
2855 
2856 	return 0;
2857 }
2858 
2859 struct set_vf_state_cookie {
2860 	struct bnx2x_virtf *vf;
2861 	u8 state;
2862 };
2863 
2864 static void bnx2x_set_vf_state(void *cookie)
2865 {
2866 	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2867 
2868 	p->vf->state = p->state;
2869 }
2870 
2871 /* VFOP close (teardown the queues, delete mcasts and close HW) */
2872 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2873 {
2874 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2875 	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2876 	enum bnx2x_vfop_close_state state = vfop->state;
2877 	struct bnx2x_vfop_cmd cmd = {
2878 		.done = bnx2x_vfop_close,
2879 		.block = false,
2880 	};
2881 
2882 	if (vfop->rc < 0)
2883 		goto op_err;
2884 
2885 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2886 
2887 	switch (state) {
2888 	case BNX2X_VFOP_CLOSE_QUEUES:
2889 
2890 		if (++(qx->qid) < vf_rxq_count(vf)) {
2891 			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2892 			if (vfop->rc)
2893 				goto op_err;
2894 			return;
2895 		}
2896 		vfop->state = BNX2X_VFOP_CLOSE_HW;
2897 		vfop->rc = 0;
2898 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
2899 
2900 	case BNX2X_VFOP_CLOSE_HW:
2901 
2902 		/* disable the interrupts */
2903 		DP(BNX2X_MSG_IOV, "disabling igu\n");
2904 		bnx2x_vf_igu_disable(bp, vf);
2905 
2906 		/* disable the VF */
2907 		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2908 		bnx2x_vf_clr_qtbl(bp, vf);
2909 
2910 		goto op_done;
2911 	default:
2912 		bnx2x_vfop_default(state);
2913 	}
2914 op_err:
2915 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2916 op_done:
2917 
2918 	/* need to make sure there are no outstanding stats ramrods which may
2919 	 * cause the device to access the VF's stats buffer which it will free
2920 	 * as soon as we return from the close flow.
2921 	 */
2922 	{
2923 		struct set_vf_state_cookie cookie;
2924 
2925 		cookie.vf = vf;
2926 		cookie.state = VF_ACQUIRED;
2927 		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2928 	}
2929 
2930 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
2931 	bnx2x_vfop_end(bp, vf, vfop);
2932 op_pending:
2933 	/* Not supported at the moment; Exists for macros only */
2934 	return;
2935 }
2936 
2937 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2938 			 struct bnx2x_virtf *vf,
2939 			 struct bnx2x_vfop_cmd *cmd)
2940 {
2941 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2942 	if (vfop) {
2943 		vfop->args.qx.qid = -1; /* loop */
2944 		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2945 				 bnx2x_vfop_close, cmd->done);
2946 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2947 					     cmd->block);
2948 	}
2949 	return -ENOMEM;
2950 }
2951 
2952 /* VF release can be called either: 1. The VF was acquired but
2953  * not enabled 2. the vf was enabled or in the process of being
2954  * enabled
2955  */
2956 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2957 {
2958 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2959 	struct bnx2x_vfop_cmd cmd = {
2960 		.done = bnx2x_vfop_release,
2961 		.block = false,
2962 	};
2963 
2964 	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2965 
2966 	if (vfop->rc < 0)
2967 		goto op_err;
2968 
2969 	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2970 	   vf->state == VF_FREE ? "Free" :
2971 	   vf->state == VF_ACQUIRED ? "Acquired" :
2972 	   vf->state == VF_ENABLED ? "Enabled" :
2973 	   vf->state == VF_RESET ? "Reset" :
2974 	   "Unknown");
2975 
2976 	switch (vf->state) {
2977 	case VF_ENABLED:
2978 		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
2979 		if (vfop->rc)
2980 			goto op_err;
2981 		return;
2982 
2983 	case VF_ACQUIRED:
2984 		DP(BNX2X_MSG_IOV, "about to free resources\n");
2985 		bnx2x_vf_free_resc(bp, vf);
2986 		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
2987 		goto op_done;
2988 
2989 	case VF_FREE:
2990 	case VF_RESET:
2991 		/* do nothing */
2992 		goto op_done;
2993 	default:
2994 		bnx2x_vfop_default(vf->state);
2995 	}
2996 op_err:
2997 	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
2998 op_done:
2999 	bnx2x_vfop_end(bp, vf, vfop);
3000 }
3001 
3002 static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
3003 {
3004 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
3005 	enum bnx2x_vfop_rss_state state;
3006 
3007 	if (!vfop) {
3008 		BNX2X_ERR("vfop was null\n");
3009 		return;
3010 	}
3011 
3012 	state = vfop->state;
3013 	bnx2x_vfop_reset_wq(vf);
3014 
3015 	if (vfop->rc < 0)
3016 		goto op_err;
3017 
3018 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
3019 
3020 	switch (state) {
3021 	case BNX2X_VFOP_RSS_CONFIG:
3022 		/* next state */
3023 		vfop->state = BNX2X_VFOP_RSS_DONE;
3024 		bnx2x_config_rss(bp, &vfop->op_p->rss);
3025 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3026 op_err:
3027 		BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
3028 op_done:
3029 	case BNX2X_VFOP_RSS_DONE:
3030 		bnx2x_vfop_end(bp, vf, vfop);
3031 		return;
3032 	default:
3033 		bnx2x_vfop_default(state);
3034 	}
3035 op_pending:
3036 	return;
3037 }
3038 
3039 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
3040 			   struct bnx2x_virtf *vf,
3041 			   struct bnx2x_vfop_cmd *cmd)
3042 {
3043 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3044 	if (vfop) {
3045 		bnx2x_vfop_opset(-1, /* use vf->state */
3046 				 bnx2x_vfop_release, cmd->done);
3047 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
3048 					     cmd->block);
3049 	}
3050 	return -ENOMEM;
3051 }
3052 
3053 int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
3054 		       struct bnx2x_virtf *vf,
3055 		       struct bnx2x_vfop_cmd *cmd)
3056 {
3057 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3058 
3059 	if (vfop) {
3060 		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
3061 				 cmd->done);
3062 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
3063 					     cmd->block);
3064 	}
3065 	return -ENOMEM;
3066 }
3067 
3068 /* VFOP tpa update, send update on all queues */
3069 static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
3070 {
3071 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
3072 	struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
3073 	enum bnx2x_vfop_tpa_state state = vfop->state;
3074 
3075 	bnx2x_vfop_reset_wq(vf);
3076 
3077 	if (vfop->rc < 0)
3078 		goto op_err;
3079 
3080 	DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
3081 	   vf->abs_vfid, tpa_args->qid,
3082 	   state);
3083 
3084 	switch (state) {
3085 	case BNX2X_VFOP_TPA_CONFIG:
3086 
3087 		if (tpa_args->qid < vf_rxq_count(vf)) {
3088 			struct bnx2x_queue_state_params *qstate =
3089 				&vf->op_params.qstate;
3090 
3091 			qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
3092 
3093 			/* The only thing that changes for the ramrod params
3094 			 * between calls is the sge_map
3095 			 */
3096 			qstate->params.update_tpa.sge_map =
3097 				tpa_args->sge_map[tpa_args->qid];
3098 
3099 			DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
3100 			   tpa_args->qid,
3101 			   U64_HI(qstate->params.update_tpa.sge_map),
3102 			   U64_LO(qstate->params.update_tpa.sge_map));
3103 			qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
3104 			vfop->rc = bnx2x_queue_state_change(bp, qstate);
3105 
3106 			tpa_args->qid++;
3107 			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
3108 		}
3109 		vfop->state = BNX2X_VFOP_TPA_DONE;
3110 		vfop->rc = 0;
3111 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3112 op_err:
3113 		BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
3114 op_done:
3115 	case BNX2X_VFOP_TPA_DONE:
3116 		bnx2x_vfop_end(bp, vf, vfop);
3117 		return;
3118 	default:
3119 		bnx2x_vfop_default(state);
3120 	}
3121 op_pending:
3122 	return;
3123 }
3124 
3125 int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
3126 			struct bnx2x_virtf *vf,
3127 			struct bnx2x_vfop_cmd *cmd,
3128 			struct vfpf_tpa_tlv *tpa_tlv)
3129 {
3130 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3131 
3132 	if (vfop) {
3133 		vfop->args.qx.qid = 0; /* loop */
3134 		memcpy(&vfop->args.tpa.sge_map,
3135 		       tpa_tlv->tpa_client_info.sge_addr,
3136 		       sizeof(vfop->args.tpa.sge_map));
3137 		bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
3138 				 bnx2x_vfop_tpa, cmd->done);
3139 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
3140 					     cmd->block);
3141 	}
3142 	return -ENOMEM;
3143 }
3144 
3145 /* VF release ~ VF close + VF release-resources
3146  * Release is the ultimate SW shutdown and is called whenever an
3147  * irrecoverable error is encountered.
3148  */
3149 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
3150 {
3151 	struct bnx2x_vfop_cmd cmd = {
3152 		.done = NULL,
3153 		.block = block,
3154 	};
3155 	int rc;
3156 
3157 	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
3158 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
3159 
3160 	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
3161 	if (rc)
3162 		WARN(rc,
3163 		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
3164 		     vf->abs_vfid, rc);
3165 }
3166 
3167 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
3168 			      struct bnx2x_virtf *vf, u32 *sbdf)
3169 {
3170 	*sbdf = vf->devfn | (vf->bus << 8);
3171 }
3172 
3173 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3174 			      enum channel_tlvs tlv)
3175 {
3176 	/* we don't lock the channel for unsupported tlvs */
3177 	if (!bnx2x_tlv_supported(tlv)) {
3178 		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
3179 		return;
3180 	}
3181 
3182 	/* lock the channel */
3183 	mutex_lock(&vf->op_mutex);
3184 
3185 	/* record the locking op */
3186 	vf->op_current = tlv;
3187 
3188 	/* log the lock */
3189 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
3190 	   vf->abs_vfid, tlv);
3191 }
3192 
3193 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3194 				enum channel_tlvs expected_tlv)
3195 {
3196 	enum channel_tlvs current_tlv;
3197 
3198 	if (!vf) {
3199 		BNX2X_ERR("VF was %p\n", vf);
3200 		return;
3201 	}
3202 
3203 	current_tlv = vf->op_current;
3204 
3205 	/* we don't unlock the channel for unsupported tlvs */
3206 	if (!bnx2x_tlv_supported(expected_tlv))
3207 		return;
3208 
3209 	WARN(expected_tlv != vf->op_current,
3210 	     "lock mismatch: expected %d found %d", expected_tlv,
3211 	     vf->op_current);
3212 
3213 	/* record the locking op */
3214 	vf->op_current = CHANNEL_TLV_NONE;
3215 
3216 	/* lock the channel */
3217 	mutex_unlock(&vf->op_mutex);
3218 
3219 	/* log the unlock */
3220 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
3221 	   vf->abs_vfid, vf->op_current);
3222 }
3223 
3224 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
3225 {
3226 	struct bnx2x_queue_state_params q_params;
3227 	u32 prev_flags;
3228 	int i, rc;
3229 
3230 	/* Verify changes are needed and record current Tx switching state */
3231 	prev_flags = bp->flags;
3232 	if (enable)
3233 		bp->flags |= TX_SWITCHING;
3234 	else
3235 		bp->flags &= ~TX_SWITCHING;
3236 	if (prev_flags == bp->flags)
3237 		return 0;
3238 
3239 	/* Verify state enables the sending of queue ramrods */
3240 	if ((bp->state != BNX2X_STATE_OPEN) ||
3241 	    (bnx2x_get_q_logical_state(bp,
3242 				      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
3243 	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
3244 		return 0;
3245 
3246 	/* send q. update ramrod to configure Tx switching */
3247 	memset(&q_params, 0, sizeof(q_params));
3248 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3249 	q_params.cmd = BNX2X_Q_CMD_UPDATE;
3250 	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
3251 		  &q_params.params.update.update_flags);
3252 	if (enable)
3253 		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
3254 			  &q_params.params.update.update_flags);
3255 	else
3256 		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
3257 			    &q_params.params.update.update_flags);
3258 
3259 	/* send the ramrod on all the queues of the PF */
3260 	for_each_eth_queue(bp, i) {
3261 		struct bnx2x_fastpath *fp = &bp->fp[i];
3262 
3263 		/* Set the appropriate Queue object */
3264 		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
3265 
3266 		/* Update the Queue state */
3267 		rc = bnx2x_queue_state_change(bp, &q_params);
3268 		if (rc) {
3269 			BNX2X_ERR("Failed to configure Tx switching\n");
3270 			return rc;
3271 		}
3272 	}
3273 
3274 	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
3275 	return 0;
3276 }
3277 
3278 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3279 {
3280 	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3281 
3282 	if (!IS_SRIOV(bp)) {
3283 		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
3284 		return -EINVAL;
3285 	}
3286 
3287 	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3288 	   num_vfs_param, BNX2X_NR_VIRTFN(bp));
3289 
3290 	/* HW channel is only operational when PF is up */
3291 	if (bp->state != BNX2X_STATE_OPEN) {
3292 		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
3293 		return -EINVAL;
3294 	}
3295 
3296 	/* we are always bound by the total_vfs in the configuration space */
3297 	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
3298 		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
3299 			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
3300 		num_vfs_param = BNX2X_NR_VIRTFN(bp);
3301 	}
3302 
3303 	bp->requested_nr_virtfn = num_vfs_param;
3304 	if (num_vfs_param == 0) {
3305 		bnx2x_set_pf_tx_switching(bp, false);
3306 		pci_disable_sriov(dev);
3307 		return 0;
3308 	} else {
3309 		return bnx2x_enable_sriov(bp);
3310 	}
3311 }
3312 
3313 #define IGU_ENTRY_SIZE 4
3314 
3315 int bnx2x_enable_sriov(struct bnx2x *bp)
3316 {
3317 	int rc = 0, req_vfs = bp->requested_nr_virtfn;
3318 	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
3319 	u32 igu_entry, address;
3320 	u16 num_vf_queues;
3321 
3322 	if (req_vfs == 0)
3323 		return 0;
3324 
3325 	first_vf = bp->vfdb->sriov.first_vf_in_pf;
3326 
3327 	/* statically distribute vf sb pool between VFs */
3328 	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
3329 			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);
3330 
3331 	/* zero previous values learned from igu cam */
3332 	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
3333 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3334 
3335 		vf->sb_count = 0;
3336 		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
3337 	}
3338 	bp->vfdb->vf_sbs_pool = 0;
3339 
3340 	/* prepare IGU cam */
3341 	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
3342 	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
3343 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3344 		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
3345 			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
3346 				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
3347 				IGU_REG_MAPPING_MEMORY_VALID;
3348 			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
3349 			   sb_idx, vf_idx);
3350 			REG_WR(bp, address, igu_entry);
3351 			sb_idx++;
3352 			address += IGU_ENTRY_SIZE;
3353 		}
3354 	}
3355 
3356 	/* Reinitialize vf database according to igu cam */
3357 	bnx2x_get_vf_igu_cam_info(bp);
3358 
3359 	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
3360 	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
3361 
3362 	qcount = 0;
3363 	for_each_vf(bp, vf_idx) {
3364 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
3365 
3366 		/* set local queue arrays */
3367 		vf->vfqs = &bp->vfdb->vfqs[qcount];
3368 		qcount += vf_sb_count(vf);
3369 		bnx2x_iov_static_resc(bp, vf);
3370 	}
3371 
3372 	/* prepare msix vectors in VF configuration space - the value in the
3373 	 * PCI configuration space should be the index of the last entry,
3374 	 * namely one less than the actual size of the table
3375 	 */
3376 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3377 		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3378 		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3379 		       num_vf_queues - 1);
3380 		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3381 		   vf_idx, num_vf_queues - 1);
3382 	}
3383 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3384 
3385 	/* enable sriov. This will probe all the VFs, and consequentially cause
3386 	 * the "acquire" messages to appear on the VF PF channel.
3387 	 */
3388 	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
3389 	bnx2x_disable_sriov(bp);
3390 
3391 	rc = bnx2x_set_pf_tx_switching(bp, true);
3392 	if (rc)
3393 		return rc;
3394 
3395 	rc = pci_enable_sriov(bp->pdev, req_vfs);
3396 	if (rc) {
3397 		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3398 		return rc;
3399 	}
3400 	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
3401 	return req_vfs;
3402 }
3403 
3404 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
3405 {
3406 	int vfidx;
3407 	struct pf_vf_bulletin_content *bulletin;
3408 
3409 	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
3410 	for_each_vf(bp, vfidx) {
3411 	bulletin = BP_VF_BULLETIN(bp, vfidx);
3412 		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
3413 			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
3414 	}
3415 }
3416 
3417 void bnx2x_disable_sriov(struct bnx2x *bp)
3418 {
3419 	pci_disable_sriov(bp->pdev);
3420 }
3421 
3422 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
3423 			     struct bnx2x_virtf **vf,
3424 			     struct pf_vf_bulletin_content **bulletin)
3425 {
3426 	if (bp->state != BNX2X_STATE_OPEN) {
3427 		BNX2X_ERR("vf ndo called though PF is down\n");
3428 		return -EINVAL;
3429 	}
3430 
3431 	if (!IS_SRIOV(bp)) {
3432 		BNX2X_ERR("vf ndo called though sriov is disabled\n");
3433 		return -EINVAL;
3434 	}
3435 
3436 	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
3437 		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
3438 			  vfidx, BNX2X_NR_VIRTFN(bp));
3439 		return -EINVAL;
3440 	}
3441 
3442 	/* init members */
3443 	*vf = BP_VF(bp, vfidx);
3444 	*bulletin = BP_VF_BULLETIN(bp, vfidx);
3445 
3446 	if (!*vf) {
3447 		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
3448 			  vfidx);
3449 		return -EINVAL;
3450 	}
3451 
3452 	if (!(*vf)->vfqs) {
3453 		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
3454 			  vfidx);
3455 		return -EINVAL;
3456 	}
3457 
3458 	if (!*bulletin) {
3459 		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
3460 			  vfidx);
3461 		return -EINVAL;
3462 	}
3463 
3464 	return 0;
3465 }
3466 
3467 int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3468 			struct ifla_vf_info *ivi)
3469 {
3470 	struct bnx2x *bp = netdev_priv(dev);
3471 	struct bnx2x_virtf *vf = NULL;
3472 	struct pf_vf_bulletin_content *bulletin = NULL;
3473 	struct bnx2x_vlan_mac_obj *mac_obj;
3474 	struct bnx2x_vlan_mac_obj *vlan_obj;
3475 	int rc;
3476 
3477 	/* sanity and init */
3478 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3479 	if (rc)
3480 		return rc;
3481 	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3482 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3483 	if (!mac_obj || !vlan_obj) {
3484 		BNX2X_ERR("VF partially initialized\n");
3485 		return -EINVAL;
3486 	}
3487 
3488 	ivi->vf = vfidx;
3489 	ivi->qos = 0;
3490 	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
3491 	ivi->spoofchk = 1; /*always enabled */
3492 	if (vf->state == VF_ENABLED) {
3493 		/* mac and vlan are in vlan_mac objects */
3494 		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
3495 			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3496 						0, ETH_ALEN);
3497 			vlan_obj->get_n_elements(bp, vlan_obj, 1,
3498 						 (u8 *)&ivi->vlan, 0,
3499 						 VLAN_HLEN);
3500 		}
3501 	} else {
3502 		/* mac */
3503 		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
3504 			/* mac configured by ndo so its in bulletin board */
3505 			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
3506 		else
3507 			/* function has not been loaded yet. Show mac as 0s */
3508 			memset(&ivi->mac, 0, ETH_ALEN);
3509 
3510 		/* vlan */
3511 		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
3512 			/* vlan configured by ndo so its in bulletin board */
3513 			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
3514 		else
3515 			/* function has not been loaded yet. Show vlans as 0s */
3516 			memset(&ivi->vlan, 0, VLAN_HLEN);
3517 	}
3518 
3519 	return 0;
3520 }
3521 
3522 /* New mac for VF. Consider these cases:
3523  * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
3524  *    supply at acquire.
3525  * 2. VF has already been acquired but has not yet initialized - store in local
3526  *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
3527  *    will configure this mac when it is ready.
3528  * 3. VF has already initialized but has not yet setup a queue - post the new
3529  *    mac on VF's bulletin board right now. VF will configure this mac when it
3530  *    is ready.
3531  * 4. VF has already set a queue - delete any macs already configured for this
3532  *    queue and manually config the new mac.
3533  * In any event, once this function has been called refuse any attempts by the
3534  * VF to configure any mac for itself except for this mac. In case of a race
3535  * where the VF fails to see the new post on its bulletin board before sending a
3536  * mac configuration request, the PF will simply fail the request and VF can try
3537  * again after consulting its bulletin board.
3538  */
3539 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3540 {
3541 	struct bnx2x *bp = netdev_priv(dev);
3542 	int rc, q_logical_state;
3543 	struct bnx2x_virtf *vf = NULL;
3544 	struct pf_vf_bulletin_content *bulletin = NULL;
3545 
3546 	/* sanity and init */
3547 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3548 	if (rc)
3549 		return rc;
3550 	if (!is_valid_ether_addr(mac)) {
3551 		BNX2X_ERR("mac address invalid\n");
3552 		return -EINVAL;
3553 	}
3554 
3555 	/* update PF's copy of the VF's bulletin. Will no longer accept mac
3556 	 * configuration requests from vf unless match this mac
3557 	 */
3558 	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3559 	memcpy(bulletin->mac, mac, ETH_ALEN);
3560 
3561 	/* Post update on VF's bulletin board */
3562 	rc = bnx2x_post_vf_bulletin(bp, vfidx);
3563 	if (rc) {
3564 		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3565 		return rc;
3566 	}
3567 
3568 	q_logical_state =
3569 		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
3570 	if (vf->state == VF_ENABLED &&
3571 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3572 		/* configure the mac in device on this vf's queue */
3573 		unsigned long ramrod_flags = 0;
3574 		struct bnx2x_vlan_mac_obj *mac_obj;
3575 
3576 		/* User should be able to see failure reason in system logs */
3577 		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
3578 			return -EINVAL;
3579 
3580 		/* must lock vfpf channel to protect against vf flows */
3581 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3582 
3583 		/* remove existing eth macs */
3584 		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3585 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3586 		if (rc) {
3587 			BNX2X_ERR("failed to delete eth macs\n");
3588 			rc = -EINVAL;
3589 			goto out;
3590 		}
3591 
3592 		/* remove existing uc list macs */
3593 		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3594 		if (rc) {
3595 			BNX2X_ERR("failed to delete uc_list macs\n");
3596 			rc = -EINVAL;
3597 			goto out;
3598 		}
3599 
3600 		/* configure the new mac to device */
3601 		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3602 		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3603 				  BNX2X_ETH_MAC, &ramrod_flags);
3604 
3605 out:
3606 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3607 	}
3608 
3609 	return 0;
3610 }
3611 
3612 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3613 {
3614 	struct bnx2x_queue_state_params q_params = {NULL};
3615 	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3616 	struct bnx2x_queue_update_params *update_params;
3617 	struct pf_vf_bulletin_content *bulletin = NULL;
3618 	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
3619 	struct bnx2x *bp = netdev_priv(dev);
3620 	struct bnx2x_vlan_mac_obj *vlan_obj;
3621 	unsigned long vlan_mac_flags = 0;
3622 	unsigned long ramrod_flags = 0;
3623 	struct bnx2x_virtf *vf = NULL;
3624 	unsigned long accept_flags;
3625 	int rc;
3626 
3627 	/* sanity and init */
3628 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3629 	if (rc)
3630 		return rc;
3631 
3632 	if (vlan > 4095) {
3633 		BNX2X_ERR("illegal vlan value %d\n", vlan);
3634 		return -EINVAL;
3635 	}
3636 
3637 	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
3638 	   vfidx, vlan, 0);
3639 
3640 	/* update PF's copy of the VF's bulletin. No point in posting the vlan
3641 	 * to the VF since it doesn't have anything to do with it. But it useful
3642 	 * to store it here in case the VF is not up yet and we can only
3643 	 * configure the vlan later when it does. Treat vlan id 0 as remove the
3644 	 * Host tag.
3645 	 */
3646 	if (vlan > 0)
3647 		bulletin->valid_bitmap |= 1 << VLAN_VALID;
3648 	else
3649 		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
3650 	bulletin->vlan = vlan;
3651 
3652 	/* is vf initialized and queue set up? */
3653 	if (vf->state != VF_ENABLED ||
3654 	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
3655 	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
3656 		return rc;
3657 
3658 	/* User should be able to see error in system logs */
3659 	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
3660 		return -EINVAL;
3661 
3662 	/* must lock vfpf channel to protect against vf flows */
3663 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3664 
3665 	/* remove existing vlans */
3666 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3667 	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3668 	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3669 				  &ramrod_flags);
3670 	if (rc) {
3671 		BNX2X_ERR("failed to delete vlans\n");
3672 		rc = -EINVAL;
3673 		goto out;
3674 	}
3675 
3676 	/* need to remove/add the VF's accept_any_vlan bit */
3677 	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
3678 	if (vlan)
3679 		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3680 	else
3681 		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3682 
3683 	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
3684 			      accept_flags);
3685 	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
3686 	bnx2x_config_rx_mode(bp, &rx_ramrod);
3687 
3688 	/* configure the new vlan to device */
3689 	memset(&ramrod_param, 0, sizeof(ramrod_param));
3690 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3691 	ramrod_param.vlan_mac_obj = vlan_obj;
3692 	ramrod_param.ramrod_flags = ramrod_flags;
3693 	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
3694 		&ramrod_param.user_req.vlan_mac_flags);
3695 	ramrod_param.user_req.u.vlan.vlan = vlan;
3696 	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3697 	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3698 	if (rc) {
3699 		BNX2X_ERR("failed to configure vlan\n");
3700 		rc =  -EINVAL;
3701 		goto out;
3702 	}
3703 
3704 	/* send queue update ramrod to configure default vlan and silent
3705 	 * vlan removal
3706 	 */
3707 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3708 	q_params.cmd = BNX2X_Q_CMD_UPDATE;
3709 	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3710 	update_params = &q_params.params.update;
3711 	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3712 		  &update_params->update_flags);
3713 	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3714 		  &update_params->update_flags);
3715 	if (vlan == 0) {
3716 		/* if vlan is 0 then we want to leave the VF traffic
3717 		 * untagged, and leave the incoming traffic untouched
3718 		 * (i.e. do not remove any vlan tags).
3719 		 */
3720 		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3721 			    &update_params->update_flags);
3722 		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3723 			    &update_params->update_flags);
3724 	} else {
3725 		/* configure default vlan to vf queue and set silent
3726 		 * vlan removal (the vf remains unaware of this vlan).
3727 		 */
3728 		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3729 			  &update_params->update_flags);
3730 		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3731 			  &update_params->update_flags);
3732 		update_params->def_vlan = vlan;
3733 		update_params->silent_removal_value =
3734 			vlan & VLAN_VID_MASK;
3735 		update_params->silent_removal_mask = VLAN_VID_MASK;
3736 	}
3737 
3738 	/* Update the Queue state */
3739 	rc = bnx2x_queue_state_change(bp, &q_params);
3740 	if (rc) {
3741 		BNX2X_ERR("Failed to configure default VLAN\n");
3742 		goto out;
3743 	}
3744 
3745 
3746 	/* clear the flag indicating that this VF needs its vlan
3747 	 * (will only be set if the HV configured the Vlan before vf was
3748 	 * up and we were called because the VF came up later
3749 	 */
3750 out:
3751 	vf->cfg_flags &= ~VF_CFG_VLAN;
3752 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3753 
3754 	return rc;
3755 }
3756 
3757 /* crc is the first field in the bulletin board. Compute the crc over the
3758  * entire bulletin board excluding the crc field itself. Use the length field
3759  * as the Bulletin Board was posted by a PF with possibly a different version
3760  * from the vf which will sample it. Therefore, the length is computed by the
3761  * PF and the used blindly by the VF.
3762  */
3763 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
3764 			  struct pf_vf_bulletin_content *bulletin)
3765 {
3766 	return crc32(BULLETIN_CRC_SEED,
3767 		 ((u8 *)bulletin) + sizeof(bulletin->crc),
3768 		 bulletin->length - sizeof(bulletin->crc));
3769 }
3770 
3771 /* Check for new posts on the bulletin board */
3772 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3773 {
3774 	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
3775 	int attempts;
3776 
3777 	/* bulletin board hasn't changed since last sample */
3778 	if (bp->old_bulletin.version == bulletin.version)
3779 		return PFVF_BULLETIN_UNCHANGED;
3780 
3781 	/* validate crc of new bulletin board */
3782 	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
3783 		/* sampling structure in mid post may result with corrupted data
3784 		 * validate crc to ensure coherency.
3785 		 */
3786 		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3787 			bulletin = bp->pf2vf_bulletin->content;
3788 			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
3789 								  &bulletin))
3790 				break;
3791 			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3792 				  bulletin.crc,
3793 				  bnx2x_crc_vf_bulletin(bp, &bulletin));
3794 		}
3795 		if (attempts >= BULLETIN_ATTEMPTS) {
3796 			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3797 				  attempts);
3798 			return PFVF_BULLETIN_CRC_ERR;
3799 		}
3800 	}
3801 
3802 	/* the mac address in bulletin board is valid and is new */
3803 	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
3804 	    !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
3805 		/* update new mac to net device */
3806 		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3807 	}
3808 
3809 	/* the vlan in bulletin board is valid and is new */
3810 	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
3811 		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
3812 
3813 	/* copy new bulletin board to bp */
3814 	bp->old_bulletin = bulletin;
3815 
3816 	return PFVF_BULLETIN_UPDATED;
3817 }
3818 
3819 void bnx2x_timer_sriov(struct bnx2x *bp)
3820 {
3821 	bnx2x_sample_bulletin(bp);
3822 
3823 	/* if channel is down we need to self destruct */
3824 	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3825 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3826 				       BNX2X_MSG_IOV);
3827 }
3828 
3829 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3830 {
3831 	/* vf doorbells are embedded within the regview */
3832 	return bp->regview + PXP_VF_ADDR_DB_START;
3833 }
3834 
3835 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3836 {
3837 	mutex_init(&bp->vf2pf_mutex);
3838 
3839 	/* allocate vf2pf mailbox for vf to pf channel */
3840 	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3841 					 sizeof(struct bnx2x_vf_mbx_msg));
3842 	if (!bp->vf2pf_mbox)
3843 		goto alloc_mem_err;
3844 
3845 	/* allocate pf 2 vf bulletin board */
3846 	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3847 					     sizeof(union pf_vf_bulletin));
3848 	if (!bp->pf2vf_bulletin)
3849 		goto alloc_mem_err;
3850 
3851 	return 0;
3852 
3853 alloc_mem_err:
3854 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3855 		       sizeof(struct bnx2x_vf_mbx_msg));
3856 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
3857 		       sizeof(union pf_vf_bulletin));
3858 	return -ENOMEM;
3859 }
3860 
3861 void bnx2x_iov_channel_down(struct bnx2x *bp)
3862 {
3863 	int vf_idx;
3864 	struct pf_vf_bulletin_content *bulletin;
3865 
3866 	if (!IS_SRIOV(bp))
3867 		return;
3868 
3869 	for_each_vf(bp, vf_idx) {
3870 		/* locate this VFs bulletin board and update the channel down
3871 		 * bit
3872 		 */
3873 		bulletin = BP_VF_BULLETIN(bp, vf_idx);
3874 		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3875 
3876 		/* update vf bulletin board */
3877 		bnx2x_post_vf_bulletin(bp, vf_idx);
3878 	}
3879 }
3880 
3881 void bnx2x_iov_task(struct work_struct *work)
3882 {
3883 	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3884 
3885 	if (!netif_running(bp->dev))
3886 		return;
3887 
3888 	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3889 			       &bp->iov_task_state))
3890 		bnx2x_vf_handle_flr_event(bp);
3891 
3892 	if (test_and_clear_bit(BNX2X_IOV_CONT_VFOP,
3893 			       &bp->iov_task_state))
3894 		bnx2x_iov_vfop_cont(bp);
3895 
3896 	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3897 			       &bp->iov_task_state))
3898 		bnx2x_vf_mbx(bp);
3899 }
3900 
3901 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3902 {
3903 	smp_mb__before_clear_bit();
3904 	set_bit(flag, &bp->iov_task_state);
3905 	smp_mb__after_clear_bit();
3906 	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3907 	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3908 }
3909