1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2012 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include "bnx2x_sriov.h"
24 
25 /* General service functions */
26 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
27 					 u16 pf_id)
28 {
29 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
30 		pf_id);
31 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
32 		pf_id);
33 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
34 		pf_id);
35 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
36 		pf_id);
37 }
38 
39 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
40 					u8 enable)
41 {
42 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
43 		enable);
44 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
45 		enable);
46 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
47 		enable);
48 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
49 		enable);
50 }
51 
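/* Return the index of the VF whose absolute FID matches abs_vfid, or
 * BNX2X_NR_VIRTFN(bp) if no such VF exists.
 */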
52 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
53 {
54 	int idx;
55 
56 	for_each_vf(bp, idx)
57 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
58 			break;
59 	return idx;
60 }
61 
62 static
63 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
64 {
65 	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
66 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
67 }
68 
69 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
70 				u8 igu_sb_id, u8 segment, u16 index, u8 op,
71 				u8 update)
72 {
73 	/* acking a VF sb through the PF - use the GRC */
74 	u32 ctl;
75 	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
76 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
77 	u32 func_encode = vf->abs_vfid;
78 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
79 	struct igu_regular cmd_data = {0};
80 
81 	cmd_data.sb_id_and_flags =
82 			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
83 			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
84 			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
85 			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
86 
87 	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
88 	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
89 	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
90 
91 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
92 	   cmd_data.sb_id_and_flags, igu_addr_data);
93 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
94 	mmiowb();
95 	barrier();
96 
97 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
98 	   ctl, igu_addr_ctl);
99 	REG_WR(bp, igu_addr_ctl, ctl);
100 	mmiowb();
101 	barrier();
102 }
103 /* VFOP - VF slow-path operation support */
104 
105 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
106 
107 /* VFOP operations states */
108 enum bnx2x_vfop_qctor_state {
109 	   BNX2X_VFOP_QCTOR_INIT,
110 	   BNX2X_VFOP_QCTOR_SETUP,
111 	   BNX2X_VFOP_QCTOR_INT_EN
112 };
113 
114 enum bnx2x_vfop_qdtor_state {
115 	   BNX2X_VFOP_QDTOR_HALT,
116 	   BNX2X_VFOP_QDTOR_TERMINATE,
117 	   BNX2X_VFOP_QDTOR_CFCDEL,
118 	   BNX2X_VFOP_QDTOR_DONE
119 };
120 
121 enum bnx2x_vfop_vlan_mac_state {
122 	   BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
123 	   BNX2X_VFOP_VLAN_MAC_CLEAR,
124 	   BNX2X_VFOP_VLAN_MAC_CHK_DONE,
125 	   BNX2X_VFOP_MAC_CONFIG_LIST,
126 	   BNX2X_VFOP_VLAN_CONFIG_LIST,
127 	   BNX2X_VFOP_VLAN_CONFIG_LIST_0
128 };
129 
130 enum bnx2x_vfop_qsetup_state {
131 	   BNX2X_VFOP_QSETUP_CTOR,
132 	   BNX2X_VFOP_QSETUP_VLAN0,
133 	   BNX2X_VFOP_QSETUP_DONE
134 };
135 
136 enum bnx2x_vfop_mcast_state {
137 	   BNX2X_VFOP_MCAST_DEL,
138 	   BNX2X_VFOP_MCAST_ADD,
139 	   BNX2X_VFOP_MCAST_CHK_DONE
140 };
141 
142 enum bnx2x_vfop_close_state {
143 	   BNX2X_VFOP_CLOSE_QUEUES,
144 	   BNX2X_VFOP_CLOSE_HW
145 };
146 
147 enum bnx2x_vfop_rxmode_state {
148 	   BNX2X_VFOP_RXMODE_CONFIG,
149 	   BNX2X_VFOP_RXMODE_DONE
150 };
151 
152 enum bnx2x_vfop_qteardown_state {
153 	   BNX2X_VFOP_QTEARDOWN_RXMODE,
154 	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
155 	   BNX2X_VFOP_QTEARDOWN_CLR_MAC,
156 	   BNX2X_VFOP_QTEARDOWN_QDTOR,
157 	   BNX2X_VFOP_QTEARDOWN_DONE
158 };
159 
160 #define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
161 
162 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
163 			      struct bnx2x_queue_init_params *init_params,
164 			      struct bnx2x_queue_setup_params *setup_params,
165 			      u16 q_idx, u16 sb_idx)
166 {
167 	DP(BNX2X_MSG_IOV,
168 	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
169 	   vf->abs_vfid,
170 	   q_idx,
171 	   sb_idx,
172 	   init_params->tx.sb_cq_index,
173 	   init_params->tx.hc_rate,
174 	   setup_params->flags,
175 	   setup_params->txq_params.traffic_type);
176 }
177 
178 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
179 			    struct bnx2x_queue_init_params *init_params,
180 			    struct bnx2x_queue_setup_params *setup_params,
181 			    u16 q_idx, u16 sb_idx)
182 {
183 	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
184 
185 	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
186 	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
187 	   vf->abs_vfid,
188 	   q_idx,
189 	   sb_idx,
190 	   init_params->rx.sb_cq_index,
191 	   init_params->rx.hc_rate,
192 	   setup_params->gen_params.mtu,
193 	   rxq_params->buf_sz,
194 	   rxq_params->sge_buf_sz,
195 	   rxq_params->max_sges_pkt,
196 	   rxq_params->tpa_agg_sz,
197 	   setup_params->flags,
198 	   rxq_params->drop_flags,
199 	   rxq_params->cache_line_log);
200 }
201 
202 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
203 			   struct bnx2x_virtf *vf,
204 			   struct bnx2x_vf_queue *q,
205 			   struct bnx2x_vfop_qctor_params *p,
206 			   unsigned long q_type)
207 {
208 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
209 	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
210 
211 	/* INIT */
212 
213 	/* Enable host coalescing in the transition to INIT state */
214 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
215 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
216 
217 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
218 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
219 
220 	/* FW SB ID */
221 	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
222 	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
223 
224 	/* context */
225 	init_p->cxts[0] = q->cxt;
226 
227 	/* SETUP */
228 
229 	/* Setup-op general parameters */
230 	setup_p->gen_params.spcl_id = vf->sp_cl_id;
231 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
232 
233 	/* Setup-op pause params:
234 	 * Nothing to do, the pause thresholds are set by default to 0 which
235 	 * effectively turns off the feature for this queue. We don't want
236 	 * one queue (VF) interfering with another queue (another VF)
237 	 */
238 	if (vf->cfg_flags & VF_CFG_FW_FC)
239 		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
240 			  vf->abs_vfid);
241 	/* Setup-op flags:
242 	 * collect statistics, zero statistics, local-switching, security,
243 	 * OV for Flex10, RSS and MCAST for leading
244 	 */
245 	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
246 		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
247 
248 	/* for VFs, enable tx switching, bd coherency, and mac address
249 	 * anti-spoofing
250 	 */
251 	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
252 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
253 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
254 
255 	if (vfq_is_leading(q)) {
256 		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
257 		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
258 	}
259 
260 	/* Setup-op rx parameters */
261 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
262 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
263 
264 		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
265 		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
266 		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
267 
268 		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
269 			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
270 	}
271 
272 	/* Setup-op tx parameters */
273 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
274 		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
275 		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
276 	}
277 }
278 
279 /* VFOP queue construction */
280 static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
281 {
282 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
283 	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
284 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
285 	enum bnx2x_vfop_qctor_state state = vfop->state;
286 
287 	bnx2x_vfop_reset_wq(vf);
288 
289 	if (vfop->rc < 0)
290 		goto op_err;
291 
292 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
293 
294 	switch (state) {
295 	case BNX2X_VFOP_QCTOR_INIT:
296 
297 		/* has this queue already been opened? */
298 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
299 		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
300 			DP(BNX2X_MSG_IOV,
301 			   "Entered qctor but queue was already up. Aborting gracefully\n");
302 			goto op_done;
303 		}
304 
305 		/* next state */
306 		vfop->state = BNX2X_VFOP_QCTOR_SETUP;
307 
308 		q_params->cmd = BNX2X_Q_CMD_INIT;
309 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
310 
311 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
312 
313 	case BNX2X_VFOP_QCTOR_SETUP:
314 		/* next state */
315 		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
316 
317 		/* copy pre-prepared setup params to the queue-state params */
318 		vfop->op_p->qctor.qstate.params.setup =
319 			vfop->op_p->qctor.prep_qsetup;
320 
321 		q_params->cmd = BNX2X_Q_CMD_SETUP;
322 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
323 
324 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
325 
326 	case BNX2X_VFOP_QCTOR_INT_EN:
327 
328 		/* enable interrupts */
329 		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
330 				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
331 		goto op_done;
332 	default:
333 		bnx2x_vfop_default(state);
334 	}
335 op_err:
336 	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
337 		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
338 op_done:
339 	bnx2x_vfop_end(bp, vf, vfop);
340 op_pending:
341 	return;
342 }
343 
344 static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
345 				struct bnx2x_virtf *vf,
346 				struct bnx2x_vfop_cmd *cmd,
347 				int qid)
348 {
349 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
350 
351 	if (vfop) {
352 		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
353 
354 		vfop->args.qctor.qid = qid;
355 		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
356 
357 		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
358 				 bnx2x_vfop_qctor, cmd->done);
359 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
360 					     cmd->block);
361 	}
362 	return -ENOMEM;
363 }
364 
365 /* VFOP queue destruction */
366 static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
367 {
368 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
369 	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
370 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
371 	enum bnx2x_vfop_qdtor_state state = vfop->state;
372 
373 	bnx2x_vfop_reset_wq(vf);
374 
375 	if (vfop->rc < 0)
376 		goto op_err;
377 
378 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
379 
380 	switch (state) {
381 	case BNX2X_VFOP_QDTOR_HALT:
382 
383 		/* has this queue already been stopped? */
384 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
385 		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
386 			DP(BNX2X_MSG_IOV,
387 			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
388 			goto op_done;
389 		}
390 
391 		/* next state */
392 		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
393 
394 		q_params->cmd = BNX2X_Q_CMD_HALT;
395 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
396 
397 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
398 
399 	case BNX2X_VFOP_QDTOR_TERMINATE:
400 		/* next state */
401 		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
402 
403 		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
404 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
405 
406 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
407 
408 	case BNX2X_VFOP_QDTOR_CFCDEL:
409 		/* next state */
410 		vfop->state = BNX2X_VFOP_QDTOR_DONE;
411 
412 		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
413 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
414 
415 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
416 op_err:
417 	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
418 		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
419 op_done:
420 	case BNX2X_VFOP_QDTOR_DONE:
421 		/* invalidate the context */
422 		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
423 		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
424 		bnx2x_vfop_end(bp, vf, vfop);
425 		return;
426 	default:
427 		bnx2x_vfop_default(state);
428 	}
429 op_pending:
430 	return;
431 }
432 
433 static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
434 				struct bnx2x_virtf *vf,
435 				struct bnx2x_vfop_cmd *cmd,
436 				int qid)
437 {
438 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
439 
440 	if (vfop) {
441 		struct bnx2x_queue_state_params *qstate =
442 			&vf->op_params.qctor.qstate;
443 
444 		memset(qstate, 0, sizeof(*qstate));
445 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
446 
447 		vfop->args.qdtor.qid = qid;
448 		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
449 
450 		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
451 				 bnx2x_vfop_qdtor, cmd->done);
452 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
453 					     cmd->block);
454 	}
455 	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop\n", vf->abs_vfid);
457 	return -ENOMEM;
458 }
459 
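/* Credit an IGU status block to the VF that owns it; the first SB seen
 * for a VF also becomes that VF's IGU base ID.
 */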
460 static void
461 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
462 {
463 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
464 	if (vf) {
465 		if (!vf_sb_count(vf))
466 			vf->igu_base_id = igu_sb_id;
467 		++vf_sb_count(vf);
468 	}
469 }
470 
471 /* VFOP MAC/VLAN helpers */
472 static inline void bnx2x_vfop_credit(struct bnx2x *bp,
473 				     struct bnx2x_vfop *vfop,
474 				     struct bnx2x_vlan_mac_obj *obj)
475 {
476 	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
477 
478 	/* update credit only if there is no error
479 	 * and a valid credit counter
480 	 */
481 	if (!vfop->rc && args->credit) {
482 		int cnt = 0;
483 		struct list_head *pos;
484 
485 		list_for_each(pos, &obj->head)
486 			cnt++;
487 
488 		atomic_set(args->credit, cnt);
489 	}
490 }
491 
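/* Translate a single vfop filter entry into the vlan_mac ramrod user
 * request; returns non-zero if the filter type is not supported.
 */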
492 static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
493 				    struct bnx2x_vfop_filter *pos,
494 				    struct bnx2x_vlan_mac_data *user_req)
495 {
496 	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
497 		BNX2X_VLAN_MAC_DEL;
498 
499 	switch (pos->type) {
500 	case BNX2X_VFOP_FILTER_MAC:
501 		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
502 		break;
503 	case BNX2X_VFOP_FILTER_VLAN:
504 		user_req->u.vlan.vlan = pos->vid;
505 		break;
506 	default:
507 		BNX2X_ERR("Invalid filter type, skipping\n");
508 		return 1;
509 	}
510 	return 0;
511 }
512 
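/* Add or remove VLAN 0 using the given vlan_mac ramrod params;
 * -EEXIST from the ramrod is not treated as an error.
 */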
513 static int
514 bnx2x_vfop_config_vlan0(struct bnx2x *bp,
515 			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
516 			bool add)
517 {
518 	int rc;
519 
520 	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
521 		BNX2X_VLAN_MAC_DEL;
522 	vlan_mac->user_req.u.vlan.vlan = 0;
523 
524 	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
525 	if (rc == -EEXIST)
526 		rc = 0;
527 	return rc;
528 }
529 
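/* Apply a list of vlan/mac filters. Entries configured successfully are
 * moved to a local rollback list; if any entry fails, or more rules than
 * filters->add_cnt were added, all applied entries are reverted.
 */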
530 static int bnx2x_vfop_config_list(struct bnx2x *bp,
531 				  struct bnx2x_vfop_filters *filters,
532 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
533 {
534 	struct bnx2x_vfop_filter *pos, *tmp;
535 	struct list_head rollback_list, *filters_list = &filters->head;
536 	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
537 	int rc = 0, cnt = 0;
538 
539 	INIT_LIST_HEAD(&rollback_list);
540 
541 	list_for_each_entry_safe(pos, tmp, filters_list, link) {
542 		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
543 			continue;
544 
545 		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
546 		if (rc >= 0) {
547 			cnt += pos->add ? 1 : -1;
548 			list_del(&pos->link);
549 			list_add(&pos->link, &rollback_list);
550 			rc = 0;
551 		} else if (rc == -EEXIST) {
552 			rc = 0;
553 		} else {
554 			BNX2X_ERR("Failed to add a new vlan_mac command\n");
555 			break;
556 		}
557 	}
558 
559 	/* rollback if error or too many rules added */
560 	if (rc || cnt > filters->add_cnt) {
561 		BNX2X_ERR("error or too many rules added. Performing rollback\n");
562 		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
563 			pos->add = !pos->add;	/* reverse op */
564 			bnx2x_vfop_set_user_req(bp, pos, user_req);
565 			bnx2x_config_vlan_mac(bp, vlan_mac);
566 			list_del(&pos->link);
567 		}
568 		cnt = 0;
569 		if (!rc)
570 			rc = -EINVAL;
571 	}
572 	filters->add_cnt = cnt;
573 	return rc;
574 }
575 
576 /* VFOP set VLAN/MAC */
577 static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
578 {
579 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
580 	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
581 	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
582 	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
583 
584 	enum bnx2x_vfop_vlan_mac_state state = vfop->state;
585 
586 	if (vfop->rc < 0)
587 		goto op_err;
588 
589 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
590 
591 	bnx2x_vfop_reset_wq(vf);
592 
593 	switch (state) {
594 	case BNX2X_VFOP_VLAN_MAC_CLEAR:
595 		/* next state */
596 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
597 
598 		/* do delete */
599 		vfop->rc = obj->delete_all(bp, obj,
600 					   &vlan_mac->user_req.vlan_mac_flags,
601 					   &vlan_mac->ramrod_flags);
602 
603 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
604 
605 	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
606 		/* next state */
607 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
608 
609 		/* do config */
610 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
611 		if (vfop->rc == -EEXIST)
612 			vfop->rc = 0;
613 
614 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
615 
616 	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
617 		vfop->rc = !!obj->raw.check_pending(&obj->raw);
618 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
619 
620 	case BNX2X_VFOP_MAC_CONFIG_LIST:
621 		/* next state */
622 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
623 
624 		/* do list config */
625 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
626 		if (vfop->rc)
627 			goto op_err;
628 
629 		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
630 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
631 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
632 
633 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
634 		/* next state */
635 		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
636 
637 		/* remove vlan0 - could be no-op */
638 		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
639 		if (vfop->rc)
640 			goto op_err;
641 
642 		/* Do vlan list config. If this operation fails we try to
643 		 * restore vlan0 to keep the queue in working order
644 		 */
645 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
646 		if (!vfop->rc) {
647 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
648 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
649 		}
650 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
651 
652 	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
653 		/* next state */
654 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
655 
656 		if (list_empty(&obj->head))
657 			/* add vlan0 */
658 			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
659 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
660 
661 	default:
662 		bnx2x_vfop_default(state);
663 	}
664 op_err:
665 	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
666 op_done:
667 	kfree(filters);
668 	bnx2x_vfop_credit(bp, vfop, obj);
669 	bnx2x_vfop_end(bp, vf, vfop);
670 op_pending:
671 	return;
672 }
673 
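/* Flags controlling how a VF vlan/mac ramrod is prepared
 * (see bnx2x_vfop_vlan_mac_prep_ramrod).
 */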
674 struct bnx2x_vfop_vlan_mac_flags {
675 	bool drv_only;
676 	bool dont_consume;
677 	bool single_cmd;
678 	bool add;
679 };
680 
681 static void
682 bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
683 				struct bnx2x_vfop_vlan_mac_flags *flags)
684 {
685 	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
686 
687 	memset(ramrod, 0, sizeof(*ramrod));
688 
689 	/* ramrod flags */
690 	if (flags->drv_only)
691 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
692 	if (flags->single_cmd)
693 		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
694 
695 	/* mac_vlan flags */
696 	if (flags->dont_consume)
697 		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
698 
699 	/* cmd */
700 	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
701 }
702 
703 static inline void
704 bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
705 			   struct bnx2x_vfop_vlan_mac_flags *flags)
706 {
707 	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
708 	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
709 }
710 
711 static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
712 				     struct bnx2x_virtf *vf,
713 				     struct bnx2x_vfop_cmd *cmd,
714 				     int qid, bool drv_only)
715 {
716 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
717 
718 	if (vfop) {
719 		struct bnx2x_vfop_args_filters filters = {
720 			.multi_filter = NULL,	/* single */
721 			.credit = NULL,		/* consume credit */
722 		};
723 		struct bnx2x_vfop_vlan_mac_flags flags = {
724 			.drv_only = drv_only,
725 			.dont_consume = (filters.credit != NULL),
726 			.single_cmd = true,
727 			.add = false /* don't care */,
728 		};
729 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
730 			&vf->op_params.vlan_mac;
731 
732 		/* set ramrod params */
733 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
734 
735 		/* set object */
736 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
737 
738 		/* set extra args */
739 		vfop->args.filters = filters;
740 
741 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
742 				 bnx2x_vfop_vlan_mac, cmd->done);
743 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
744 					     cmd->block);
745 	}
746 	return -ENOMEM;
747 }
748 
749 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
750 			    struct bnx2x_virtf *vf,
751 			    struct bnx2x_vfop_cmd *cmd,
752 			    struct bnx2x_vfop_filters *macs,
753 			    int qid, bool drv_only)
754 {
755 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
756 
757 	if (vfop) {
758 		struct bnx2x_vfop_args_filters filters = {
759 			.multi_filter = macs,
760 			.credit = NULL,		/* consume credit */
761 		};
762 		struct bnx2x_vfop_vlan_mac_flags flags = {
763 			.drv_only = drv_only,
764 			.dont_consume = (filters.credit != NULL),
765 			.single_cmd = false,
766 			.add = false, /* don't care since only the items in the
767 				       * filters list affect the sp operation,
768 				       * not the list itself
769 				       */
770 		};
771 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
772 			&vf->op_params.vlan_mac;
773 
774 		/* set ramrod params */
775 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
776 
777 		/* set object */
778 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
779 
780 		/* set extra args */
781 		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
782 		vfop->args.filters = filters;
783 
784 		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
785 				 bnx2x_vfop_vlan_mac, cmd->done);
786 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
787 					     cmd->block);
788 	}
789 	return -ENOMEM;
790 }
791 
792 int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
793 			    struct bnx2x_virtf *vf,
794 			    struct bnx2x_vfop_cmd *cmd,
795 			    int qid, u16 vid, bool add)
796 {
797 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
798 
799 	if (vfop) {
800 		struct bnx2x_vfop_args_filters filters = {
801 			.multi_filter = NULL, /* single command */
802 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
803 		};
804 		struct bnx2x_vfop_vlan_mac_flags flags = {
805 			.drv_only = false,
806 			.dont_consume = (filters.credit != NULL),
807 			.single_cmd = true,
808 			.add = add,
809 		};
810 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
811 			&vf->op_params.vlan_mac;
812 
813 		/* set ramrod params */
814 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
815 		ramrod->user_req.u.vlan.vlan = vid;
816 
817 		/* set object */
818 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
819 
820 		/* set extra args */
821 		vfop->args.filters = filters;
822 
823 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
824 				 bnx2x_vfop_vlan_mac, cmd->done);
825 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
826 					     cmd->block);
827 	}
828 	return -ENOMEM;
829 }
830 
831 static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
832 			       struct bnx2x_virtf *vf,
833 			       struct bnx2x_vfop_cmd *cmd,
834 			       int qid, bool drv_only)
835 {
836 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
837 
838 	if (vfop) {
839 		struct bnx2x_vfop_args_filters filters = {
840 			.multi_filter = NULL, /* single command */
841 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
842 		};
843 		struct bnx2x_vfop_vlan_mac_flags flags = {
844 			.drv_only = drv_only,
845 			.dont_consume = (filters.credit != NULL),
846 			.single_cmd = true,
847 			.add = false, /* don't care */
848 		};
849 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
850 			&vf->op_params.vlan_mac;
851 
852 		/* set ramrod params */
853 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
854 
855 		/* set object */
856 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
857 
858 		/* set extra args */
859 		vfop->args.filters = filters;
860 
861 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
862 				 bnx2x_vfop_vlan_mac, cmd->done);
863 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
864 					     cmd->block);
865 	}
866 	return -ENOMEM;
867 }
868 
869 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
870 			     struct bnx2x_virtf *vf,
871 			     struct bnx2x_vfop_cmd *cmd,
872 			     struct bnx2x_vfop_filters *vlans,
873 			     int qid, bool drv_only)
874 {
875 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
876 
877 	if (vfop) {
878 		struct bnx2x_vfop_args_filters filters = {
879 			.multi_filter = vlans,
880 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
881 		};
882 		struct bnx2x_vfop_vlan_mac_flags flags = {
883 			.drv_only = drv_only,
884 			.dont_consume = (filters.credit != NULL),
885 			.single_cmd = false,
886 			.add = false, /* don't care */
887 		};
888 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
889 			&vf->op_params.vlan_mac;
890 
891 		/* set ramrod params */
892 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
893 
894 		/* set object */
895 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
896 
897 		/* set extra args */
898 		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
899 			atomic_read(filters.credit);
900 
901 		vfop->args.filters = filters;
902 
903 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
904 				 bnx2x_vfop_vlan_mac, cmd->done);
905 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
906 					     cmd->block);
907 	}
908 	return -ENOMEM;
909 }
910 
911 /* VFOP queue setup (queue constructor + set vlan 0) */
912 static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
913 {
914 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
915 	int qid = vfop->args.qctor.qid;
916 	enum bnx2x_vfop_qsetup_state state = vfop->state;
917 	struct bnx2x_vfop_cmd cmd = {
918 		.done = bnx2x_vfop_qsetup,
919 		.block = false,
920 	};
921 
922 	if (vfop->rc < 0)
923 		goto op_err;
924 
925 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
926 
927 	switch (state) {
928 	case BNX2X_VFOP_QSETUP_CTOR:
929 		/* init the queue ctor command */
930 		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
931 		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
932 		if (vfop->rc)
933 			goto op_err;
934 		return;
935 
936 	case BNX2X_VFOP_QSETUP_VLAN0:
937 		/* skip if non-leading or FPGA/EMU */
938 		if (qid)
939 			goto op_done;
940 
941 		/* init the queue set-vlan command (for vlan 0) */
942 		vfop->state = BNX2X_VFOP_QSETUP_DONE;
943 		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
944 		if (vfop->rc)
945 			goto op_err;
946 		return;
947 op_err:
948 	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
949 op_done:
950 	case BNX2X_VFOP_QSETUP_DONE:
951 		bnx2x_vfop_end(bp, vf, vfop);
952 		return;
953 	default:
954 		bnx2x_vfop_default(state);
955 	}
956 }
957 
958 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
959 			  struct bnx2x_virtf *vf,
960 			  struct bnx2x_vfop_cmd *cmd,
961 			  int qid)
962 {
963 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
964 
965 	if (vfop) {
966 		vfop->args.qctor.qid = qid;
967 
968 		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
969 				 bnx2x_vfop_qsetup, cmd->done);
970 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
971 					     cmd->block);
972 	}
973 	return -ENOMEM;
974 }
975 
976 /* VFOP multi-casts */
977 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
978 {
979 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
980 	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
981 	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
982 	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
983 	enum bnx2x_vfop_mcast_state state = vfop->state;
984 	int i;
985 
986 	bnx2x_vfop_reset_wq(vf);
987 
988 	if (vfop->rc < 0)
989 		goto op_err;
990 
991 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
992 
993 	switch (state) {
994 	case BNX2X_VFOP_MCAST_DEL:
995 		/* clear existing mcasts */
996 		vfop->state = BNX2X_VFOP_MCAST_ADD;
997 		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
998 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
999 
1000 	case BNX2X_VFOP_MCAST_ADD:
1001 		if (raw->check_pending(raw))
1002 			goto op_pending;
1003 
1004 		if (args->mc_num) {
1005 			/* update mcast list on the ramrod params */
1006 			INIT_LIST_HEAD(&mcast->mcast_list);
1007 			for (i = 0; i < args->mc_num; i++)
1008 				list_add_tail(&(args->mc[i].link),
1009 					      &mcast->mcast_list);
1010 			/* add new mcasts */
1011 			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
1012 			vfop->rc = bnx2x_config_mcast(bp, mcast,
1013 						      BNX2X_MCAST_CMD_ADD);
1014 		}
1015 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1016 
1017 	case BNX2X_VFOP_MCAST_CHK_DONE:
1018 		vfop->rc = raw->check_pending(raw) ? 1 : 0;
1019 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1020 	default:
1021 		bnx2x_vfop_default(state);
1022 	}
1023 op_err:
1024 	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
1025 op_done:
1026 	kfree(args->mc);
1027 	bnx2x_vfop_end(bp, vf, vfop);
1028 op_pending:
1029 	return;
1030 }
1031 
1032 int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
1033 			 struct bnx2x_virtf *vf,
1034 			 struct bnx2x_vfop_cmd *cmd,
1035 			 bnx2x_mac_addr_t *mcasts,
1036 			 int mcast_num, bool drv_only)
1037 {
1038 	struct bnx2x_vfop *vfop = NULL;
1039 	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
1040 	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
1041 					   NULL;
1042 
1043 	if (!mc_sz || mc) {
1044 		vfop = bnx2x_vfop_add(bp, vf);
1045 		if (vfop) {
1046 			int i;
1047 			struct bnx2x_mcast_ramrod_params *ramrod =
1048 				&vf->op_params.mcast;
1049 
1050 			/* set ramrod params */
1051 			memset(ramrod, 0, sizeof(*ramrod));
1052 			ramrod->mcast_obj = &vf->mcast_obj;
1053 			if (drv_only)
1054 				set_bit(RAMROD_DRV_CLR_ONLY,
1055 					&ramrod->ramrod_flags);
1056 
1057 			/* copy mcasts pointers */
1058 			vfop->args.mc_list.mc_num = mcast_num;
1059 			vfop->args.mc_list.mc = mc;
1060 			for (i = 0; i < mcast_num; i++)
1061 				mc[i].mac = mcasts[i];
1062 
1063 			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
1064 					 bnx2x_vfop_mcast, cmd->done);
1065 			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
1066 						     cmd->block);
1067 		} else {
1068 			kfree(mc);
1069 		}
1070 	}
1071 	return -ENOMEM;
1072 }
1073 
1074 /* VFOP rx-mode */
1075 static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1076 {
1077 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1078 	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
1079 	enum bnx2x_vfop_rxmode_state state = vfop->state;
1080 
1081 	bnx2x_vfop_reset_wq(vf);
1082 
1083 	if (vfop->rc < 0)
1084 		goto op_err;
1085 
1086 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1087 
1088 	switch (state) {
1089 	case BNX2X_VFOP_RXMODE_CONFIG:
1090 		/* next state */
1091 		vfop->state = BNX2X_VFOP_RXMODE_DONE;
1092 
1093 		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1094 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1095 op_err:
1096 		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
1097 op_done:
1098 	case BNX2X_VFOP_RXMODE_DONE:
1099 		bnx2x_vfop_end(bp, vf, vfop);
1100 		return;
1101 	default:
1102 		bnx2x_vfop_default(state);
1103 	}
1104 op_pending:
1105 	return;
1106 }
1107 
1108 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1109 			  struct bnx2x_virtf *vf,
1110 			  struct bnx2x_vfop_cmd *cmd,
1111 			  int qid, unsigned long accept_flags)
1112 {
1113 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1114 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1115 
1116 	if (vfop) {
1117 		struct bnx2x_rx_mode_ramrod_params *ramrod =
1118 			&vf->op_params.rx_mode;
1119 
1120 		memset(ramrod, 0, sizeof(*ramrod));
1121 
1122 		/* Prepare ramrod parameters */
1123 		ramrod->cid = vfq->cid;
1124 		ramrod->cl_id = vfq_cl_id(vf, vfq);
1125 		ramrod->rx_mode_obj = &bp->rx_mode_obj;
1126 		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1127 
1128 		ramrod->rx_accept_flags = accept_flags;
1129 		ramrod->tx_accept_flags = accept_flags;
1130 		ramrod->pstate = &vf->filter_state;
1131 		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1132 
1133 		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1134 		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1135 		set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1136 
1137 		ramrod->rdata =
1138 			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1139 		ramrod->rdata_mapping =
1140 			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1141 
1142 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1143 				 bnx2x_vfop_rxmode, cmd->done);
1144 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
1145 					     cmd->block);
1146 	}
1147 	return -ENOMEM;
1148 }
1149 
1150 /* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
1151  * queue destructor)
1152  */
1153 static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
1154 {
1155 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1156 	int qid = vfop->args.qx.qid;
1157 	enum bnx2x_vfop_qteardown_state state = vfop->state;
1158 	struct bnx2x_vfop_cmd cmd;
1159 
1160 	if (vfop->rc < 0)
1161 		goto op_err;
1162 
1163 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1164 
1165 	cmd.done = bnx2x_vfop_qdown;
1166 	cmd.block = false;
1167 
1168 	switch (state) {
1169 	case BNX2X_VFOP_QTEARDOWN_RXMODE:
1170 		/* Drop all */
1171 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
1172 		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
1173 		if (vfop->rc)
1174 			goto op_err;
1175 		return;
1176 
1177 	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
1178 		/* vlan-clear-all: don't consume credit */
1179 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
1180 		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
1181 		if (vfop->rc)
1182 			goto op_err;
1183 		return;
1184 
1185 	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
1186 		/* mac-clear-all: consume credit */
1187 		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1188 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
1189 		if (vfop->rc)
1190 			goto op_err;
1191 		return;
1192 
1193 	case BNX2X_VFOP_QTEARDOWN_QDTOR:
1194 		/* run the queue destruction flow */
1195 		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
1196 		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
1197 		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
1198 		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
1199 		DP(BNX2X_MSG_IOV, "returned from cmd\n");
1200 		if (vfop->rc)
1201 			goto op_err;
1202 		return;
1203 op_err:
1204 	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
1205 		  vf->abs_vfid, qid, vfop->rc);
1206 
1207 	case BNX2X_VFOP_QTEARDOWN_DONE:
1208 		bnx2x_vfop_end(bp, vf, vfop);
1209 		return;
1210 	default:
1211 		bnx2x_vfop_default(state);
1212 	}
1213 }
1214 
1215 int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
1216 			 struct bnx2x_virtf *vf,
1217 			 struct bnx2x_vfop_cmd *cmd,
1218 			 int qid)
1219 {
1220 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1221 
1222 	if (vfop) {
1223 		vfop->args.qx.qid = qid;
1224 		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
1225 				 bnx2x_vfop_qdown, cmd->done);
1226 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
1227 					     cmd->block);
1228 	}
1229 
1230 	return -ENOMEM;
1231 }
1232 
1233 /* VF enable primitives
1234  * when pretend is required the caller is responsible
1235  * for calling pretend prior to calling these routines
1236  */
1237 
1238 /* called only on E1H or E2.
1239  * When pretending to be PF, the pretend value is the function number 0...7
1240  * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
1241  * combination
1242  */
1243 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
1244 {
1245 	u32 pretend_reg;
1246 
1247 	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
1248 		return -1;
1249 
1250 	/* get my own pretend register */
1251 	pretend_reg = bnx2x_get_pretend_reg(bp);
1252 	REG_WR(bp, pretend_reg, pretend_func_val);
1253 	REG_RD(bp, pretend_reg);
1254 	return 0;
1255 }
1256 
1257 /* internal vf enable - until vf is enabled internally all transactions
1258  * are blocked. this routine should always be called last with pretend.
1259  */
1260 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
1261 {
1262 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
1263 }
1264 
1265 /* clears vf error in all semi blocks */
1266 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
1267 {
1268 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
1269 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
1270 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
1271 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
1272 }
1273 
1274 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
1275 {
1276 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
1277 	u32 was_err_reg = 0;
1278 
1279 	switch (was_err_group) {
1280 	case 0:
1281 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
1282 	    break;
1283 	case 1:
1284 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
1285 	    break;
1286 	case 2:
1287 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
1288 	    break;
1289 	case 3:
1290 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
1291 	    break;
1292 	}
1293 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
1294 }
1295 
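/* Reset the VF's view of the IGU: zero its interrupt mask and PBA
 * registers, enable the VF function under its parent PF, then clear the
 * producer/consumer state of each of the VF's status blocks.
 */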
1296 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1297 {
1298 	int i;
1299 	u32 val;
1300 
1301 	/* Set VF masks and configuration - pretend */
1302 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1303 
1304 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
1305 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
1306 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
1307 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
1308 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
1309 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
1310 
1311 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1312 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
1313 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
1314 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
1315 	val &= ~IGU_VF_CONF_PARENT_MASK;
1316 	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
1317 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1318 
1319 	DP(BNX2X_MSG_IOV,
1320 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
1321 	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
1322 
1323 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1324 
1325 	/* iterate over all of the VF's status blocks, clear sb consumer */
1326 	for (i = 0; i < vf_sb_count(vf); i++) {
1327 		u8 igu_sb_id = vf_igu_sb(vf, i);
1328 
1329 		/* zero prod memory */
1330 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
1331 
1332 		/* clear sb state machine */
1333 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
1334 				       false /* VF */);
1335 
1336 		/* disable + update */
1337 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
1338 				    IGU_INT_DISABLE, 1);
1339 	}
1340 }
1341 
1342 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
1343 {
1344 	/* set the VF-PF association in the FW */
1345 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
1346 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
1347 
1348 	/* clear vf errors*/
1349 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
1350 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
1351 
1352 	/* internal vf-enable - pretend */
1353 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
1354 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
1355 	bnx2x_vf_enable_internal(bp, true);
1356 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1357 }
1358 
1359 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
1360 {
1361 	/* Reset vf in IGU - interrupts are still disabled */
1362 	bnx2x_vf_igu_reset(bp, vf);
1363 
1364 	/* pretend to enable the vf with the PBF */
1365 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1366 	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
1367 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1368 }
1369 
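/* Check whether the VF's PCI function still has pending PCIe transactions */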
1370 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1371 {
1372 	struct pci_dev *dev;
1373 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1374 
1375 	if (!vf)
1376 		goto unknown_dev;
1377 
1378 	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
1379 	if (dev)
1380 		return bnx2x_is_pcie_pending(dev);
1381 
1382 unknown_dev:
1383 	BNX2X_ERR("Unknown device\n");
1384 	return false;
1385 }
1386 
1387 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1388 {
1389 	/* Wait 100ms */
1390 	msleep(100);
1391 
1392 	/* Verify no pending pci transactions */
1393 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
1394 		BNX2X_ERR("PCIE Transactions still pending\n");
1395 
1396 	return 0;
1397 }
1398 
1399 /* must be called after the number of PF queues and the number of VFs are
1400  * both known
1401  */
1402 static void
1403 bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
1404 {
1405 	u16 vlan_count = 0;
1406 
1407 	/* will be set only during VF-ACQUIRE */
1408 	resc->num_rxqs = 0;
1409 	resc->num_txqs = 0;
1410 
1411 	/* no credit calculation for macs (just yet) */
1412 	resc->num_mac_filters = 1;
1413 
1414 	/* divvy up vlan rules */
1415 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
1416 	vlan_count = 1 << ilog2(vlan_count);
1417 	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
1418 
1419 	/* no real limitation */
1420 	resc->num_mc_filters = 0;
1421 
1422 	/* num_sbs already set */
1423 }
1424 
1425 /* IOV global initialization routines  */
1426 void bnx2x_iov_init_dq(struct bnx2x *bp)
1427 {
1428 	if (!IS_SRIOV(bp))
1429 		return;
1430 
1431 	/* Set the DQ such that the CID reflect the abs_vfid */
1432 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1433 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1434 
1435 	/* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong
1436 	 * to the PF L2 queues
1437 	 */
1438 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1439 
1440 	/* The VF window size is the log2 of the max number of CIDs per VF */
1441 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1442 
1443 	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
1444 	 * the PF doorbell size although the two are independent.
1445 	 */
1446 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
1447 	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
1448 
1449 	/* No security checks for now -
1450 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
1451 	 * CID range 0 - 0x1ffff
1452 	 */
1453 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1454 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1455 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1456 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1457 
1458 	/* set the number of VF allowed doorbells to the full DQ range */
1459 	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
1460 
1461 	/* set the VF doorbell threshold */
1462 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
1463 }
1464 
1465 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1466 {
1467 	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
1468 	if (!IS_SRIOV(bp))
1469 		return;
1470 
1471 	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1472 }
1473 
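/* bnx2x_vf_bus()/bnx2x_vf_devfn() derive a VF's PCI bus number and devfn
 * from the PF's devfn and the SR-IOV offset and stride.
 */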
1474 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1475 {
1476 	struct pci_dev *dev = bp->pdev;
1477 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1478 
1479 	return dev->bus->number + ((dev->devfn + iov->offset +
1480 				    iov->stride * vfid) >> 8);
1481 }
1482 
1483 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1484 {
1485 	struct pci_dev *dev = bp->pdev;
1486 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1487 
1488 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1489 }
1490 
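/* Divide each SR-IOV BAR evenly between the VFs and record this VF's
 * share (base address and size) in its bars[] array.
 */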
1491 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1492 {
1493 	int i, n;
1494 	struct pci_dev *dev = bp->pdev;
1495 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1496 
1497 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1498 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1499 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1500 
1501 		do_div(size, iov->total);
1502 		vf->bars[n].bar = start + size * vf->abs_vfid;
1503 		vf->bars[n].size = size;
1504 	}
1505 }
1506 
1507 static int bnx2x_ari_enabled(struct pci_dev *dev)
1508 {
1509 	return dev->bus->self && dev->bus->self->ari_enabled;
1510 }
1511 
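/* Scan the IGU CAM and account every VF-owned entry to its VF
 * (see bnx2x_vf_set_igu_info).
 */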
1512 static void
1513 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1514 {
1515 	int sb_id;
1516 	u32 val;
1517 	u8 fid;
1518 
1519 	/* IGU in normal mode - read CAM */
1520 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1521 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1522 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1523 			continue;
1524 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1525 		if (!(fid & IGU_FID_ENCODE_IS_PF))
1526 			bnx2x_vf_set_igu_info(bp, sb_id,
1527 					      (fid & IGU_FID_VF_NUM_MASK));
1528 
1529 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1530 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1531 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1532 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1533 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1534 	}
1535 }
1536 
1537 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1538 {
1539 	if (bp->vfdb) {
1540 		kfree(bp->vfdb->vfqs);
1541 		kfree(bp->vfdb->vfs);
1542 		kfree(bp->vfdb);
1543 	}
1544 	bp->vfdb = NULL;
1545 }
1546 
1547 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1548 {
1549 	int pos;
1550 	struct pci_dev *dev = bp->pdev;
1551 
1552 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1553 	if (!pos) {
1554 		BNX2X_ERR("failed to find SRIOV capability in device\n");
1555 		return -ENODEV;
1556 	}
1557 
1558 	iov->pos = pos;
1559 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1560 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1561 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1562 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1563 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1564 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1565 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1566 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1567 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1568 
1569 	return 0;
1570 }
1571 
1572 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1573 {
1574 	u32 val;
1575 
1576 	/* read the SRIOV capability structure
1577 	 * The fields can be read via configuration read or
1578 	 * directly from the device (starting at offset PCICFG_OFFSET)
1579 	 */
1580 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
1581 		return -ENODEV;
1582 
1583 	/* get the number of SRIOV bars */
1584 	iov->nres = 0;
1585 
1586 	/* read the first_vfid */
1587 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1588 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1589 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1590 
1591 	DP(BNX2X_MSG_IOV,
1592 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1593 	   BP_FUNC(bp),
1594 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1595 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1596 
1597 	return 0;
1598 }
1599 
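/* Sum of the status blocks allocated to all VFs; used as the upper bound
 * on the total number of VF queues.
 */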
1600 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
1601 {
1602 	int i;
1603 	u8 queue_count = 0;
1604 
1605 	if (IS_SRIOV(bp))
1606 		for_each_vf(bp, i)
1607 			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1608 
1609 	return queue_count;
1610 }
1611 
1612 /* must be called after PF bars are mapped */
1613 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1614 			int num_vfs_param)
1615 {
1616 	int err, i, qcount;
1617 	struct bnx2x_sriov *iov;
1618 	struct pci_dev *dev = bp->pdev;
1619 
1620 	bp->vfdb = NULL;
1621 
1622 	/* verify is pf */
1623 	if (IS_VF(bp))
1624 		return 0;
1625 
1626 	/* verify sriov capability is present in configuration space */
1627 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1628 		return 0;
1629 
1630 	/* verify chip revision */
1631 	if (CHIP_IS_E1x(bp))
1632 		return 0;
1633 
1634 	/* check if SRIOV support is turned off */
1635 	if (!num_vfs_param)
1636 		return 0;
1637 
1638 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1639 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1640 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1641 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1642 		return 0;
1643 	}
1644 
1645 	/* SRIOV can be enabled only with MSIX */
1646 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
1647 	    int_mode_param == BNX2X_INT_MODE_INTX)
1648 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1649 
1650 	err = -EIO;
1651 	/* verify ari is enabled */
1652 	if (!bnx2x_ari_enabled(bp->pdev)) {
1653 		BNX2X_ERR("ARI not supported, SRIOV cannot be enabled\n");
1654 		return err;
1655 	}
1656 
1657 	/* verify igu is in normal mode */
1658 	if (CHIP_INT_MODE_IS_BC(bp)) {
1659 		BNX2X_ERR("IGU is not in normal mode, SRIOV cannot be enabled\n");
1660 		return err;
1661 	}
1662 
1663 	/* allocate the vfs database */
1664 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1665 	if (!bp->vfdb) {
1666 		BNX2X_ERR("failed to allocate vf database\n");
1667 		err = -ENOMEM;
1668 		goto failed;
1669 	}
1670 
1671 	/* get the sriov info - Linux already collected all the pertinent
1672 	 * information, however the sriov structure is for the private use
1673 	 * of the pci module. Also we want this information regardless
1674 	 * of the hypervisor.
1675 	 */
1676 	iov = &(bp->vfdb->sriov);
1677 	err = bnx2x_sriov_info(bp, iov);
1678 	if (err)
1679 		goto failed;
1680 
1681 	/* SR-IOV capability was enabled but there are no VFs */
1682 	if (iov->total == 0)
1683 		goto failed;
1684 
1685 	/* calculate the actual number of VFs */
1686 	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
1687 
1688 	/* allocate the vf array */
1689 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
1690 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
1691 	if (!bp->vfdb->vfs) {
1692 		BNX2X_ERR("failed to allocate vf array\n");
1693 		err = -ENOMEM;
1694 		goto failed;
1695 	}
1696 
1697 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1698 	for_each_vf(bp, i) {
1699 		bnx2x_vf(bp, i, index) = i;
1700 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1701 		bnx2x_vf(bp, i, state) = VF_FREE;
1702 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
1703 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
1704 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1705 	}
1706 
1707 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1708 	bnx2x_get_vf_igu_cam_info(bp);
1709 
1710 	/* get the total queue count and allocate the global queue arrays */
1711 	qcount = bnx2x_iov_get_max_queue_count(bp);
1712 
1713 	/* allocate the queue arrays for all VFs */
1714 	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
1715 				 GFP_KERNEL);
1716 	if (!bp->vfdb->vfqs) {
1717 		BNX2X_ERR("failed to allocate vf queue array\n");
1718 		err = -ENOMEM;
1719 		goto failed;
1720 	}
1721 
1722 	return 0;
1723 failed:
1724 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
1725 	__bnx2x_iov_free_vfdb(bp);
1726 	return err;
1727 }
1728 
1729 void bnx2x_iov_remove_one(struct bnx2x *bp)
1730 {
1731 	/* if SRIOV is not enabled there's nothing to do */
1732 	if (!IS_SRIOV(bp))
1733 		return;
1734 
1735 	/* free vf database */
1736 	__bnx2x_iov_free_vfdb(bp);
1737 }
1738 
1739 void bnx2x_iov_free_mem(struct bnx2x *bp)
1740 {
1741 	int i;
1742 
1743 	if (!IS_SRIOV(bp))
1744 		return;
1745 
1746 	/* free vfs hw contexts */
1747 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1748 		struct hw_dma *cxt = &bp->vfdb->context[i];
1749 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
1750 	}
1751 
1752 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
1753 		       BP_VFDB(bp)->sp_dma.mapping,
1754 		       BP_VFDB(bp)->sp_dma.size);
1755 
1756 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
1757 		       BP_VF_MBX_DMA(bp)->mapping,
1758 		       BP_VF_MBX_DMA(bp)->size);
1759 }
1760 
1761 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
1762 {
1763 	size_t tot_size;
1764 	int i, rc = 0;
1765 
1766 	if (!IS_SRIOV(bp))
1767 		return rc;
1768 
1769 	/* allocate vfs hw contexts */
1770 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
1771 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
1772 
1773 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1774 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
1775 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
1776 
1777 		if (cxt->size) {
1778 			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
1779 		} else {
1780 			cxt->addr = NULL;
1781 			cxt->mapping = 0;
1782 		}
1783 		tot_size -= cxt->size;
1784 	}
1785 
1786 	/* allocate vfs ramrods dma memory - client_init and set_mac */
1787 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
1788 	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
1789 			tot_size);
1790 	BP_VFDB(bp)->sp_dma.size = tot_size;
1791 
1792 	/* allocate mailboxes */
1793 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
1794 	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
1795 			tot_size);
1796 	BP_VF_MBX_DMA(bp)->size = tot_size;
1797 
1798 	return 0;
1799 
1800 alloc_mem_err:
1801 	return -ENOMEM;
1802 }
1803 
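/* Initialize a VF queue's state object. For the leading queue, the per-VF
 * mac, vlan and mcast objects are initialized as well.
 */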
1804 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
1805 			   struct bnx2x_vf_queue *q)
1806 {
1807 	u8 cl_id = vfq_cl_id(vf, q);
1808 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
1809 	unsigned long q_type = 0;
1810 
1811 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1812 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1813 
1814 	/* Queue State object */
1815 	bnx2x_init_queue_obj(bp, &q->sp_obj,
1816 			     cl_id, &q->cid, 1, func_id,
1817 			     bnx2x_vf_sp(bp, vf, q_data),
1818 			     bnx2x_vf_sp_map(bp, vf, q_data),
1819 			     q_type);
1820 
1821 	DP(BNX2X_MSG_IOV,
1822 	   "initialized vf %d's queue object. func id set to %d\n",
1823 	   vf->abs_vfid, q->sp_obj.func_id);
1824 
1825 	/* mac/vlan objects are per queue, but only those
1826 	 * that belong to the leading queue are initialized
1827 	 */
1828 	if (vfq_is_leading(q)) {
1829 		/* mac */
1830 		bnx2x_init_mac_obj(bp, &q->mac_obj,
1831 				   cl_id, q->cid, func_id,
1832 				   bnx2x_vf_sp(bp, vf, mac_rdata),
1833 				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
1834 				   BNX2X_FILTER_MAC_PENDING,
1835 				   &vf->filter_state,
1836 				   BNX2X_OBJ_TYPE_RX_TX,
1837 				   &bp->macs_pool);
1838 		/* vlan */
1839 		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
1840 				    cl_id, q->cid, func_id,
1841 				    bnx2x_vf_sp(bp, vf, vlan_rdata),
1842 				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
1843 				    BNX2X_FILTER_VLAN_PENDING,
1844 				    &vf->filter_state,
1845 				    BNX2X_OBJ_TYPE_RX_TX,
1846 				    &bp->vlans_pool);
1847 
1848 		/* mcast */
1849 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
1850 				     q->cid, func_id, func_id,
1851 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
1852 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1853 				     BNX2X_FILTER_MCAST_PENDING,
1854 				     &vf->filter_state,
1855 				     BNX2X_OBJ_TYPE_RX_TX);
1856 
1857 		vf->leading_rss = cl_id;
1858 	}
1859 }
1860 
1861 /* called by bnx2x_nic_load */
1862 int bnx2x_iov_nic_init(struct bnx2x *bp)
1863 {
1864 	int vfid, qcount, i;
1865 
1866 	if (!IS_SRIOV(bp)) {
1867 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1868 		return 0;
1869 	}
1870 
1871 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", bp->vfdb->sriov.nr_virtfn);
1872 
1873 	/* initialize vf database */
1874 	for_each_vf(bp, vfid) {
1875 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1876 
1877 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1878 			BNX2X_CIDS_PER_VF;
1879 
1880 		union cdu_context *base_cxt = (union cdu_context *)
1881 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1882 			(base_vf_cid & (ILT_PAGE_CIDS-1));
1883 
1884 		DP(BNX2X_MSG_IOV,
1885 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1886 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1887 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1888 
1889 		/* init statically provisioned resources */
1890 		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
1891 
1892 		/* queues are initialized during VF-ACQUIRE */
1893 
1894 		/* reserve the vf vlan credit */
1895 		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
1896 
1897 		vf->filter_state = 0;
1898 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1899 
1900 		/*  init mcast object - This object will be re-initialized
1901 		 *  during VF-ACQUIRE with the proper cl_id and cid.
1902 		 *  It needs to be initialized here so that it can be safely
1903 		 *  handled by a subsequent FLR flow.
1904 		 */
1905 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1906 				     0xFF, 0xFF, 0xFF,
1907 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
1908 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1909 				     BNX2X_FILTER_MCAST_PENDING,
1910 				     &vf->filter_state,
1911 				     BNX2X_OBJ_TYPE_RX_TX);
1912 
1913 		/* set the mailbox message addresses */
1914 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1915 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1916 			MBX_MSG_ALIGNED_SIZE);
1917 
1918 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1919 			vfid * MBX_MSG_ALIGNED_SIZE;
1920 
1921 		/* Enable vf mailbox */
1922 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1923 	}
1924 
1925 	/* Final VF init */
1926 	qcount = 0;
1927 	for_each_vf(bp, i) {
1928 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1929 
1930 		/* fill in the BDF and bars */
1931 		vf->bus = bnx2x_vf_bus(bp, i);
1932 		vf->devfn = bnx2x_vf_devfn(bp, i);
1933 		bnx2x_vf_set_bars(bp, vf);
1934 
1935 		DP(BNX2X_MSG_IOV,
1936 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1937 		   vf->abs_vfid, vf->bus, vf->devfn,
1938 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1939 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1940 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1941 
1942 		/* set local queue arrays */
1943 		vf->vfqs = &bp->vfdb->vfqs[qcount];
1944 		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1945 	}
1946 
1947 	return 0;
1948 }
1949 
1950 /* called by bnx2x_init_hw_func, returns the next ilt line */
1951 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1952 {
1953 	int i;
1954 	struct bnx2x_ilt *ilt = BP_ILT(bp);
1955 
1956 	if (!IS_SRIOV(bp))
1957 		return line;
1958 
1959 	/* set vfs ilt lines */
1960 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1961 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1962 
1963 		ilt->lines[line+i].page = hw_cxt->addr;
1964 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1965 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1966 	}
1967 	return line + i;
1968 }
1969 
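/* true iff the cid falls within the range reserved for VF cids */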
1970 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1971 {
1972 	return ((cid >= BNX2X_FIRST_VF_CID) &&
1973 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1974 }
1975 
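/* complete a pending MAC or VLAN classification ramrod of a VF queue and
 * let any further pending commands be pushed out (RAMROD_CONT)
 */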
1976 static
1977 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1978 					struct bnx2x_vf_queue *vfq,
1979 					union event_ring_elem *elem)
1980 {
1981 	unsigned long ramrod_flags = 0;
1982 	int rc = 0;
1983 
1984 	/* Always push next commands out, don't wait here */
1985 	set_bit(RAMROD_CONT, &ramrod_flags);
1986 
1987 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
1988 	case BNX2X_FILTER_MAC_PENDING:
1989 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1990 					   &ramrod_flags);
1991 		break;
1992 	case BNX2X_FILTER_VLAN_PENDING:
1993 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1994 					    &ramrod_flags);
1995 		break;
1996 	default:
1997 		BNX2X_ERR("Unsupported classification command: %d\n",
1998 			  elem->message.data.eth_event.echo);
1999 		return;
2000 	}
2001 	if (rc < 0)
2002 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
2003 	else if (rc > 0)
2004 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
2005 }
2006 
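/* clear the pending bit of the VF's mcast object and, if more mcast
 * commands are queued, continue the configuration
 */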
2007 static
2008 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
2009 			       struct bnx2x_virtf *vf)
2010 {
2011 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2012 	int rc;
2013 
2014 	rparam.mcast_obj = &vf->mcast_obj;
2015 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2016 
2017 	/* If there are pending mcast commands - send them */
2018 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2019 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2020 		if (rc < 0)
2021 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2022 				  rc);
2023 	}
2024 }
2025 
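/* rx-mode ramrod completed - clear the VF's pending rx-mode filter bit */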
2026 static
2027 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2028 				 struct bnx2x_virtf *vf)
2029 {
2030 	smp_mb__before_clear_bit();
2031 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2032 	smp_mb__after_clear_bit();
2033 }
2034 
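/* called from the PF's event queue handling; returns 1 when the element
 * is not VF related and should be handled by the PF, 0 when it was
 * consumed here on behalf of a VF
 */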
2035 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2036 {
2037 	struct bnx2x_virtf *vf;
2038 	int qidx = 0, abs_vfid;
2039 	u8 opcode;
2040 	u16 cid = 0xffff;
2041 
2042 	if (!IS_SRIOV(bp))
2043 		return 1;
2044 
2045 	/* first get the cid - the completions handled here are cfc-delete and
2046 	 * the classification/mcast/filter rules; FLR and malicious VF events
2047 	 * carry the vf id directly and need no cid
	 */
2048 	opcode = elem->message.opcode;
2049 
2050 	switch (opcode) {
2051 	case EVENT_RING_OPCODE_CFC_DEL:
2052 		cid = SW_CID((__force __le32)
2053 			     elem->message.data.cfc_del_event.cid);
2054 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2055 		break;
2056 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2057 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2058 	case EVENT_RING_OPCODE_FILTERS_RULES:
2059 		cid = (elem->message.data.eth_event.echo &
2060 		       BNX2X_SWCID_MASK);
2061 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2062 		break;
2063 	case EVENT_RING_OPCODE_VF_FLR:
2064 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
2065 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2066 		   abs_vfid);
2067 		goto get_vf;
2068 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2069 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2070 		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
2071 		   abs_vfid);
2072 		goto get_vf;
2073 	default:
2074 		return 1;
2075 	}
2076 
2077 	/* check if the cid is in the VF range */
2078 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2079 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2080 		return 1;
2081 	}
2082 
2083 	/* extract vf and rxq index from vf_cid - relies on the following:
2084 	 * 1. vfid on cid reflects the true abs_vfid
2085 	 * 2. the max number of VFs (per path) is 64
2086 	 */
2087 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2088 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2089 get_vf:
2090 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
2091 
2092 	if (!vf) {
2093 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
2094 			  cid, abs_vfid);
2095 		return 0;
2096 	}
2097 
2098 	switch (opcode) {
2099 	case EVENT_RING_OPCODE_CFC_DEL:
2100 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
2101 		   vf->abs_vfid, qidx);
2102 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
2103 						       &vfq_get(vf,
2104 								qidx)->sp_obj,
2105 						       BNX2X_Q_CMD_CFC_DEL);
2106 		break;
2107 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2108 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
2109 		   vf->abs_vfid, qidx);
2110 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
2111 		break;
2112 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2113 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
2114 		   vf->abs_vfid, qidx);
2115 		bnx2x_vf_handle_mcast_eqe(bp, vf);
2116 		break;
2117 	case EVENT_RING_OPCODE_FILTERS_RULES:
2118 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
2119 		   vf->abs_vfid, qidx);
2120 		bnx2x_vf_handle_filters_eqe(bp, vf);
2121 		break;
2122 	case EVENT_RING_OPCODE_VF_FLR:
2123 		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
2124 		   vf->abs_vfid);
2125 		/* Do nothing for now */
2126 		break;
2127 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2128 		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
2129 		   vf->abs_vfid);
2130 		/* Do nothing for now */
2131 		break;
2132 	}
2133 	/* SRIOV: reschedule any 'in_progress' operations */
2134 	bnx2x_iov_sp_event(bp, cid, false);
2135 
2136 	return 0;
2137 }
2138 
2139 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2140 {
2141 	/* extract the vf from vf_cid - relies on the following:
2142 	 * 1. vfid on cid reflects the true abs_vfid
2143 	 * 2. the max number of VFs (per path) is 64
2144 	 */
2145 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2146 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
2147 }
2148 
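/* given a VF cid, look up the owning VF and return its matching queue
 * state object
 */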
2149 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2150 				struct bnx2x_queue_sp_obj **q_obj)
2151 {
2152 	struct bnx2x_virtf *vf;
2153 
2154 	if (!IS_SRIOV(bp))
2155 		return;
2156 
2157 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2158 
2159 	if (vf) {
2160 		/* extract queue index from vf_cid - relies on the following:
2161 		 * 1. vfid on cid reflects the true abs_vfid
2162 		 * 2. the max number of VFs (per path) is 64
2163 		 */
2164 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2165 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2166 	} else {
2167 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2168 	}
2169 }
2170 
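/* mark the VF that owns this cid as having a slow-path operation in
 * progress, and optionally kick the PF slow-path task to run it
 */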
2171 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2172 {
2173 	struct bnx2x_virtf *vf;
2174 
2175 	/* check if the cid is in the VF range */
2176 	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2177 		return;
2178 
2179 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2180 	if (vf) {
2181 		/* set in_progress flag */
2182 		atomic_set(&vf->op_in_progress, 1);
2183 		if (queue_work)
2184 			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2185 	}
2186 }
2187 
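/* append a stats query entry for every active queue of every enabled VF,
 * on top of the PF's own queries
 */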
2188 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2189 {
2190 	int i;
2191 	int first_queue_query_index, num_queues_req;
2192 	dma_addr_t cur_data_offset;
2193 	struct stats_query_entry *cur_query_entry;
2194 	u8 stats_count = 0;
2195 	bool is_fcoe = false;
2196 
2197 	if (!IS_SRIOV(bp))
2198 		return;
2199 
2200 	if (!NO_FCOE(bp))
2201 		is_fcoe = true;
2202 
2203 	/* fcoe adds one global request and one queue request */
2204 	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2205 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2206 		(is_fcoe ? 0 : 1);
2207 
2208 	DP(BNX2X_MSG_IOV,
2209 	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2210 	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2211 	   first_queue_query_index + num_queues_req);
2212 
2213 	cur_data_offset = bp->fw_stats_data_mapping +
2214 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2215 		num_queues_req * sizeof(struct per_queue_stats);
2216 
2217 	cur_query_entry = &bp->fw_stats_req->
2218 		query[first_queue_query_index + num_queues_req];
2219 
2220 	for_each_vf(bp, i) {
2221 		int j;
2222 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2223 
2224 		if (vf->state != VF_ENABLED) {
2225 			DP(BNX2X_MSG_IOV,
2226 			   "vf %d not enabled so no stats for it\n",
2227 			   vf->abs_vfid);
2228 			continue;
2229 		}
2230 
2231 		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2232 		for_each_vfq(vf, j) {
2233 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2234 
2235 			/* collect stats from active queues only */
2236 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2237 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
2238 				continue;
2239 
2240 			/* create stats query entry for this queue */
2241 			cur_query_entry->kind = STATS_TYPE_QUEUE;
2242 			cur_query_entry->index = vfq_cl_id(vf, rxq);
2243 			cur_query_entry->funcID =
2244 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2245 			cur_query_entry->address.hi =
2246 				cpu_to_le32(U64_HI(vf->fw_stat_map));
2247 			cur_query_entry->address.lo =
2248 				cpu_to_le32(U64_LO(vf->fw_stat_map));
2249 			DP(BNX2X_MSG_IOV,
2250 			   "added address %x %x for vf %d queue %d client %d\n",
2251 			   cur_query_entry->address.hi,
2252 			   cur_query_entry->address.lo, cur_query_entry->funcID,
2253 			   j, cur_query_entry->index);
2254 			cur_query_entry++;
2255 			cur_data_offset += sizeof(struct per_queue_stats);
2256 			stats_count++;
2257 		}
2258 	}
2259 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2260 }
2261 
2262 void bnx2x_iov_sp_task(struct bnx2x *bp)
2263 {
2264 	int i;
2265 
2266 	if (!IS_SRIOV(bp))
2267 		return;
2268 	/* Iterate over all VFs and invoke state transition for VFs with
2269 	 * 'in-progress' slow-path operations
2270 	 */
2271 	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
2272 	for_each_vf(bp, i) {
2273 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2274 
2275 		if (!list_empty(&vf->op_list_head) &&
2276 		    atomic_read(&vf->op_in_progress)) {
2277 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2278 			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2279 		}
2280 	}
2281 }
2282 
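/* find the VF whose IGU status block range contains the given stat id
 * (note: the last VF is returned if no range matches)
 */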
2283 static inline
2284 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2285 {
2286 	int i;
2287 	struct bnx2x_virtf *vf = NULL;
2288 
2289 	for_each_vf(bp, i) {
2290 		vf = BP_VF(bp, i);
2291 		if (stat_id >= vf->igu_base_id &&
2292 		    stat_id < vf->igu_base_id + vf_sb_count(vf))
2293 			break;
2294 	}
2295 	return vf;
2296 }
2297 
2298 /* VF API helpers */
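/* grant (or revoke) a VF's access to one of its queue zones in the PXP
 * host permission table
 */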
2299 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2300 				u8 enable)
2301 {
2302 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2303 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2304 
2305 	REG_WR(bp, reg, val);
2306 }
2307 
2308 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2309 {
2310 	int i;
2311 
2312 	for_each_vfq(vf, i)
2313 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2314 				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
2315 }
2316 
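/* pretend to be the VF and clear the MSI/MSI-X, single-ISR, function
 * enable and parent bits of its IGU configuration
 */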
2317 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2318 {
2319 	u32 val;
2320 
2321 	/* clear the VF configuration - pretend */
2322 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2323 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2324 	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2325 		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2326 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2327 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2328 }
2329 
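/* the max number of queues a VF may use is bounded by its status blocks,
 * its cids and the global per-VF queue limit
 */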
2330 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2331 {
2332 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2333 		     BNX2X_VF_MAX_QUEUES);
2334 }
2335 
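/* check that a resource request does not exceed the VF's provisioned
 * resources; a queue count that is still unset is treated as the maximum
 */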
2336 static
2337 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2338 			    struct vf_pf_resc_request *req_resc)
2339 {
2340 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2341 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2342 
2343 	return ((req_resc->num_rxqs <= rxq_cnt) &&
2344 		(req_resc->num_txqs <= txq_cnt) &&
2345 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
2346 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2347 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2348 }
2349 
2350 /* CORE VF API */
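/* acquire - validate the resource request against what is provisioned,
 * record the granted counters and initialize the VF's queue structures
 * and their state objects
 */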
2351 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2352 		     struct vf_pf_resc_request *resc)
2353 {
2354 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2355 		BNX2X_CIDS_PER_VF;
2356 
2357 	union cdu_context *base_cxt = (union cdu_context *)
2358 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2359 		(base_vf_cid & (ILT_PAGE_CIDS-1));
2360 	int i;
2361 
2362 	/* if the state is 'acquired' the VF was not released or FLR'd;
2363 	 * in this case the returned resources match the already acquired
2364 	 * resources. Verify that the requested numbers do not exceed the
2365 	 * already acquired numbers.
2366 	 */
2367 	if (vf->state == VF_ACQUIRED) {
2368 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2369 		   vf->abs_vfid);
2370 
2371 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2372 			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2373 				  vf->abs_vfid);
2374 			return -EINVAL;
2375 		}
2376 		return 0;
2377 	}
2378 
2379 	/* Otherwise vf state must be 'free' or 'reset' */
2380 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
2381 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2382 			  vf->abs_vfid, vf->state);
2383 		return -EINVAL;
2384 	}
2385 
2386 	/* static allocation:
2387 	 * the global maximum numbers are fixed per VF. Fail the request if
2388 	 * the requested numbers exceed these globals
2389 	 */
2390 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2391 		DP(BNX2X_MSG_IOV,
2392 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2393 		/* set the max resource in the vf */
2394 		return -ENOMEM;
2395 	}
2396 
2397 	/* Set resource counters - a request of 0 means max available */
2398 	vf_sb_count(vf) = resc->num_sbs;
2399 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2400 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2401 	if (resc->num_mac_filters)
2402 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2403 	if (resc->num_vlan_filters)
2404 		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2405 
2406 	DP(BNX2X_MSG_IOV,
2407 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2408 	   vf_sb_count(vf), vf_rxq_count(vf),
2409 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
2410 	   vf_vlan_rules_cnt(vf));
2411 
2412 	/* Initialize the queues */
2413 	if (!vf->vfqs) {
2414 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2415 		return -EINVAL;
2416 	}
2417 
2418 	for_each_vfq(vf, i) {
2419 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
2420 
2421 		if (!q) {
2422 			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
2423 			return -EINVAL;
2424 		}
2425 
2426 		q->index = i;
2427 		q->cxt = &((base_cxt + i)->eth);
2428 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2429 
2430 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2431 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2432 
2433 		/* init SP objects */
2434 		bnx2x_vfq_init(bp, vf, q);
2435 	}
2436 	vf->state = VF_ACQUIRED;
2437 	return 0;
2438 }
2439 
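/* init - set up the VF's status blocks, initialize its function in FW/HW,
 * enable its access and traffic and mark the VF as enabled
 */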
2440 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2441 {
2442 	struct bnx2x_func_init_params func_init = {0};
2443 	u16 flags = 0;
2444 	int i;
2445 
2446 	/* the sb resources are initialized at this point, do the
2447 	 * FW/HW initializations
2448 	 */
2449 	for_each_vf_sb(vf, i)
2450 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2451 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2452 
2453 	/* Sanity checks */
2454 	if (vf->state != VF_ACQUIRED) {
2455 		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2456 		   vf->abs_vfid, vf->state);
2457 		return -EINVAL;
2458 	}
2459 	/* FLR cleanup epilogue */
2460 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2461 		return -EBUSY;
2462 
2463 	/* reset IGU VF statistics: MSIX */
2464 	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2465 
2466 	/* vf init */
2467 	if (vf->cfg_flags & VF_CFG_STATS)
2468 		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2469 
2470 	if (vf->cfg_flags & VF_CFG_TPA)
2471 		flags |= FUNC_FLG_TPA;
2472 
2473 	if (is_vf_multi(vf))
2474 		flags |= FUNC_FLG_RSS;
2475 
2476 	/* function setup */
2477 	func_init.func_flgs = flags;
2478 	func_init.pf_id = BP_FUNC(bp);
2479 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2480 	func_init.fw_stat_map = vf->fw_stat_map;
2481 	func_init.spq_map = vf->spq_map;
2482 	func_init.spq_prod = 0;
2483 	bnx2x_func_init(bp, &func_init);
2484 
2485 	/* Enable the vf */
2486 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
2487 	bnx2x_vf_enable_traffic(bp, vf);
2488 
2489 	/* queue protection table */
2490 	for_each_vfq(vf, i)
2491 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2492 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2493 
2494 	vf->state = VF_ENABLED;
2495 
2496 	return 0;
2497 }
2498 
2499 /* VFOP close (teardown the queues, delete mcasts and close HW) */
2500 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2501 {
2502 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2503 	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2504 	enum bnx2x_vfop_close_state state = vfop->state;
2505 	struct bnx2x_vfop_cmd cmd = {
2506 		.done = bnx2x_vfop_close,
2507 		.block = false,
2508 	};
2509 
2510 	if (vfop->rc < 0)
2511 		goto op_err;
2512 
2513 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2514 
2515 	switch (state) {
2516 	case BNX2X_VFOP_CLOSE_QUEUES:
2517 
2518 		if (++(qx->qid) < vf_rxq_count(vf)) {
2519 			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2520 			if (vfop->rc)
2521 				goto op_err;
2522 			return;
2523 		}
2524 
2525 		/* remove multicasts */
2526 		vfop->state = BNX2X_VFOP_CLOSE_HW;
2527 		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
2528 		if (vfop->rc)
2529 			goto op_err;
2530 		return;
2531 
2532 	case BNX2X_VFOP_CLOSE_HW:
2533 
2534 		/* disable the interrupts */
2535 		DP(BNX2X_MSG_IOV, "disabling igu\n");
2536 		bnx2x_vf_igu_disable(bp, vf);
2537 
2538 		/* disable the VF */
2539 		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2540 		bnx2x_vf_clr_qtbl(bp, vf);
2541 
2542 		goto op_done;
2543 	default:
2544 		bnx2x_vfop_default(state);
2545 	}
2546 op_err:
2547 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2548 op_done:
2549 	vf->state = VF_ACQUIRED;
2550 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
2551 	bnx2x_vfop_end(bp, vf, vfop);
2552 }
2553 
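/* enqueue a close operation for the VF: tear down its queues, remove its
 * multicasts and disable it in HW
 */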
2554 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2555 			 struct bnx2x_virtf *vf,
2556 			 struct bnx2x_vfop_cmd *cmd)
2557 {
2558 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2559 	if (vfop) {
2560 		vfop->args.qx.qid = -1; /* loop */
2561 		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2562 				 bnx2x_vfop_close, cmd->done);
2563 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2564 					     cmd->block);
2565 	}
2566 	return -ENOMEM;
2567 }
2568 
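/* the vf-pf channel is serialized per VF; the lock also records which TLV
 * currently owns the channel so that mismatched unlocks can be detected
 */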
2569 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2570 			      enum channel_tlvs tlv)
2571 {
2572 	/* lock the channel */
2573 	mutex_lock(&vf->op_mutex);
2574 
2575 	/* record the locking op */
2576 	vf->op_current = tlv;
2577 
2578 	/* log the lock */
2579 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2580 	   vf->abs_vfid, tlv);
2581 }
2582 
2583 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2584 				enum channel_tlvs expected_tlv)
2585 {
2586 	WARN(expected_tlv != vf->op_current,
2587 	     "lock mismatch: expected %d found %d\n", expected_tlv,
2588 	     vf->op_current);
2589 
2590 	/* unlock the channel */
2591 	mutex_unlock(&vf->op_mutex);
2592 
2593 	/* log the unlock */
2594 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2595 	   vf->abs_vfid, vf->op_current);
2596 
2597 	/* clear the locking op */
2598 	vf->op_current = CHANNEL_TLV_NONE;
2599 }
2600