1 /* bnx2x_sriov.c: Broadcom Everest network driver.
2  *
3  * Copyright 2009-2012 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Shmulik Ravid <shmulikr@broadcom.com>
17  *	       Ariel Elior <ariele@broadcom.com>
18  *
19  */
20 #include "bnx2x.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_cmn.h"
23 #include "bnx2x_sriov.h"
24 
25 /* General service functions */
26 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
27 					 u16 pf_id)
28 {
29 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
30 		pf_id);
31 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
32 		pf_id);
33 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
34 		pf_id);
35 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
36 		pf_id);
37 }
38 
39 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
40 					u8 enable)
41 {
42 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
43 		enable);
44 	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
45 		enable);
46 	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
47 		enable);
48 	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
49 		enable);
50 }
51 
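/* Return the index of the VF whose abs_vfid matches.  When no VF matches,
 * for_each_vf() leaves idx at BNX2X_NR_VIRTFN(bp); callers such as
 * bnx2x_vf_by_abs_fid() below rely on that to detect the "not found" case.
 */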
52 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
53 {
54 	int idx;
55 
56 	for_each_vf(bp, idx)
57 		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
58 			break;
59 	return idx;
60 }
61 
62 static
63 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
64 {
65 	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
66 	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
67 }
68 
69 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
70 				u8 igu_sb_id, u8 segment, u16 index, u8 op,
71 				u8 update)
72 {
73 	/* acking a VF sb through the PF - use the GRC */
74 	u32 ctl;
75 	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
76 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
77 	u32 func_encode = vf->abs_vfid;
78 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
79 	struct igu_regular cmd_data = {0};
80 
81 	cmd_data.sb_id_and_flags =
82 			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
83 			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
84 			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
85 			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
86 
87 	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
88 	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
89 	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
90 
91 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
92 	   cmd_data.sb_id_and_flags, igu_addr_data);
93 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
94 	mmiowb();
95 	barrier();
96 
97 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
98 	   ctl, igu_addr_ctl);
99 	REG_WR(bp, igu_addr_ctl, ctl);
100 	mmiowb();
101 	barrier();
102 }
103 /* VFOP - VF slow-path operation support */
104 
105 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
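
/* Each VFOP below is implemented as a small state machine: a *_cmd() wrapper
 * allocates a struct bnx2x_vfop, records its arguments and initial state, and
 * the handler (e.g. bnx2x_vfop_qctor) is then re-entered as the operation
 * progresses.  Each case typically sets the next state, issues a ramrod and
 * calls bnx2x_vfop_finalize(), which routes execution according to the return
 * code and the VFOP_CONT/VFOP_DONE hint (wait for a pending completion,
 * continue with the next state, or jump to the op_err/op_done labels).  This
 * is only a descriptive summary of the pattern used throughout this file;
 * the precise semantics live in the bnx2x_vfop_* helpers.
 */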
106 
107 /* VFOP operations states */
108 enum bnx2x_vfop_qctor_state {
109 	   BNX2X_VFOP_QCTOR_INIT,
110 	   BNX2X_VFOP_QCTOR_SETUP,
111 	   BNX2X_VFOP_QCTOR_INT_EN
112 };
113 
114 enum bnx2x_vfop_qdtor_state {
115 	   BNX2X_VFOP_QDTOR_HALT,
116 	   BNX2X_VFOP_QDTOR_TERMINATE,
117 	   BNX2X_VFOP_QDTOR_CFCDEL,
118 	   BNX2X_VFOP_QDTOR_DONE
119 };
120 
121 enum bnx2x_vfop_vlan_mac_state {
122 	   BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
123 	   BNX2X_VFOP_VLAN_MAC_CLEAR,
124 	   BNX2X_VFOP_VLAN_MAC_CHK_DONE,
125 	   BNX2X_VFOP_MAC_CONFIG_LIST,
126 	   BNX2X_VFOP_VLAN_CONFIG_LIST,
127 	   BNX2X_VFOP_VLAN_CONFIG_LIST_0
128 };
129 
130 enum bnx2x_vfop_qsetup_state {
131 	   BNX2X_VFOP_QSETUP_CTOR,
132 	   BNX2X_VFOP_QSETUP_VLAN0,
133 	   BNX2X_VFOP_QSETUP_DONE
134 };
135 
136 enum bnx2x_vfop_mcast_state {
137 	   BNX2X_VFOP_MCAST_DEL,
138 	   BNX2X_VFOP_MCAST_ADD,
139 	   BNX2X_VFOP_MCAST_CHK_DONE
140 };
141 
142 enum bnx2x_vfop_rxmode_state {
143 	   BNX2X_VFOP_RXMODE_CONFIG,
144 	   BNX2X_VFOP_RXMODE_DONE
145 };
146 
147 enum bnx2x_vfop_qteardown_state {
148 	   BNX2X_VFOP_QTEARDOWN_RXMODE,
149 	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
150 	   BNX2X_VFOP_QTEARDOWN_CLR_MAC,
151 	   BNX2X_VFOP_QTEARDOWN_QDTOR,
152 	   BNX2X_VFOP_QTEARDOWN_DONE
153 };
154 
155 #define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
156 
157 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
158 			      struct bnx2x_queue_init_params *init_params,
159 			      struct bnx2x_queue_setup_params *setup_params,
160 			      u16 q_idx, u16 sb_idx)
161 {
162 	DP(BNX2X_MSG_IOV,
163 	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
164 	   vf->abs_vfid,
165 	   q_idx,
166 	   sb_idx,
167 	   init_params->tx.sb_cq_index,
168 	   init_params->tx.hc_rate,
169 	   setup_params->flags,
170 	   setup_params->txq_params.traffic_type);
171 }
172 
173 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
174 			    struct bnx2x_queue_init_params *init_params,
175 			    struct bnx2x_queue_setup_params *setup_params,
176 			    u16 q_idx, u16 sb_idx)
177 {
178 	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
179 
180 	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
181 	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
182 	   vf->abs_vfid,
183 	   q_idx,
184 	   sb_idx,
185 	   init_params->rx.sb_cq_index,
186 	   init_params->rx.hc_rate,
187 	   setup_params->gen_params.mtu,
188 	   rxq_params->buf_sz,
189 	   rxq_params->sge_buf_sz,
190 	   rxq_params->max_sges_pkt,
191 	   rxq_params->tpa_agg_sz,
192 	   setup_params->flags,
193 	   rxq_params->drop_flags,
194 	   rxq_params->cache_line_log);
195 }
196 
197 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
198 			   struct bnx2x_virtf *vf,
199 			   struct bnx2x_vf_queue *q,
200 			   struct bnx2x_vfop_qctor_params *p,
201 			   unsigned long q_type)
202 {
203 	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
204 	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
205 
206 	/* INIT */
207 
208 	/* Enable host coalescing in the transition to INIT state */
209 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
210 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
211 
212 	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
213 		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
214 
215 	/* FW SB ID */
216 	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
217 	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
218 
219 	/* context */
220 	init_p->cxts[0] = q->cxt;
221 
222 	/* SETUP */
223 
224 	/* Setup-op general parameters */
225 	setup_p->gen_params.spcl_id = vf->sp_cl_id;
226 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
227 
228 	/* Setup-op pause params:
229 	 * Nothing to do, the pause thresholds are set by default to 0 which
230 	 * effectively turns off the feature for this queue. We don't want
231 	 * one queue (VF) interfering with another queue (another VF)
232 	 */
233 	if (vf->cfg_flags & VF_CFG_FW_FC)
234 		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
235 			  vf->abs_vfid);
236 	/* Setup-op flags:
237 	 * collect statistics, zero statistics, local-switching, security,
238 	 * OV for Flex10, RSS and MCAST for leading
239 	 */
240 	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
241 		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
242 
243 	/* for VFs, enable tx switching, bd coherency, and mac address
244 	 * anti-spoofing
245 	 */
246 	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
247 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
248 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
249 
250 	if (vfq_is_leading(q)) {
251 		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
252 		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
253 	}
254 
255 	/* Setup-op rx parameters */
256 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
257 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
258 
259 		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
260 		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
261 		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
262 
263 		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
264 			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
265 	}
266 
267 	/* Setup-op tx parameters */
268 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
269 		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
270 		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
271 	}
272 }
273 
274 /* VFOP queue construction */
275 static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
276 {
277 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
278 	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
279 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
280 	enum bnx2x_vfop_qctor_state state = vfop->state;
281 
282 	bnx2x_vfop_reset_wq(vf);
283 
284 	if (vfop->rc < 0)
285 		goto op_err;
286 
287 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
288 
289 	switch (state) {
290 	case BNX2X_VFOP_QCTOR_INIT:
291 
292 		/* has this queue already been opened? */
293 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
294 		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
295 			DP(BNX2X_MSG_IOV,
296 			   "Entered qctor but queue was already up. Aborting gracefully\n");
297 			goto op_done;
298 		}
299 
300 		/* next state */
301 		vfop->state = BNX2X_VFOP_QCTOR_SETUP;
302 
303 		q_params->cmd = BNX2X_Q_CMD_INIT;
304 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
305 
306 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
307 
308 	case BNX2X_VFOP_QCTOR_SETUP:
309 		/* next state */
310 		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
311 
312 		/* copy pre-prepared setup params to the queue-state params */
313 		vfop->op_p->qctor.qstate.params.setup =
314 			vfop->op_p->qctor.prep_qsetup;
315 
316 		q_params->cmd = BNX2X_Q_CMD_SETUP;
317 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
318 
319 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
320 
321 	case BNX2X_VFOP_QCTOR_INT_EN:
322 
323 		/* enable interrupts */
324 		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
325 				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
326 		goto op_done;
327 	default:
328 		bnx2x_vfop_default(state);
329 	}
330 op_err:
331 	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
332 		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
333 op_done:
334 	bnx2x_vfop_end(bp, vf, vfop);
335 op_pending:
336 	return;
337 }
338 
339 static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
340 				struct bnx2x_virtf *vf,
341 				struct bnx2x_vfop_cmd *cmd,
342 				int qid)
343 {
344 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
345 
346 	if (vfop) {
347 		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
348 
349 		vfop->args.qctor.qid = qid;
350 		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
351 
352 		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
353 				 bnx2x_vfop_qctor, cmd->done);
354 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
355 					     cmd->block);
356 	}
357 	return -ENOMEM;
358 }
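
/* All of the *_cmd() helpers in this file follow the pattern above: allocate
 * a vfop, fill in the per-op arguments and ramrod parameters, set the initial
 * state and completion callback with bnx2x_vfop_opset(), then kick the state
 * machine with bnx2x_vfop_transition().  A caller that wants to chain several
 * operations passes its own handler as cmd->done - see bnx2x_vfop_qsetup()
 * below, which re-enters itself once the queue constructor finishes.
 * Illustrative (hypothetical) call site:
 *
 *	struct bnx2x_vfop_cmd cmd = {
 *		.done = bnx2x_vfop_qsetup,
 *		.block = false,
 *	};
 *	rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
 */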
359 
360 /* VFOP queue destruction */
361 static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
362 {
363 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
364 	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
365 	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
366 	enum bnx2x_vfop_qdtor_state state = vfop->state;
367 
368 	bnx2x_vfop_reset_wq(vf);
369 
370 	if (vfop->rc < 0)
371 		goto op_err;
372 
373 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
374 
375 	switch (state) {
376 	case BNX2X_VFOP_QDTOR_HALT:
377 
378 		/* has this queue already been stopped? */
379 		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
380 		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
381 			DP(BNX2X_MSG_IOV,
382 			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
383 			goto op_done;
384 		}
385 
386 		/* next state */
387 		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
388 
389 		q_params->cmd = BNX2X_Q_CMD_HALT;
390 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
391 
392 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
393 
394 	case BNX2X_VFOP_QDTOR_TERMINATE:
395 		/* next state */
396 		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
397 
398 		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
399 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
400 
401 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
402 
403 	case BNX2X_VFOP_QDTOR_CFCDEL:
404 		/* next state */
405 		vfop->state = BNX2X_VFOP_QDTOR_DONE;
406 
407 		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
408 		vfop->rc = bnx2x_queue_state_change(bp, q_params);
409 
410 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
411 op_err:
412 	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
413 		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
414 op_done:
415 	case BNX2X_VFOP_QDTOR_DONE:
416 		/* invalidate the context */
417 		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
418 		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
419 		bnx2x_vfop_end(bp, vf, vfop);
420 		return;
421 	default:
422 		bnx2x_vfop_default(state);
423 	}
424 op_pending:
425 	return;
426 }
427 
428 static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
429 				struct bnx2x_virtf *vf,
430 				struct bnx2x_vfop_cmd *cmd,
431 				int qid)
432 {
433 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
434 
435 	if (vfop) {
436 		struct bnx2x_queue_state_params *qstate =
437 			&vf->op_params.qctor.qstate;
438 
439 		memset(qstate, 0, sizeof(*qstate));
440 		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
441 
442 		vfop->args.qdtor.qid = qid;
443 		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
444 
445 		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
446 				 bnx2x_vfop_qdtor, cmd->done);
447 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
448 					     cmd->block);
449 	}
450 	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop\n", vf->abs_vfid);
452 	return -ENOMEM;
453 }
454 
455 static void
456 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
457 {
458 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
459 	if (vf) {
460 		if (!vf_sb_count(vf))
461 			vf->igu_base_id = igu_sb_id;
462 		++vf_sb_count(vf);
463 	}
464 }
465 
466 /* VFOP MAC/VLAN helpers */
467 static inline void bnx2x_vfop_credit(struct bnx2x *bp,
468 				     struct bnx2x_vfop *vfop,
469 				     struct bnx2x_vlan_mac_obj *obj)
470 {
471 	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
472 
473 	/* update credit only if there is no error
474 	 * and a valid credit counter
475 	 */
476 	if (!vfop->rc && args->credit) {
477 		int cnt = 0;
478 		struct list_head *pos;
479 
480 		list_for_each(pos, &obj->head)
481 			cnt++;
482 
483 		atomic_set(args->credit, cnt);
484 	}
485 }
486 
487 static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
488 				    struct bnx2x_vfop_filter *pos,
489 				    struct bnx2x_vlan_mac_data *user_req)
490 {
491 	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
492 		BNX2X_VLAN_MAC_DEL;
493 
494 	switch (pos->type) {
495 	case BNX2X_VFOP_FILTER_MAC:
496 		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
497 		break;
498 	case BNX2X_VFOP_FILTER_VLAN:
499 		user_req->u.vlan.vlan = pos->vid;
500 		break;
501 	default:
502 		BNX2X_ERR("Invalid filter type, skipping\n");
503 		return 1;
504 	}
505 	return 0;
506 }
507 
508 static int
509 bnx2x_vfop_config_vlan0(struct bnx2x *bp,
510 			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
511 			bool add)
512 {
513 	int rc;
514 
515 	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
516 		BNX2X_VLAN_MAC_DEL;
517 	vlan_mac->user_req.u.vlan.vlan = 0;
518 
519 	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
520 	if (rc == -EEXIST)
521 		rc = 0;
522 	return rc;
523 }
524 
525 static int bnx2x_vfop_config_list(struct bnx2x *bp,
526 				  struct bnx2x_vfop_filters *filters,
527 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
528 {
529 	struct bnx2x_vfop_filter *pos, *tmp;
530 	struct list_head rollback_list, *filters_list = &filters->head;
531 	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
532 	int rc = 0, cnt = 0;
533 
534 	INIT_LIST_HEAD(&rollback_list);
535 
536 	list_for_each_entry_safe(pos, tmp, filters_list, link) {
537 		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
538 			continue;
539 
540 		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
541 		if (rc >= 0) {
542 			cnt += pos->add ? 1 : -1;
543 			list_del(&pos->link);
544 			list_add(&pos->link, &rollback_list);
545 			rc = 0;
546 		} else if (rc == -EEXIST) {
547 			rc = 0;
548 		} else {
549 			BNX2X_ERR("Failed to add a new vlan_mac command\n");
550 			break;
551 		}
552 	}
553 
554 	/* rollback if error or too many rules added */
555 	if (rc || cnt > filters->add_cnt) {
556 		BNX2X_ERR("error or too many rules added. Performing rollback\n");
557 		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
558 			pos->add = !pos->add;	/* reverse op */
559 			bnx2x_vfop_set_user_req(bp, pos, user_req);
560 			bnx2x_config_vlan_mac(bp, vlan_mac);
561 			list_del(&pos->link);
562 		}
563 		cnt = 0;
564 		if (!rc)
565 			rc = -EINVAL;
566 	}
567 	filters->add_cnt = cnt;
568 	return rc;
569 }
570 
571 /* VFOP set VLAN/MAC */
572 static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
573 {
574 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
575 	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
576 	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
577 	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
578 
579 	enum bnx2x_vfop_vlan_mac_state state = vfop->state;
580 
581 	if (vfop->rc < 0)
582 		goto op_err;
583 
584 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
585 
586 	bnx2x_vfop_reset_wq(vf);
587 
588 	switch (state) {
589 	case BNX2X_VFOP_VLAN_MAC_CLEAR:
590 		/* next state */
591 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
592 
593 		/* do delete */
594 		vfop->rc = obj->delete_all(bp, obj,
595 					   &vlan_mac->user_req.vlan_mac_flags,
596 					   &vlan_mac->ramrod_flags);
597 
598 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
599 
600 	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
601 		/* next state */
602 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
603 
604 		/* do config */
605 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
606 		if (vfop->rc == -EEXIST)
607 			vfop->rc = 0;
608 
609 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
610 
611 	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
612 		vfop->rc = !!obj->raw.check_pending(&obj->raw);
613 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
614 
615 	case BNX2X_VFOP_MAC_CONFIG_LIST:
616 		/* next state */
617 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
618 
619 		/* do list config */
620 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
621 		if (vfop->rc)
622 			goto op_err;
623 
624 		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
625 		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
626 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
627 
628 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
629 		/* next state */
630 		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
631 
632 		/* remove vlan0 - could be no-op */
633 		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
634 		if (vfop->rc)
635 			goto op_err;
636 
637 		/* Do vlan list config. If this operation fails we try to
638 		 * restore vlan0 to keep the queue in working order
639 		 */
640 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
641 		if (!vfop->rc) {
642 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
643 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
644 		}
645 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
646 
647 	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
648 		/* next state */
649 		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
650 
651 		if (list_empty(&obj->head))
652 			/* add vlan0 */
653 			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
654 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
655 
656 	default:
657 		bnx2x_vfop_default(state);
658 	}
659 op_err:
660 	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
661 op_done:
662 	kfree(filters);
663 	bnx2x_vfop_credit(bp, vfop, obj);
664 	bnx2x_vfop_end(bp, vf, vfop);
665 op_pending:
666 	return;
667 }
668 
669 struct bnx2x_vfop_vlan_mac_flags {
670 	bool drv_only;
671 	bool dont_consume;
672 	bool single_cmd;
673 	bool add;
674 };
675 
676 static void
677 bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
678 				struct bnx2x_vfop_vlan_mac_flags *flags)
679 {
680 	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
681 
682 	memset(ramrod, 0, sizeof(*ramrod));
683 
684 	/* ramrod flags */
685 	if (flags->drv_only)
686 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
687 	if (flags->single_cmd)
688 		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
689 
690 	/* mac_vlan flags */
691 	if (flags->dont_consume)
692 		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
693 
694 	/* cmd */
695 	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
696 }
697 
698 static inline void
699 bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
700 			   struct bnx2x_vfop_vlan_mac_flags *flags)
701 {
702 	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
703 	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
704 }
705 
706 static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
707 				     struct bnx2x_virtf *vf,
708 				     struct bnx2x_vfop_cmd *cmd,
709 				     int qid, bool drv_only)
710 {
711 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
712 
713 	if (vfop) {
714 		struct bnx2x_vfop_args_filters filters = {
715 			.multi_filter = NULL,	/* single */
716 			.credit = NULL,		/* consume credit */
717 		};
718 		struct bnx2x_vfop_vlan_mac_flags flags = {
719 			.drv_only = drv_only,
720 			.dont_consume = (filters.credit != NULL),
721 			.single_cmd = true,
722 			.add = false /* don't care */,
723 		};
724 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
725 			&vf->op_params.vlan_mac;
726 
727 		/* set ramrod params */
728 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
729 
730 		/* set object */
731 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
732 
733 		/* set extra args */
734 		vfop->args.filters = filters;
735 
736 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
737 				 bnx2x_vfop_vlan_mac, cmd->done);
738 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
739 					     cmd->block);
740 	}
741 	return -ENOMEM;
742 }
743 
744 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
745 			    struct bnx2x_virtf *vf,
746 			    struct bnx2x_vfop_cmd *cmd,
747 			    struct bnx2x_vfop_filters *macs,
748 			    int qid, bool drv_only)
749 {
750 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
751 
752 	if (vfop) {
753 		struct bnx2x_vfop_args_filters filters = {
754 			.multi_filter = macs,
755 			.credit = NULL,		/* consume credit */
756 		};
757 		struct bnx2x_vfop_vlan_mac_flags flags = {
758 			.drv_only = drv_only,
759 			.dont_consume = (filters.credit != NULL),
760 			.single_cmd = false,
761 			.add = false, /* don't care since only the items in the
762 				       * filters list affect the sp operation,
763 				       * not the list itself
764 				       */
765 		};
766 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
767 			&vf->op_params.vlan_mac;
768 
769 		/* set ramrod params */
770 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
771 
772 		/* set object */
773 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
774 
775 		/* set extra args */
776 		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
777 		vfop->args.filters = filters;
778 
779 		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
780 				 bnx2x_vfop_vlan_mac, cmd->done);
781 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
782 					     cmd->block);
783 	}
784 	return -ENOMEM;
785 }
786 
787 int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
788 			    struct bnx2x_virtf *vf,
789 			    struct bnx2x_vfop_cmd *cmd,
790 			    int qid, u16 vid, bool add)
791 {
792 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
793 
794 	if (vfop) {
795 		struct bnx2x_vfop_args_filters filters = {
796 			.multi_filter = NULL, /* single command */
797 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
798 		};
799 		struct bnx2x_vfop_vlan_mac_flags flags = {
800 			.drv_only = false,
801 			.dont_consume = (filters.credit != NULL),
802 			.single_cmd = true,
803 			.add = add,
804 		};
805 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
806 			&vf->op_params.vlan_mac;
807 
808 		/* set ramrod params */
809 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
810 		ramrod->user_req.u.vlan.vlan = vid;
811 
812 		/* set object */
813 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
814 
815 		/* set extra args */
816 		vfop->args.filters = filters;
817 
818 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
819 				 bnx2x_vfop_vlan_mac, cmd->done);
820 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
821 					     cmd->block);
822 	}
823 	return -ENOMEM;
824 }
825 
826 static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
827 			       struct bnx2x_virtf *vf,
828 			       struct bnx2x_vfop_cmd *cmd,
829 			       int qid, bool drv_only)
830 {
831 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
832 
833 	if (vfop) {
834 		struct bnx2x_vfop_args_filters filters = {
835 			.multi_filter = NULL, /* single command */
836 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
837 		};
838 		struct bnx2x_vfop_vlan_mac_flags flags = {
839 			.drv_only = drv_only,
840 			.dont_consume = (filters.credit != NULL),
841 			.single_cmd = true,
842 			.add = false, /* don't care */
843 		};
844 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
845 			&vf->op_params.vlan_mac;
846 
847 		/* set ramrod params */
848 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
849 
850 		/* set object */
851 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
852 
853 		/* set extra args */
854 		vfop->args.filters = filters;
855 
856 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
857 				 bnx2x_vfop_vlan_mac, cmd->done);
858 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
859 					     cmd->block);
860 	}
861 	return -ENOMEM;
862 }
863 
864 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
865 			     struct bnx2x_virtf *vf,
866 			     struct bnx2x_vfop_cmd *cmd,
867 			     struct bnx2x_vfop_filters *vlans,
868 			     int qid, bool drv_only)
869 {
870 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
871 
872 	if (vfop) {
873 		struct bnx2x_vfop_args_filters filters = {
874 			.multi_filter = vlans,
875 			.credit = &bnx2x_vfq(vf, qid, vlan_count),
876 		};
877 		struct bnx2x_vfop_vlan_mac_flags flags = {
878 			.drv_only = drv_only,
879 			.dont_consume = (filters.credit != NULL),
880 			.single_cmd = false,
881 			.add = false, /* don't care */
882 		};
883 		struct bnx2x_vlan_mac_ramrod_params *ramrod =
884 			&vf->op_params.vlan_mac;
885 
886 		/* set ramrod params */
887 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
888 
889 		/* set object */
890 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
891 
892 		/* set extra args */
893 		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
894 			atomic_read(filters.credit);
895 
896 		vfop->args.filters = filters;
897 
898 		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
899 				 bnx2x_vfop_vlan_mac, cmd->done);
900 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
901 					     cmd->block);
902 	}
903 	return -ENOMEM;
904 }
905 
906 /* VFOP queue setup (queue constructor + set vlan 0) */
907 static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
908 {
909 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
910 	int qid = vfop->args.qctor.qid;
911 	enum bnx2x_vfop_qsetup_state state = vfop->state;
912 	struct bnx2x_vfop_cmd cmd = {
913 		.done = bnx2x_vfop_qsetup,
914 		.block = false,
915 	};
916 
917 	if (vfop->rc < 0)
918 		goto op_err;
919 
920 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
921 
922 	switch (state) {
923 	case BNX2X_VFOP_QSETUP_CTOR:
924 		/* init the queue ctor command */
925 		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
926 		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
927 		if (vfop->rc)
928 			goto op_err;
929 		return;
930 
931 	case BNX2X_VFOP_QSETUP_VLAN0:
932 		/* skip if non-leading or FPGA/EMU */
933 		if (qid)
934 			goto op_done;
935 
936 		/* init the queue set-vlan command (for vlan 0) */
937 		vfop->state = BNX2X_VFOP_QSETUP_DONE;
938 		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
939 		if (vfop->rc)
940 			goto op_err;
941 		return;
942 op_err:
943 	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
944 op_done:
945 	case BNX2X_VFOP_QSETUP_DONE:
946 		bnx2x_vfop_end(bp, vf, vfop);
947 		return;
948 	default:
949 		bnx2x_vfop_default(state);
950 	}
951 }
952 
953 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
954 			  struct bnx2x_virtf *vf,
955 			  struct bnx2x_vfop_cmd *cmd,
956 			  int qid)
957 {
958 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
959 
960 	if (vfop) {
961 		vfop->args.qctor.qid = qid;
962 
963 		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
964 				 bnx2x_vfop_qsetup, cmd->done);
965 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
966 					     cmd->block);
967 	}
968 	return -ENOMEM;
969 }
970 
971 /* VFOP multi-casts */
972 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
973 {
974 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
975 	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
976 	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
977 	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
978 	enum bnx2x_vfop_mcast_state state = vfop->state;
979 	int i;
980 
981 	bnx2x_vfop_reset_wq(vf);
982 
983 	if (vfop->rc < 0)
984 		goto op_err;
985 
986 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
987 
988 	switch (state) {
989 	case BNX2X_VFOP_MCAST_DEL:
990 		/* clear existing mcasts */
991 		vfop->state = BNX2X_VFOP_MCAST_ADD;
992 		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
993 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
994 
995 	case BNX2X_VFOP_MCAST_ADD:
996 		if (raw->check_pending(raw))
997 			goto op_pending;
998 
999 		if (args->mc_num) {
1000 			/* update mcast list on the ramrod params */
1001 			INIT_LIST_HEAD(&mcast->mcast_list);
1002 			for (i = 0; i < args->mc_num; i++)
1003 				list_add_tail(&(args->mc[i].link),
1004 					      &mcast->mcast_list);
1005 			/* add new mcasts */
1006 			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
1007 			vfop->rc = bnx2x_config_mcast(bp, mcast,
1008 						      BNX2X_MCAST_CMD_ADD);
1009 		}
1010 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1011 
1012 	case BNX2X_VFOP_MCAST_CHK_DONE:
1013 		vfop->rc = raw->check_pending(raw) ? 1 : 0;
1014 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1015 	default:
1016 		bnx2x_vfop_default(state);
1017 	}
1018 op_err:
1019 	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
1020 op_done:
1021 	kfree(args->mc);
1022 	bnx2x_vfop_end(bp, vf, vfop);
1023 op_pending:
1024 	return;
1025 }
1026 
1027 int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
1028 			 struct bnx2x_virtf *vf,
1029 			 struct bnx2x_vfop_cmd *cmd,
1030 			 bnx2x_mac_addr_t *mcasts,
1031 			 int mcast_num, bool drv_only)
1032 {
1033 	struct bnx2x_vfop *vfop = NULL;
1034 	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
1035 	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
1036 					   NULL;
1037 
1038 	if (!mc_sz || mc) {
1039 		vfop = bnx2x_vfop_add(bp, vf);
1040 		if (vfop) {
1041 			int i;
1042 			struct bnx2x_mcast_ramrod_params *ramrod =
1043 				&vf->op_params.mcast;
1044 
1045 			/* set ramrod params */
1046 			memset(ramrod, 0, sizeof(*ramrod));
1047 			ramrod->mcast_obj = &vf->mcast_obj;
1048 			if (drv_only)
1049 				set_bit(RAMROD_DRV_CLR_ONLY,
1050 					&ramrod->ramrod_flags);
1051 
1052 			/* copy mcasts pointers */
1053 			vfop->args.mc_list.mc_num = mcast_num;
1054 			vfop->args.mc_list.mc = mc;
1055 			for (i = 0; i < mcast_num; i++)
1056 				mc[i].mac = mcasts[i];
1057 
1058 			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
1059 					 bnx2x_vfop_mcast, cmd->done);
1060 			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
1061 						     cmd->block);
1062 		} else {
1063 			kfree(mc);
1064 		}
1065 	}
1066 	return -ENOMEM;
1067 }
1068 
1069 /* VFOP rx-mode */
1070 static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1071 {
1072 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1073 	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
1074 	enum bnx2x_vfop_rxmode_state state = vfop->state;
1075 
1076 	bnx2x_vfop_reset_wq(vf);
1077 
1078 	if (vfop->rc < 0)
1079 		goto op_err;
1080 
1081 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1082 
1083 	switch (state) {
1084 	case BNX2X_VFOP_RXMODE_CONFIG:
1085 		/* next state */
1086 		vfop->state = BNX2X_VFOP_RXMODE_DONE;
1087 
1088 		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1089 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1090 op_err:
1091 		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
1092 op_done:
1093 	case BNX2X_VFOP_RXMODE_DONE:
1094 		bnx2x_vfop_end(bp, vf, vfop);
1095 		return;
1096 	default:
1097 		bnx2x_vfop_default(state);
1098 	}
1099 op_pending:
1100 	return;
1101 }
1102 
1103 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1104 			  struct bnx2x_virtf *vf,
1105 			  struct bnx2x_vfop_cmd *cmd,
1106 			  int qid, unsigned long accept_flags)
1107 {
1108 	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1109 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1110 
1111 	if (vfop) {
1112 		struct bnx2x_rx_mode_ramrod_params *ramrod =
1113 			&vf->op_params.rx_mode;
1114 
1115 		memset(ramrod, 0, sizeof(*ramrod));
1116 
1117 		/* Prepare ramrod parameters */
1118 		ramrod->cid = vfq->cid;
1119 		ramrod->cl_id = vfq_cl_id(vf, vfq);
1120 		ramrod->rx_mode_obj = &bp->rx_mode_obj;
1121 		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1122 
1123 		ramrod->rx_accept_flags = accept_flags;
1124 		ramrod->tx_accept_flags = accept_flags;
1125 		ramrod->pstate = &vf->filter_state;
1126 		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1127 
1128 		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1129 		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1130 		set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1131 
1132 		ramrod->rdata =
1133 			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1134 		ramrod->rdata_mapping =
1135 			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1136 
1137 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1138 				 bnx2x_vfop_rxmode, cmd->done);
1139 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
1140 					     cmd->block);
1141 	}
1142 	return -ENOMEM;
1143 }
1144 
1145 /* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
1146  * queue destructor)
1147  */
1148 static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
1149 {
1150 	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
1151 	int qid = vfop->args.qx.qid;
1152 	enum bnx2x_vfop_qteardown_state state = vfop->state;
1153 	struct bnx2x_vfop_cmd cmd;
1154 
1155 	if (vfop->rc < 0)
1156 		goto op_err;
1157 
1158 	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
1159 
1160 	cmd.done = bnx2x_vfop_qdown;
1161 	cmd.block = false;
1162 
1163 	switch (state) {
1164 	case BNX2X_VFOP_QTEARDOWN_RXMODE:
1165 		/* Drop all */
1166 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
1167 		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
1168 		if (vfop->rc)
1169 			goto op_err;
1170 		return;
1171 
1172 	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
1173 		/* vlan-clear-all: don't consume credit */
1174 		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
1175 		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
1176 		if (vfop->rc)
1177 			goto op_err;
1178 		return;
1179 
1180 	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
1181 		/* mac-clear-all: consume credit */
1182 		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1183 		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
1184 		if (vfop->rc)
1185 			goto op_err;
1186 		return;
1187 
1188 	case BNX2X_VFOP_QTEARDOWN_QDTOR:
1189 		/* run the queue destruction flow */
1190 		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
1191 		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
1192 		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
1193 		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
1194 		DP(BNX2X_MSG_IOV, "returned from cmd\n");
1195 		if (vfop->rc)
1196 			goto op_err;
1197 		return;
1198 op_err:
1199 	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
1200 		  vf->abs_vfid, qid, vfop->rc);
1201 
1202 	case BNX2X_VFOP_QTEARDOWN_DONE:
1203 		bnx2x_vfop_end(bp, vf, vfop);
1204 		return;
1205 	default:
1206 		bnx2x_vfop_default(state);
1207 	}
1208 }
1209 
1210 int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
1211 			 struct bnx2x_virtf *vf,
1212 			 struct bnx2x_vfop_cmd *cmd,
1213 			 int qid)
1214 {
1215 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1216 
1217 	if (vfop) {
1218 		vfop->args.qx.qid = qid;
1219 		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
1220 				 bnx2x_vfop_qdown, cmd->done);
1221 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
1222 					     cmd->block);
1223 	}
1224 
1225 	return -ENOMEM;
1226 }
1227 
1228 /* VF enable primitives
1229  * When pretend is required, the caller is responsible
1230  * for calling pretend prior to calling these routines.
1231  */
1232 
1233 /* called only on E1H or E2.
1234  * When pretending to be PF, the pretend value is the function number 0...7
1235  * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
1236  * combination
1237  */
1238 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
1239 {
1240 	u32 pretend_reg;
1241 
1242 	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
1243 		return -1;
1244 
1245 	/* get my own pretend register */
1246 	pretend_reg = bnx2x_get_pretend_reg(bp);
1247 	REG_WR(bp, pretend_reg, pretend_func_val);
1248 	REG_RD(bp, pretend_reg);
1249 	return 0;
1250 }
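
/* Illustrative pretend sequence, as used by the callers further down:  GRC
 * accesses issued while pretending apply to the pretended function, and the
 * PF must always restore its own identity afterwards.
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 *	REG_WR(bp, <some per-function register>, val);
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */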
1251 
1252 /* internal vf enable - until vf is enabled internally all transactions
1253  * are blocked. This routine should always be the last call made under pretend.
1254  */
1255 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
1256 {
1257 	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
1258 }
1259 
1260 /* clears vf error in all semi blocks */
1261 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
1262 {
1263 	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
1264 	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
1265 	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
1266 	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
1267 }
1268 
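/* Clear the 'was error' indication for a VF in the PGLUE block.  Each
 * PGLUE_B_REG_WAS_ERROR_VF_*_CLR register covers 32 VFs, hence the >> 5
 * group selection below and the (abs_vfid & 0x1f) bit written at the end.
 */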
1269 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
1270 {
1271 	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
1272 	u32 was_err_reg = 0;
1273 
1274 	switch (was_err_group) {
1275 	case 0:
1276 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
1277 	    break;
1278 	case 1:
1279 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
1280 	    break;
1281 	case 2:
1282 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
1283 	    break;
1284 	case 3:
1285 	    was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
1286 	    break;
1287 	}
1288 	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
1289 }
1290 
1291 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1292 {
1293 	int i;
1294 	u32 val;
1295 
1296 	/* Set VF masks and configuration - pretend */
1297 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1298 
1299 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
1300 	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
1301 	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
1302 	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
1303 	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
1304 	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
1305 
1306 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1307 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
1308 	if (vf->cfg_flags & VF_CFG_INT_SIMD)
1309 		val |= IGU_VF_CONF_SINGLE_ISR_EN;
1310 	val &= ~IGU_VF_CONF_PARENT_MASK;
1311 	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
1312 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1313 
1314 	DP(BNX2X_MSG_IOV,
1315 	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
1316 	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
1317 
1318 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1319 
1320 	/* iterate over all queues, clear sb consumer */
1321 	for (i = 0; i < vf_sb_count(vf); i++) {
1322 		u8 igu_sb_id = vf_igu_sb(vf, i);
1323 
1324 		/* zero prod memory */
1325 		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
1326 
1327 		/* clear sb state machine */
1328 		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
1329 				       false /* VF */);
1330 
1331 		/* disable + update */
1332 		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
1333 				    IGU_INT_DISABLE, 1);
1334 	}
1335 }
1336 
1337 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
1338 {
1339 	/* set the VF-PF association in the FW */
1340 	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
1341 	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
1342 
1343 	/* clear vf errors*/
1344 	bnx2x_vf_semi_clear_err(bp, abs_vfid);
1345 	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
1346 
1347 	/* internal vf-enable - pretend */
1348 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
1349 	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
1350 	bnx2x_vf_enable_internal(bp, true);
1351 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1352 }
1353 
1354 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
1355 {
1356 	/* Reset vf in IGU - interrupts are still disabled */
1357 	bnx2x_vf_igu_reset(bp, vf);
1358 
1359 	/* pretend to enable the vf with the PBF */
1360 	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1361 	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
1362 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1363 }
1364 
1365 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1366 {
1367 	struct pci_dev *dev;
1368 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1369 
1370 	if (!vf)
1371 		goto unknown_dev;
1372 
1373 	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
1374 	if (dev)
1375 		return bnx2x_is_pcie_pending(dev);
1376 
1377 unknown_dev:
1378 	BNX2X_ERR("Unknown device\n");
1379 	return false;
1380 }
1381 
1382 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
1383 {
1384 	/* Wait 100ms */
1385 	msleep(100);
1386 
1387 	/* Verify no pending pci transactions */
1388 	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
1389 		BNX2X_ERR("PCIE Transactions still pending\n");
1390 
1391 	return 0;
1392 }
1393 
1394 /* must be called after the number of PF queues and the number of VFs are
1395  * both known
1396  */
1397 static void
1398 bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
1399 {
1400 	u16 vlan_count = 0;
1401 
1402 	/* will be set only during VF-ACQUIRE */
1403 	resc->num_rxqs = 0;
1404 	resc->num_txqs = 0;
1405 
1406 	/* no credit calculations for macs (just yet) */
1407 	resc->num_mac_filters = 1;
1408 
1409 	/* divvy up vlan rules */
1410 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
1411 	vlan_count = 1 << ilog2(vlan_count);
1412 	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
1413 
1414 	/* no real limitation */
1415 	resc->num_mc_filters = 0;
1416 
1417 	/* num_sbs already set */
1418 }
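
/* Illustrative example of the vlan divvy-up in bnx2x_iov_static_resc() above
 * (the numbers are hypothetical): with 250 credits left in bp->vlans_pool and
 * 64 VFs, 1 << ilog2(250) = 128, so each VF is provisioned 128 / 64 = 2 vlan
 * filters.
 */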
1419 
1420 /* IOV global initialization routines  */
1421 void bnx2x_iov_init_dq(struct bnx2x *bp)
1422 {
1423 	if (!IS_SRIOV(bp))
1424 		return;
1425 
1426 	/* Set the DQ such that the CID reflects the abs_vfid */
1427 	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1428 	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1429 
1430 	/* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong to
1431 	 * the PF L2 queues
1432 	 */
1433 	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1434 
1435 	/* The VF window size is the log2 of the max number of CIDs per VF */
1436 	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1437 
1438 	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
1439 	 * the PF doorbell size, although the two are independent.
1440 	 */
1441 	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
1442 	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
1443 
1444 	/* No security checks for now -
1445 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
1446 	 * CID range 0 - 0x1ffff
1447 	 */
1448 	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1449 	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1450 	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1451 	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1452 
1453 	/* set the number of doorbells allowed per VF to the full DQ range */
1454 	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
1455 
1456 	/* set the VF doorbell threshold */
1457 	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
1458 }
1459 
1460 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1461 {
1462 	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
1463 	if (!IS_SRIOV(bp))
1464 		return;
1465 
1466 	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1467 }
1468 
1469 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1470 {
1471 	struct pci_dev *dev = bp->pdev;
1472 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1473 
1474 	return dev->bus->number + ((dev->devfn + iov->offset +
1475 				    iov->stride * vfid) >> 8);
1476 }
1477 
1478 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1479 {
1480 	struct pci_dev *dev = bp->pdev;
1481 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1482 
1483 	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1484 }
1485 
1486 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1487 {
1488 	int i, n;
1489 	struct pci_dev *dev = bp->pdev;
1490 	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1491 
1492 	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1493 		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1494 		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1495 
1496 		do_div(size, iov->total);
1497 		vf->bars[n].bar = start + size * vf->abs_vfid;
1498 		vf->bars[n].size = size;
1499 	}
1500 }
1501 
1502 static int bnx2x_ari_enabled(struct pci_dev *dev)
1503 {
1504 	return dev->bus->self && dev->bus->self->ari_enabled;
1505 }
1506 
1507 static void
1508 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1509 {
1510 	int sb_id;
1511 	u32 val;
1512 	u8 fid;
1513 
1514 	/* IGU in normal mode - read CAM */
1515 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1516 		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1517 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1518 			continue;
1519 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1520 		if (!(fid & IGU_FID_ENCODE_IS_PF))
1521 			bnx2x_vf_set_igu_info(bp, sb_id,
1522 					      (fid & IGU_FID_VF_NUM_MASK));
1523 
1524 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1525 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1526 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1527 		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1528 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1529 	}
1530 }
1531 
1532 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1533 {
1534 	if (bp->vfdb) {
1535 		kfree(bp->vfdb->vfqs);
1536 		kfree(bp->vfdb->vfs);
1537 		kfree(bp->vfdb);
1538 	}
1539 	bp->vfdb = NULL;
1540 }
1541 
1542 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1543 {
1544 	int pos;
1545 	struct pci_dev *dev = bp->pdev;
1546 
1547 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1548 	if (!pos) {
1549 		BNX2X_ERR("failed to find SRIOV capability in device\n");
1550 		return -ENODEV;
1551 	}
1552 
1553 	iov->pos = pos;
1554 	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1555 	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1556 	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1557 	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1558 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1559 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1560 	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1561 	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1562 	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1563 
1564 	return 0;
1565 }
1566 
1567 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1568 {
1569 	u32 val;
1570 
1571 	/* read the SRIOV capability structure
1572 	 * The fields can be read via configuration read or
1573 	 * directly from the device (starting at offset PCICFG_OFFSET)
1574 	 */
1575 	if (bnx2x_sriov_pci_cfg_info(bp, iov))
1576 		return -ENODEV;
1577 
1578 	/* get the number of SRIOV bars */
1579 	iov->nres = 0;
1580 
1581 	/* read the first_vfid */
1582 	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1583 	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1584 			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1585 
1586 	DP(BNX2X_MSG_IOV,
1587 	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1588 	   BP_FUNC(bp),
1589 	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1590 	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1591 
1592 	return 0;
1593 }
1594 
1595 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
1596 {
1597 	int i;
1598 	u8 queue_count = 0;
1599 
1600 	if (IS_SRIOV(bp))
1601 		for_each_vf(bp, i)
1602 			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1603 
1604 	return queue_count;
1605 }
1606 
1607 /* must be called after PF bars are mapped */
1608 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1609 			int num_vfs_param)
1610 {
1611 	int err, i, qcount;
1612 	struct bnx2x_sriov *iov;
1613 	struct pci_dev *dev = bp->pdev;
1614 
1615 	bp->vfdb = NULL;
1616 
1617 	/* verify this is a PF */
1618 	if (IS_VF(bp))
1619 		return 0;
1620 
1621 	/* verify sriov capability is present in configuration space */
1622 	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1623 		return 0;
1624 
1625 	/* verify chip revision */
1626 	if (CHIP_IS_E1x(bp))
1627 		return 0;
1628 
1629 	/* check if SRIOV support is turned off */
1630 	if (!num_vfs_param)
1631 		return 0;
1632 
1633 	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1634 	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1635 		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1636 			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1637 		return 0;
1638 	}
1639 
1640 	/* SRIOV can be enabled only with MSIX */
1641 	if (int_mode_param == BNX2X_INT_MODE_MSI ||
1642 	    int_mode_param == BNX2X_INT_MODE_INTX)
1643 		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1644 
1645 	err = -EIO;
1646 	/* verify ari is enabled */
1647 	if (!bnx2x_ari_enabled(bp->pdev)) {
1648 		BNX2X_ERR("ARI not supported, SRIOV cannot be enabled\n");
1649 		return err;
1650 	}
1651 
1652 	/* verify igu is in normal mode */
1653 	if (CHIP_INT_MODE_IS_BC(bp)) {
1654 		BNX2X_ERR("IGU not in normal mode, SRIOV cannot be enabled\n");
1655 		return err;
1656 	}
1657 
1658 	/* allocate the vfs database */
1659 	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1660 	if (!bp->vfdb) {
1661 		BNX2X_ERR("failed to allocate vf database\n");
1662 		err = -ENOMEM;
1663 		goto failed;
1664 	}
1665 
1666 	/* get the sriov info - Linux already collected all the pertinent
1667 	 * information; however, the sriov structure is for the private use
1668 	 * of the pci module. Also, we want this information regardless
1669 	 * of the hypervisor.
1670 	 */
1671 	iov = &(bp->vfdb->sriov);
1672 	err = bnx2x_sriov_info(bp, iov);
1673 	if (err)
1674 		goto failed;
1675 
1676 	/* SR-IOV capability was enabled but there are no VFs */
1677 	if (iov->total == 0)
1678 		goto failed;
1679 
1680 	/* calculate the actual number of VFs */
1681 	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
1682 
1683 	/* allocate the vf array */
1684 	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
1685 				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
1686 	if (!bp->vfdb->vfs) {
1687 		BNX2X_ERR("failed to allocate vf array\n");
1688 		err = -ENOMEM;
1689 		goto failed;
1690 	}
1691 
1692 	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1693 	for_each_vf(bp, i) {
1694 		bnx2x_vf(bp, i, index) = i;
1695 		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1696 		bnx2x_vf(bp, i, state) = VF_FREE;
1697 		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
1698 		mutex_init(&bnx2x_vf(bp, i, op_mutex));
1699 		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1700 	}
1701 
1702 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1703 	bnx2x_get_vf_igu_cam_info(bp);
1704 
1705 	/* get the total queue count and allocate the global queue arrays */
1706 	qcount = bnx2x_iov_get_max_queue_count(bp);
1707 
1708 	/* allocate the queue arrays for all VFs */
1709 	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
1710 				 GFP_KERNEL);
1711 	if (!bp->vfdb->vfqs) {
1712 		BNX2X_ERR("failed to allocate vf queue array\n");
1713 		err = -ENOMEM;
1714 		goto failed;
1715 	}
1716 
1717 	return 0;
1718 failed:
1719 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
1720 	__bnx2x_iov_free_vfdb(bp);
1721 	return err;
1722 }
1723 
1724 void bnx2x_iov_remove_one(struct bnx2x *bp)
1725 {
1726 	/* if SRIOV is not enabled there's nothing to do */
1727 	if (!IS_SRIOV(bp))
1728 		return;
1729 
1730 	/* free vf database */
1731 	__bnx2x_iov_free_vfdb(bp);
1732 }
1733 
1734 void bnx2x_iov_free_mem(struct bnx2x *bp)
1735 {
1736 	int i;
1737 
1738 	if (!IS_SRIOV(bp))
1739 		return;
1740 
1741 	/* free vfs hw contexts */
1742 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1743 		struct hw_dma *cxt = &bp->vfdb->context[i];
1744 		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
1745 	}
1746 
1747 	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
1748 		       BP_VFDB(bp)->sp_dma.mapping,
1749 		       BP_VFDB(bp)->sp_dma.size);
1750 
1751 	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
1752 		       BP_VF_MBX_DMA(bp)->mapping,
1753 		       BP_VF_MBX_DMA(bp)->size);
1754 }
1755 
1756 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
1757 {
1758 	size_t tot_size;
1759 	int i, rc = 0;
1760 
1761 	if (!IS_SRIOV(bp))
1762 		return rc;
1763 
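	/* Note: BNX2X_PCI_ALLOC() is expected to jump to the alloc_mem_err
	 * label on failure (an assumption based on the label at the end of
	 * this function), which is why the allocations below carry no
	 * explicit error checks.
	 */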
1764 	/* allocate vfs hw contexts */
1765 	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
1766 		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
1767 
1768 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1769 		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
1770 		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
1771 
1772 		if (cxt->size) {
1773 			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
1774 		} else {
1775 			cxt->addr = NULL;
1776 			cxt->mapping = 0;
1777 		}
1778 		tot_size -= cxt->size;
1779 	}
1780 
1781 	/* allocate vfs ramrods dma memory - client_init and set_mac */
1782 	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
1783 	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
1784 			tot_size);
1785 	BP_VFDB(bp)->sp_dma.size = tot_size;
1786 
1787 	/* allocate mailboxes */
1788 	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
1789 	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
1790 			tot_size);
1791 	BP_VF_MBX_DMA(bp)->size = tot_size;
1792 
1793 	return 0;
1794 
1795 alloc_mem_err:
1796 	return -ENOMEM;
1797 }
1798 
1799 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
1800 			   struct bnx2x_vf_queue *q)
1801 {
1802 	u8 cl_id = vfq_cl_id(vf, q);
1803 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
1804 	unsigned long q_type = 0;
1805 
1806 	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1807 	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1808 
1809 	/* Queue State object */
1810 	bnx2x_init_queue_obj(bp, &q->sp_obj,
1811 			     cl_id, &q->cid, 1, func_id,
1812 			     bnx2x_vf_sp(bp, vf, q_data),
1813 			     bnx2x_vf_sp_map(bp, vf, q_data),
1814 			     q_type);
1815 
1816 	DP(BNX2X_MSG_IOV,
1817 	   "initialized vf %d's queue object. func id set to %d\n",
1818 	   vf->abs_vfid, q->sp_obj.func_id);
1819 
1820 	/* mac/vlan objects are per queue, but only those
1821 	 * that belong to the leading queue are initialized
1822 	 */
1823 	if (vfq_is_leading(q)) {
1824 		/* mac */
1825 		bnx2x_init_mac_obj(bp, &q->mac_obj,
1826 				   cl_id, q->cid, func_id,
1827 				   bnx2x_vf_sp(bp, vf, mac_rdata),
1828 				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
1829 				   BNX2X_FILTER_MAC_PENDING,
1830 				   &vf->filter_state,
1831 				   BNX2X_OBJ_TYPE_RX_TX,
1832 				   &bp->macs_pool);
1833 		/* vlan */
1834 		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
1835 				    cl_id, q->cid, func_id,
1836 				    bnx2x_vf_sp(bp, vf, vlan_rdata),
1837 				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
1838 				    BNX2X_FILTER_VLAN_PENDING,
1839 				    &vf->filter_state,
1840 				    BNX2X_OBJ_TYPE_RX_TX,
1841 				    &bp->vlans_pool);
1842 
1843 		/* mcast */
1844 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
1845 				     q->cid, func_id, func_id,
1846 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
1847 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1848 				     BNX2X_FILTER_MCAST_PENDING,
1849 				     &vf->filter_state,
1850 				     BNX2X_OBJ_TYPE_RX_TX);
1851 
1852 		vf->leading_rss = cl_id;
1853 	}
1854 }
1855 
1856 /* called by bnx2x_nic_load */
1857 int bnx2x_iov_nic_init(struct bnx2x *bp)
1858 {
1859 	int vfid, qcount, i;
1860 
1861 	if (!IS_SRIOV(bp)) {
1862 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1863 		return 0;
1864 	}
1865 
1866 	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1867 
1868 	/* initialize vf database */
1869 	for_each_vf(bp, vfid) {
1870 		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1871 
1872 		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1873 			BNX2X_CIDS_PER_VF;
1874 
1875 		union cdu_context *base_cxt = (union cdu_context *)
1876 			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1877 			(base_vf_cid & (ILT_PAGE_CIDS-1));
1878 
1879 		DP(BNX2X_MSG_IOV,
1880 		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1881 		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1882 		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1883 
1884 		/* init statically provisioned resources */
1885 		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
1886 
1887 		/* queues are initialized during VF-ACQUIRE */
1888 
1889 		/* reserve the vf vlan credit */
1890 		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
1891 
1892 		vf->filter_state = 0;
1893 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1894 
1895 		/* init mcast object - This object will be re-initialized
1896 		 *  during VF-ACQUIRE with the proper cl_id and cid.
1897 		 *  It needs to be initialized here so that it can be safely
1898 		 *  handled by a subsequent FLR flow.
1899 		 */
1900 		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1901 				     0xFF, 0xFF, 0xFF,
1902 				     bnx2x_vf_sp(bp, vf, mcast_rdata),
1903 				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1904 				     BNX2X_FILTER_MCAST_PENDING,
1905 				     &vf->filter_state,
1906 				     BNX2X_OBJ_TYPE_RX_TX);
1907 
1908 		/* set the mailbox message addresses */
1909 		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1910 			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1911 			MBX_MSG_ALIGNED_SIZE);
1912 
1913 		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1914 			vfid * MBX_MSG_ALIGNED_SIZE;
1915 
1916 		/* Enable vf mailbox */
1917 		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1918 	}
1919 
1920 	/* Final VF init */
1921 	qcount = 0;
1922 	for_each_vf(bp, i) {
1923 		struct bnx2x_virtf *vf = BP_VF(bp, i);
1924 
1925 		/* fill in the BDF and bars */
1926 		vf->bus = bnx2x_vf_bus(bp, i);
1927 		vf->devfn = bnx2x_vf_devfn(bp, i);
1928 		bnx2x_vf_set_bars(bp, vf);
1929 
1930 		DP(BNX2X_MSG_IOV,
1931 		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1932 		   vf->abs_vfid, vf->bus, vf->devfn,
1933 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1934 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1935 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1936 
1937 		/* set local queue arrays */
1938 		vf->vfqs = &bp->vfdb->vfqs[qcount];
1939 		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
1940 	}
1941 
1942 	return 0;
1943 }
1944 
1945 /* called by bnx2x_init_hw_func, returns the next ilt line */
1946 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1947 {
1948 	int i;
1949 	struct bnx2x_ilt *ilt = BP_ILT(bp);
1950 
1951 	if (!IS_SRIOV(bp))
1952 		return line;
1953 
1954 	/* set vfs ilt lines */
1955 	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1956 		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1957 
1958 		ilt->lines[line+i].page = hw_cxt->addr;
1959 		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1960 		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1961 	}
1962 	return line + i;
1963 }
1964 
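/* true iff the cid falls inside the window reserved for VF cids */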
1965 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1966 {
1967 	return ((cid >= BNX2X_FIRST_VF_CID) &&
1968 		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1969 }
1970 
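/* complete a pending MAC or VLAN classification ramrod on behalf of the VF
 * and let the sp objects push out any commands queued behind it
 */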
1971 static
1972 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1973 					struct bnx2x_vf_queue *vfq,
1974 					union event_ring_elem *elem)
1975 {
1976 	unsigned long ramrod_flags = 0;
1977 	int rc = 0;
1978 
1979 	/* Always push next commands out, don't wait here */
1980 	set_bit(RAMROD_CONT, &ramrod_flags);
1981 
1982 	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
1983 	case BNX2X_FILTER_MAC_PENDING:
1984 		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1985 					   &ramrod_flags);
1986 		break;
1987 	case BNX2X_FILTER_VLAN_PENDING:
1988 		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1989 					    &ramrod_flags);
1990 		break;
1991 	default:
1992 		BNX2X_ERR("Unsupported classification command: %d\n",
1993 			  elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT);
1994 		return;
1995 	}
1996 	if (rc < 0)
1997 		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1998 	else if (rc > 0)
1999 		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
2000 }
2001 
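/* mcast ramrod completion - clear the object's pending bit and send any
 * multicast commands that are still queued
 */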
2002 static
2003 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
2004 			       struct bnx2x_virtf *vf)
2005 {
2006 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2007 	int rc;
2008 
2009 	rparam.mcast_obj = &vf->mcast_obj;
2010 	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2011 
2012 	/* If there are pending mcast commands - send them */
2013 	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2014 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2015 		if (rc < 0)
2016 			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2017 				  rc);
2018 	}
2019 }
2020 
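/* rx-mode (filters) ramrod completion - clear the pending flag in the VF's
 * filter state
 */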
2021 static
2022 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2023 				 struct bnx2x_virtf *vf)
2024 {
2025 	smp_mb__before_clear_bit();
2026 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2027 	smp_mb__after_clear_bit();
2028 }
2029 
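/* returns 1 when the event is not VF related and should be handled by the
 * caller, 0 when it was consumed here
 */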
2030 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2031 {
2032 	struct bnx2x_virtf *vf;
2033 	int qidx = 0, abs_vfid;
2034 	u8 opcode;
2035 	u16 cid = 0xffff;
2036 
2037 	if (!IS_SRIOV(bp))
2038 		return 1;
2039 
2040 	/* first get the cid - the only events we handle here are cfc-delete
2041 	 * and set-mac completion
2042 	 */
2043 	opcode = elem->message.opcode;
2044 
2045 	switch (opcode) {
2046 	case EVENT_RING_OPCODE_CFC_DEL:
2047 		cid = SW_CID((__force __le32)
2048 			     elem->message.data.cfc_del_event.cid);
2049 		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2050 		break;
2051 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2052 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2053 	case EVENT_RING_OPCODE_FILTERS_RULES:
2054 		cid = (elem->message.data.eth_event.echo &
2055 		       BNX2X_SWCID_MASK);
2056 		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2057 		break;
2058 	case EVENT_RING_OPCODE_VF_FLR:
2059 		abs_vfid = elem->message.data.vf_flr_event.vf_id;
2060 		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2061 		   abs_vfid);
2062 		goto get_vf;
2063 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2064 		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2065 		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
2066 		   abs_vfid);
2067 		goto get_vf;
2068 	default:
2069 		return 1;
2070 	}
2071 
2072 	/* check if the cid is in the VF range */
2073 	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2074 		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2075 		return 1;
2076 	}
2077 
2078 	/* extract vf and rxq index from vf_cid - relies on the following:
2079 	 * 1. vfid on cid reflects the true abs_vfid
2080 	 * 2. the max number of VFs (per path) is 64
2081 	 */
2082 	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
2083 	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2084 get_vf:
2085 	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
2086 
2087 	if (!vf) {
2088 		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
2089 			  cid, abs_vfid);
2090 		return 0;
2091 	}
2092 
2093 	switch (opcode) {
2094 	case EVENT_RING_OPCODE_CFC_DEL:
2095 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
2096 		   vf->abs_vfid, qidx);
2097 		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
2098 						       &vfq_get(vf,
2099 								qidx)->sp_obj,
2100 						       BNX2X_Q_CMD_CFC_DEL);
2101 		break;
2102 	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2103 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
2104 		   vf->abs_vfid, qidx);
2105 		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
2106 		break;
2107 	case EVENT_RING_OPCODE_MULTICAST_RULES:
2108 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
2109 		   vf->abs_vfid, qidx);
2110 		bnx2x_vf_handle_mcast_eqe(bp, vf);
2111 		break;
2112 	case EVENT_RING_OPCODE_FILTERS_RULES:
2113 		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
2114 		   vf->abs_vfid, qidx);
2115 		bnx2x_vf_handle_filters_eqe(bp, vf);
2116 		break;
2117 	case EVENT_RING_OPCODE_VF_FLR:
2118 		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
2119 		   vf->abs_vfid);
2120 		/* Do nothing for now */
2121 		break;
2122 	case EVENT_RING_OPCODE_MALICIOUS_VF:
2123 		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
2124 		   vf->abs_vfid);
2125 		/* Do nothing for now */
2126 		break;
2127 	}
2128 	/* SRIOV: reschedule any 'in_progress' operations */
2129 	bnx2x_iov_sp_event(bp, cid, false);
2130 
2131 	return 0;
2132 }
2133 
2134 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
2135 {
2136 	/* extract the vf from vf_cid - relies on the following:
2137 	 * 1. vfid on cid reflects the true abs_vfid
2138 	 * 2. the max number of VFs (per path) is 64
2139 	 */
2140 	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
2141 	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
2142 }
2143 
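/* if the cid belongs to a VF, point *q_obj at that VF queue's state object
 * so the common slow-path code can complete ramrods on it
 */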
2144 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
2145 				struct bnx2x_queue_sp_obj **q_obj)
2146 {
2147 	struct bnx2x_virtf *vf;
2148 
2149 	if (!IS_SRIOV(bp))
2150 		return;
2151 
2152 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2153 
2154 	if (vf) {
2155 		/* extract queue index from vf_cid - relies on the following:
2156 		 * 1. vfid on cid reflects the true abs_vfid
2157 		 * 2. the max number of VFs (per path) is 64
2158 		 */
2159 		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2160 		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2161 	} else {
2162 		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2163 	}
2164 }
2165 
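/* mark the owning VF's current operation as 'in progress' and, if requested,
 * schedule the slow-path task to advance it
 */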
2166 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2167 {
2168 	struct bnx2x_virtf *vf;
2169 
2170 	/* check if the cid is in the VF range */
2171 	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2172 		return;
2173 
2174 	vf = bnx2x_vf_by_cid(bp, vf_cid);
2175 	if (vf) {
2176 		/* set in_progress flag */
2177 		atomic_set(&vf->op_in_progress, 1);
2178 		if (queue_work)
2179 			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2180 	}
2181 }
2182 
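/* append a stats query entry for every active queue of every enabled VF,
 * after the PF's own (and FCoE) queries
 */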
2183 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2184 {
2185 	int i;
2186 	int first_queue_query_index, num_queues_req;
2187 	dma_addr_t cur_data_offset;
2188 	struct stats_query_entry *cur_query_entry;
2189 	u8 stats_count = 0;
2190 	bool is_fcoe = false;
2191 
2192 	if (!IS_SRIOV(bp))
2193 		return;
2194 
2195 	if (!NO_FCOE(bp))
2196 		is_fcoe = true;
2197 
2198 	/* fcoe adds one global request and one queue request */
2199 	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2200 	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2201 		(is_fcoe ? 0 : 1);
2202 
2203 	DP(BNX2X_MSG_IOV,
2204 	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2205 	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2206 	   first_queue_query_index + num_queues_req);
2207 
2208 	cur_data_offset = bp->fw_stats_data_mapping +
2209 		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2210 		num_queues_req * sizeof(struct per_queue_stats);
2211 
2212 	cur_query_entry = &bp->fw_stats_req->
2213 		query[first_queue_query_index + num_queues_req];
2214 
2215 	for_each_vf(bp, i) {
2216 		int j;
2217 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2218 
2219 		if (vf->state != VF_ENABLED) {
2220 			DP(BNX2X_MSG_IOV,
2221 			   "vf %d not enabled so no stats for it\n",
2222 			   vf->abs_vfid);
2223 			continue;
2224 		}
2225 
2226 		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2227 		for_each_vfq(vf, j) {
2228 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2229 
2230 			/* collect stats for active queues only */
2231 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2232 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
2233 				continue;
2234 
2235 			/* create stats query entry for this queue */
2236 			cur_query_entry->kind = STATS_TYPE_QUEUE;
2237 			cur_query_entry->index = vfq_cl_id(vf, rxq);
2238 			cur_query_entry->funcID =
2239 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2240 			cur_query_entry->address.hi =
2241 				cpu_to_le32(U64_HI(vf->fw_stat_map));
2242 			cur_query_entry->address.lo =
2243 				cpu_to_le32(U64_LO(vf->fw_stat_map));
2244 			DP(BNX2X_MSG_IOV,
2245 			   "added address %x %x for vf %d queue %d client %d\n",
2246 			   cur_query_entry->address.hi,
2247 			   cur_query_entry->address.lo, cur_query_entry->funcID,
2248 			   j, cur_query_entry->index);
2249 			cur_query_entry++;
2250 			cur_data_offset += sizeof(struct per_queue_stats);
2251 			stats_count++;
2252 		}
2253 	}
2254 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2255 }
2256 
2257 void bnx2x_iov_sp_task(struct bnx2x *bp)
2258 {
2259 	int i;
2260 
2261 	if (!IS_SRIOV(bp))
2262 		return;
2263 	/* Iterate over all VFs and invoke state transition for VFs with
2264 	 * 'in-progress' slow-path operations
2265 	 */
2266 	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
2267 	for_each_vf(bp, i) {
2268 		struct bnx2x_virtf *vf = BP_VF(bp, i);
2269 
2270 		if (!list_empty(&vf->op_list_head) &&
2271 		    atomic_read(&vf->op_in_progress)) {
2272 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2273 			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2274 		}
2275 	}
2276 }
2277 
2278 static inline
2279 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2280 {
2281 	int i;
2282 	struct bnx2x_virtf *vf = NULL;
2283 
2284 	for_each_vf(bp, i) {
2285 		vf = BP_VF(bp, i);
2286 		if (stat_id >= vf->igu_base_id &&
2287 		    stat_id < vf->igu_base_id + vf_sb_count(vf))
2288 			break;
2289 	}
2290 	return vf;
2291 }
2292 
2293 /* VF API helpers */
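/* grant or revoke a VF's access to a queue zone in the PXP permission table */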
2294 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2295 				u8 enable)
2296 {
2297 	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2298 	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2299 
2300 	REG_WR(bp, reg, val);
2301 }
2302 
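/* a VF may use no more queues than it has status blocks or cids, capped by
 * the HW maximum
 */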
2303 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2304 {
2305 	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2306 		     BNX2X_VF_MAX_QUEUES);
2307 }
2308 
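/* check that a resource request fits within the VF's statically provisioned
 * maximums
 */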
2309 static
2310 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2311 			    struct vf_pf_resc_request *req_resc)
2312 {
2313 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2314 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2315 
2316 	return ((req_resc->num_rxqs <= rxq_cnt) &&
2317 		(req_resc->num_txqs <= txq_cnt) &&
2318 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
2319 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2320 		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2321 }
2322 
2323 /* CORE VF API */
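/* commit the VF's resource counters, assign cids and CDU contexts to its
 * queues, init their slow-path objects and move the VF to VF_ACQUIRED
 */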
2324 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2325 		     struct vf_pf_resc_request *resc)
2326 {
2327 	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2328 		BNX2X_CIDS_PER_VF;
2329 
2330 	union cdu_context *base_cxt = (union cdu_context *)
2331 		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2332 		(base_vf_cid & (ILT_PAGE_CIDS-1));
2333 	int i;
2334 
2335 	/* if state is 'acquired' the VF was not released or FLR'd, in
2336 	 * this case the returned resources match the already acquired
2337 	 * resources. Verify that the requested numbers do not exceed
2338 	 * the already acquired numbers.
2339 	 */
2340 	if (vf->state == VF_ACQUIRED) {
2341 		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2342 		   vf->abs_vfid);
2343 
2344 		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2345 			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must not exceed the previously acquired numbers\n",
2346 				  vf->abs_vfid);
2347 			return -EINVAL;
2348 		}
2349 		return 0;
2350 	}
2351 
2352 	/* Otherwise vf state must be 'free' or 'reset' */
2353 	if (vf->state != VF_FREE && vf->state != VF_RESET) {
2354 		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2355 			  vf->abs_vfid, vf->state);
2356 		return -EINVAL;
2357 	}
2358 
2359 	/* static allocation:
2360 	 * the global maximum numbers are fixed per VF. Fail the request if
2361 	 * the requested numbers exceed these globals
2362 	 */
2363 	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2364 		DP(BNX2X_MSG_IOV,
2365 		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2366 		/* set the max resource in the vf */
2367 		return -ENOMEM;
2368 	}
2369 
2370 	/* Set resource counters - a request of 0 means max available */
2371 	vf_sb_count(vf) = resc->num_sbs;
2372 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2373 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2374 	if (resc->num_mac_filters)
2375 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2376 	if (resc->num_vlan_filters)
2377 		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2378 
2379 	DP(BNX2X_MSG_IOV,
2380 	   "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2381 	   vf_sb_count(vf), vf_rxq_count(vf),
2382 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
2383 	   vf_vlan_rules_cnt(vf));
2384 
2385 	/* Initialize the queues */
2386 	if (!vf->vfqs) {
2387 		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2388 		return -EINVAL;
2389 	}
2390 
2391 	for_each_vfq(vf, i) {
2392 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
2393 
2394 		if (!q) {
2395 			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
2396 			return -EINVAL;
2397 		}
2398 
2399 		q->index = i;
2400 		q->cxt = &((base_cxt + i)->eth);
2401 		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2402 
2403 		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2404 		   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2405 
2406 		/* init SP objects */
2407 		bnx2x_vfq_init(bp, vf, q);
2408 	}
2409 	vf->state = VF_ACQUIRED;
2410 	return 0;
2411 }
2412 
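/* FW/HW init of the VF - status blocks, IGU statistics, the function init
 * ramrod and enabling of the VF's queues in the permission table
 */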
2413 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2414 {
2415 	struct bnx2x_func_init_params func_init = {0};
2416 	u16 flags = 0;
2417 	int i;
2418 
2419 	/* the sb resources are initialized at this point, do the
2420 	 * FW/HW initializations
2421 	 */
2422 	for_each_vf_sb(vf, i)
2423 		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2424 			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2425 
2426 	/* Sanity checks */
2427 	if (vf->state != VF_ACQUIRED) {
2428 		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2429 		   vf->abs_vfid, vf->state);
2430 		return -EINVAL;
2431 	}
2432 	/* FLR cleanup epilogue */
2433 	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2434 		return -EBUSY;
2435 
2436 	/* reset IGU VF statistics: MSIX */
2437 	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2438 
2439 	/* vf init */
2440 	if (vf->cfg_flags & VF_CFG_STATS)
2441 		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2442 
2443 	if (vf->cfg_flags & VF_CFG_TPA)
2444 		flags |= FUNC_FLG_TPA;
2445 
2446 	if (is_vf_multi(vf))
2447 		flags |= FUNC_FLG_RSS;
2448 
2449 	/* function setup */
2450 	func_init.func_flgs = flags;
2451 	func_init.pf_id = BP_FUNC(bp);
2452 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2453 	func_init.fw_stat_map = vf->fw_stat_map;
2454 	func_init.spq_map = vf->spq_map;
2455 	func_init.spq_prod = 0;
2456 	bnx2x_func_init(bp, &func_init);
2457 
2458 	/* Enable the vf */
2459 	bnx2x_vf_enable_access(bp, vf->abs_vfid);
2460 	bnx2x_vf_enable_traffic(bp, vf);
2461 
2462 	/* queue protection table */
2463 	for_each_vfq(vf, i)
2464 		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2465 				    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2466 
2467 	vf->state = VF_ENABLED;
2468 
2469 	return 0;
2470 }
2471 
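/* serialize VF-PF channel requests - take the per-VF mutex and record which
 * TLV currently owns the channel
 */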
2472 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2473 			      enum channel_tlvs tlv)
2474 {
2475 	/* lock the channel */
2476 	mutex_lock(&vf->op_mutex);
2477 
2478 	/* record the locking op */
2479 	vf->op_current = tlv;
2480 
2481 	/* log the lock */
2482 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2483 	   vf->abs_vfid, tlv);
2484 }
2485 
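/* release the VF-PF channel - warn if the unlocking TLV does not match the
 * recorded owner
 */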
2486 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2487 				enum channel_tlvs expected_tlv)
2488 {
2489 	WARN(expected_tlv != vf->op_current,
2490 	     "lock mismatch: expected %d found %d", expected_tlv,
2491 	     vf->op_current);
2492 
2493 	/* unlock the channel */
2494 	mutex_unlock(&vf->op_mutex);
2495 
2496 	/* log the unlock */
2497 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2498 	   vf->abs_vfid, vf->op_current);
2499 
2500 	/* clear the locking op */
2501 	vf->op_current = CHANNEL_TLV_NONE;
2502 }
2503