1 /* bnx2x_vfpf.c: QLogic Everest network driver.
2  *
3  * Copyright 2009-2013 Broadcom Corporation
4  * Copyright 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * Unless you and QLogic execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2, available
10  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
11  *
12  * Notwithstanding the above, under no circumstances may you combine this
13  * software in any way with any other QLogic software provided under a
14  * license other than the GPL, without QLogic's express prior written
15  * consent.
16  *
17  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
18  * Written by: Shmulik Ravid
19  *	       Ariel Elior <ariel.elior@qlogic.com>
20  */
21 
22 #include "bnx2x.h"
23 #include "bnx2x_cmn.h"
24 #include <linux/crc32.h>
25 
26 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
27 
28 /* place a given tlv on the tlv buffer at a given offset */
29 static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
30 			  u16 offset, u16 type, u16 length)
31 {
32 	struct channel_tlv *tl =
33 		(struct channel_tlv *)(tlvs_list + offset);
34 
35 	tl->type = type;
36 	tl->length = length;
37 }
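/* Usage sketch (illustrative only; CHANNEL_TLV_FOO and struct foo_tlv are
 * hypothetical stand-ins for real types such as CHANNEL_TLV_ACQUIRE):
 * callers chain TLVs by advancing the offset past each header already
 * placed, then terminate the list:
 *
 *	u16 ofs = 0;
 *
 *	bnx2x_add_tlv(bp, buf, ofs, CHANNEL_TLV_FOO, sizeof(struct foo_tlv));
 *	ofs += sizeof(struct foo_tlv);
 *	bnx2x_add_tlv(bp, buf, ofs, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */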
38 
39 /* Clear the mailbox and init the header of the first tlv */
40 static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
41 			    u16 type, u16 length)
42 {
43 	mutex_lock(&bp->vf2pf_mutex);
44 
45 	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
46 	   type);
47 
48 	/* Clear mailbox */
49 	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
50 
51 	/* init type and length */
52 	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
53 
54 	/* init first tlv header */
55 	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
56 }
57 
58 /* releases the mailbox */
59 static void bnx2x_vfpf_finalize(struct bnx2x *bp,
60 				struct vfpf_first_tlv *first_tlv)
61 {
62 	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
63 	   first_tlv->tl.type);
64 
65 	mutex_unlock(&bp->vf2pf_mutex);
66 }
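/* Request life cycle used by every sender in this file (a sketch of the
 * existing convention, not a new API): prep takes bp->vf2pf_mutex and
 * finalize releases it, so every exit path from a sender must reach
 * finalize:
 *
 *	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_..., sizeof(*req));
 *	... build request, terminate with CHANNEL_TLV_LIST_END ...
 *	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
 *	...
 *	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 */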
67 
68 /* Finds a TLV by type in a TLV buffer; if found, returns a pointer to the TLV */
69 static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
70 				   enum channel_tlvs req_tlv)
71 {
72 	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
73 
74 	do {
75 		if (tlv->type == req_tlv)
76 			return tlv;
77 
78 		if (!tlv->length) {
79 			BNX2X_ERR("Found TLV with length 0\n");
80 			return NULL;
81 		}
82 
83 		tlvs_list += tlv->length;
84 		tlv = (struct channel_tlv *)tlvs_list;
85 	} while (tlv->type != CHANNEL_TLV_LIST_END);
86 
87 	DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
88 
89 	return NULL;
90 }
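/* Example use (mirrors the acquire flow below): optional response TLVs are
 * located by type rather than by fixed offset, so an older PF that omits a
 * TLV simply yields NULL here:
 *
 *	struct vfpf_port_phys_id_resp_tlv *p =
 *		bnx2x_search_tlv_list(bp, resp, CHANNEL_TLV_PHYS_PORT_ID);
 *	if (p)
 *		memcpy(bp->phys_port_id, p->id, ETH_ALEN);
 */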
91 
92 /* list the types and lengths of the tlvs on the buffer */
93 static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
94 {
95 	int i = 1;
96 	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
97 
98 	while (tlv->type != CHANNEL_TLV_LIST_END) {
99 		/* output tlv */
100 		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
101 		   tlv->type, tlv->length);
102 
103 		/* advance to next tlv */
104 		tlvs_list += tlv->length;
105 
106 		/* cast general tlv list pointer to channel tlv header */
107 		tlv = (struct channel_tlv *)tlvs_list;
108 
109 		i++;
110 
111 		/* break condition for this loop */
112 		if (i > MAX_TLVS_IN_LIST) {
113 			WARN(true, "corrupt tlvs");
114 			return;
115 		}
116 	}
117 
118 	/* output last tlv */
119 	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
120 	   tlv->type, tlv->length);
121 }
122 
123 /* test whether we support a tlv type */
124 bool bnx2x_tlv_supported(u16 tlvtype)
125 {
126 	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
127 }
128 
129 static inline int bnx2x_pfvf_status_codes(int rc)
130 {
131 	switch (rc) {
132 	case 0:
133 		return PFVF_STATUS_SUCCESS;
134 	case -ENOMEM:
135 		return PFVF_STATUS_NO_RESOURCE;
136 	default:
137 		return PFVF_STATUS_FAILURE;
138 	}
139 }
140 
141 static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
142 {
143 	struct cstorm_vf_zone_data __iomem *zone_data =
144 		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
145 	int tout = 100, interval = 100; /* wait for 10 seconds */
146 
147 	if (*done) {
148 		BNX2X_ERR("done was non-zero before message to pf was sent\n");
149 		WARN_ON(true);
150 		return -EINVAL;
151 	}
152 
153 	/* If the PF indicated the channel is down, avoid sending the message;
154 	 * report success via *done so the calling flow can continue.
155 	 */
156 	bnx2x_sample_bulletin(bp);
157 	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
158 		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
159 		*done = PFVF_STATUS_SUCCESS;
160 		return -EINVAL;
161 	}
162 
163 	/* Write message address */
164 	writel(U64_LO(msg_mapping),
165 	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
166 	writel(U64_HI(msg_mapping),
167 	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
168 
169 	/* make sure the address is written before FW accesses it */
170 	wmb();
171 
172 	/* Trigger the PF FW */
173 	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
174 
175 	/* Wait for PF to complete */
176 	while ((tout >= 0) && (!*done)) {
177 		msleep(interval);
178 		tout -= 1;
179 
180 		/* progress indicator - HV can take its own sweet time in
181 		 * answering VFs...
182 		 */
183 		DP_CONT(BNX2X_MSG_IOV, ".");
184 	}
185 
186 	if (!*done) {
187 		BNX2X_ERR("PF response has timed out\n");
188 		return -EAGAIN;
189 	}
190 	DP(BNX2X_MSG_SP, "Got a response from PF\n");
191 	return 0;
192 }
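/* The VF->PF handshake above, step by step (a summary of the code, not new
 * behaviour):
 *
 *	writel(lo/hi)	- publish the request's DMA address to the zone
 *	wmb()		- order the address writes before the trigger
 *	writeb(1, ...)	- raise addr_valid so the PF FW fetches the request
 *	poll *done	- the PF writes the response status when finished
 *
 * Callers below pass &resp->hdr.status as *done, so a successful wait
 * leaves the PFVF_STATUS_* code in place for the caller to inspect.
 */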
193 
194 static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
195 {
196 	u32 me_reg;
197 	int tout = 10, interval = 100; /* Wait for 1 sec */
198 
199 	do {
200 		/* pxp traps vf read of doorbells and returns me reg value */
201 		me_reg = readl(bp->doorbells);
202 		if (GOOD_ME_REG(me_reg))
203 			break;
204 
205 		msleep(interval);
206 
207 		BNX2X_ERR("Invalid ME register value: 0x%08x. Is the pf driver up?\n",
208 			  me_reg);
209 	} while (tout-- > 0);
210 
211 	if (!GOOD_ME_REG(me_reg)) {
212 		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
213 		return -EINVAL;
214 	}
215 
216 	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
217 
218 	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
219 
220 	return 0;
221 }
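/* Worked example (field layout illustrative only - assuming a 6-bit VF
 * number at bit 9 plus a valid bit at bit 8): me_reg == 0x00001300 has the
 * valid bit set and decodes as
 *
 *	vf_id = (0x1300 & (0x3f << 9)) >> 9 = 9
 */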
222 
223 int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
224 {
225 	int rc = 0, attempts = 0;
226 	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
227 	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
228 	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
229 	struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
230 	u32 vf_id;
231 	bool resources_acquired = false;
232 
233 	/* clear mailbox and prep first tlv */
234 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
235 
236 	if (bnx2x_get_vf_id(bp, &vf_id)) {
237 		rc = -EAGAIN;
238 		goto out;
239 	}
240 
241 	req->vfdev_info.vf_id = vf_id;
242 	req->vfdev_info.vf_os = 0;
243 	req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;
244 
245 	req->resc_request.num_rxqs = rx_count;
246 	req->resc_request.num_txqs = tx_count;
247 	req->resc_request.num_sbs = bp->igu_sb_cnt;
248 	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
249 	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
250 	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
251 
252 	/* pf 2 vf bulletin board address */
253 	req->bulletin_addr = bp->pf2vf_bulletin_mapping;
254 
255 	/* Request physical port identifier */
256 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
257 		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
258 
259 	/* Bulletin support for bulletin board with length > legacy length */
260 	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
261 	/* vlan filtering is supported */
262 	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
263 
264 	/* add list termination tlv */
265 	bnx2x_add_tlv(bp, req,
266 		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
267 		      CHANNEL_TLV_LIST_END,
268 		      sizeof(struct channel_list_end_tlv));
269 
270 	/* output tlvs list */
271 	bnx2x_dp_tlv_list(bp, req);
272 
273 	while (!resources_acquired) {
274 		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
275 
276 		/* send acquire request */
277 		rc = bnx2x_send_msg2pf(bp,
278 				       &resp->hdr.status,
279 				       bp->vf2pf_mbox_mapping);
280 
281 		/* PF timeout */
282 		if (rc)
283 			goto out;
284 
285 		/* copy acquire response from buffer to bp */
286 		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
287 
288 		attempts++;
289 
290 		/* test whether the PF accepted our request. If not, humble
291 		 * the request and try again.
292 		 */
293 		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
294 			DP(BNX2X_MSG_SP, "resources acquired\n");
295 			resources_acquired = true;
296 		} else if (bp->acquire_resp.hdr.status ==
297 			   PFVF_STATUS_NO_RESOURCE &&
298 			   attempts < VF_ACQUIRE_THRESH) {
299 			DP(BNX2X_MSG_SP,
300 			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");
301 
302 			/* humble our request */
303 			req->resc_request.num_txqs =
304 				min(req->resc_request.num_txqs,
305 				    bp->acquire_resp.resc.num_txqs);
306 			req->resc_request.num_rxqs =
307 				min(req->resc_request.num_rxqs,
308 				    bp->acquire_resp.resc.num_rxqs);
309 			req->resc_request.num_sbs =
310 				min(req->resc_request.num_sbs,
311 				    bp->acquire_resp.resc.num_sbs);
312 			req->resc_request.num_mac_filters =
313 				min(req->resc_request.num_mac_filters,
314 				    bp->acquire_resp.resc.num_mac_filters);
315 			req->resc_request.num_vlan_filters =
316 				min(req->resc_request.num_vlan_filters,
317 				    bp->acquire_resp.resc.num_vlan_filters);
318 			req->resc_request.num_mc_filters =
319 				min(req->resc_request.num_mc_filters,
320 				    bp->acquire_resp.resc.num_mc_filters);
321 
322 			/* Clear response buffer */
323 			memset(&bp->vf2pf_mbox->resp, 0,
324 			       sizeof(union pfvf_tlvs));
325 		} else {
326 			/* Determine why the PF failed the acquire process */
327 			fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
328 							    CHANNEL_TLV_FP_HSI_SUPPORT);
329 			if (fp_hsi_resp && !fp_hsi_resp->is_supported)
330 				BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
331 			else
332 				BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
333 					  bp->acquire_resp.hdr.status);
334 			rc = -EAGAIN;
335 			goto out;
336 		}
337 	}
338 
339 	/* Retrieve physical port id (if possible) */
340 	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
341 			 bnx2x_search_tlv_list(bp, resp,
342 					       CHANNEL_TLV_PHYS_PORT_ID);
343 	if (phys_port_resp) {
344 		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
345 		bp->flags |= HAS_PHYS_PORT_ID;
346 	}
347 
348 	/* Old hypervisors might not even support the FP_HSI_SUPPORT TLV.
349 	 * If that's the case, we need to make certain the FW we require
350 	 * was supported by such a hypervisor [i.e., v0-v2].
351 	 */
352 	fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
353 					    CHANNEL_TLV_FP_HSI_SUPPORT);
354 	if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
355 		BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");
356 
357 		/* Since acquire succeeded on the PF side, we need to send a
358 		 * release message in order to allow future probes.
359 		 */
360 		bnx2x_vfpf_finalize(bp, &req->first_tlv);
361 		bnx2x_vfpf_release(bp);
362 
363 		rc = -EINVAL;
364 		goto out;
365 	}
366 
367 	/* get HW info */
368 	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
369 	bp->link_params.chip_id = bp->common.chip_id;
370 	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
371 	bp->common.int_block = INT_BLOCK_IGU;
372 	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
373 	bp->igu_dsb_id = -1;
374 	bp->mf_ov = 0;
375 	bp->mf_mode = 0;
376 	bp->common.flash_size = 0;
377 	bp->flags |=
378 		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
379 	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
380 	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
381 	bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
382 
383 	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
384 		sizeof(bp->fw_ver));
385 
386 	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
387 		memcpy(bp->dev->dev_addr,
388 		       bp->acquire_resp.resc.current_mac_addr,
389 		       ETH_ALEN);
390 
391 out:
392 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
393 	return rc;
394 }
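/* Probe-time ordering of the channel requests in this file (an illustrative
 * sketch; the real driver drives these from its own probe/open/close paths
 * with full error handling):
 *
 *	rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 *	if (!rc)
 *		rc = bnx2x_vfpf_init(bp);
 *	...
 *	bnx2x_vfpf_close_vf(bp);
 *	bnx2x_vfpf_release(bp);
 */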
395 
396 int bnx2x_vfpf_release(struct bnx2x *bp)
397 {
398 	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
399 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
400 	int rc;
401 	u32 vf_id;

402 	/* clear mailbox and prep first tlv */
403 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
404 
405 	if (bnx2x_get_vf_id(bp, &vf_id)) {
406 		rc = -EAGAIN;
407 		goto out;
408 	}
409 
410 	req->vf_id = vf_id;
411 
412 	/* add list termination tlv */
413 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
414 		      sizeof(struct channel_list_end_tlv));
415 
416 	/* output tlvs list */
417 	bnx2x_dp_tlv_list(bp, req);
418 
419 	/* send release request */
420 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
421 
422 	if (rc)
423 		/* PF timeout */
424 		goto out;
425 
426 	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
427 		/* PF released us */
428 		DP(BNX2X_MSG_SP, "vf released\n");
429 	} else {
430 		/* PF reports error */
431 		BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
432 			  resp->hdr.status);
433 		rc = -EAGAIN;
434 		goto out;
435 	}
436 out:
437 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
438 
439 	return rc;
440 }
441 
442 /* Tell PF about SB addresses */
443 int bnx2x_vfpf_init(struct bnx2x *bp)
444 {
445 	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
446 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
447 	int rc, i;
448 
449 	/* clear mailbox and prep first tlv */
450 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
451 
452 	/* status blocks */
453 	for_each_eth_queue(bp, i)
454 		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
455 						       status_blk_mapping);
456 
457 	/* statistics - the request only supports a single queue for now */
458 	req->stats_addr = bp->fw_stats_data_mapping +
459 			  offsetof(struct bnx2x_fw_stats_data, queue_stats);
460 
461 	req->stats_stride = sizeof(struct per_queue_stats);
462 
463 	/* add list termination tlv */
464 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
465 		      sizeof(struct channel_list_end_tlv));
466 
467 	/* output tlvs list */
468 	bnx2x_dp_tlv_list(bp, req);
469 
470 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
471 	if (rc)
472 		goto out;
473 
474 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
475 		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
476 			  resp->hdr.status);
477 		rc = -EAGAIN;
478 		goto out;
479 	}
480 
481 	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
482 out:
483 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
484 
485 	return rc;
486 }
487 
488 /* CLOSE VF - opposite to INIT_VF */
489 void bnx2x_vfpf_close_vf(struct bnx2x *bp)
490 {
491 	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
492 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
493 	int i, rc;
494 	u32 vf_id;
495 
496 	/* If we don't have a valid VF id, there is no point in
497 	 * sending any further messages
498 	 */
499 	if (bnx2x_get_vf_id(bp, &vf_id))
500 		goto free_irq;
501 
502 	/* Close the queues */
503 	for_each_queue(bp, i)
504 		bnx2x_vfpf_teardown_queue(bp, i);
505 
506 	/* remove mac */
507 	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
508 
509 	/* clear mailbox and prep first tlv */
510 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
511 
512 	req->vf_id = vf_id;
513 
514 	/* add list termination tlv */
515 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
516 		      sizeof(struct channel_list_end_tlv));
517 
518 	/* output tlvs list */
519 	bnx2x_dp_tlv_list(bp, req);
520 
521 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
522 
523 	if (rc)
524 		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
525 
526 	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
527 		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
528 			  resp->hdr.status);
529 
530 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
531 
532 free_irq:
533 	/* Disable HW interrupts, NAPI */
534 	bnx2x_netif_stop(bp, 0);
535 	/* Delete all NAPI objects */
536 	bnx2x_del_all_napi(bp);
537 
538 	/* Release IRQs */
539 	bnx2x_free_irq(bp);
540 }
541 
542 static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
543 				   struct bnx2x_vf_queue *q)
544 {
545 	u8 cl_id = vfq_cl_id(vf, q);
546 	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
547 
548 	/* mac */
549 	bnx2x_init_mac_obj(bp, &q->mac_obj,
550 			   cl_id, q->cid, func_id,
551 			   bnx2x_vf_sp(bp, vf, mac_rdata),
552 			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
553 			   BNX2X_FILTER_MAC_PENDING,
554 			   &vf->filter_state,
555 			   BNX2X_OBJ_TYPE_RX_TX,
556 			   &vf->vf_macs_pool);
557 	/* vlan */
558 	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
559 			    cl_id, q->cid, func_id,
560 			    bnx2x_vf_sp(bp, vf, vlan_rdata),
561 			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
562 			    BNX2X_FILTER_VLAN_PENDING,
563 			    &vf->filter_state,
564 			    BNX2X_OBJ_TYPE_RX_TX,
565 			    &vf->vf_vlans_pool);
566 	/* vlan-mac */
567 	bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
568 				cl_id, q->cid, func_id,
569 				bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
570 				bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
571 				BNX2X_FILTER_VLAN_MAC_PENDING,
572 				&vf->filter_state,
573 				BNX2X_OBJ_TYPE_RX_TX,
574 				&vf->vf_macs_pool,
575 				&vf->vf_vlans_pool);
576 	/* mcast */
577 	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
578 			     q->cid, func_id, func_id,
579 			     bnx2x_vf_sp(bp, vf, mcast_rdata),
580 			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
581 			     BNX2X_FILTER_MCAST_PENDING,
582 			     &vf->filter_state,
583 			     BNX2X_OBJ_TYPE_RX_TX);
584 
585 	/* rss */
586 	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
587 				  func_id, func_id,
588 				  bnx2x_vf_sp(bp, vf, rss_rdata),
589 				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
590 				  BNX2X_FILTER_RSS_CONF_PENDING,
591 				  &vf->filter_state,
592 				  BNX2X_OBJ_TYPE_RX_TX);
593 
594 	vf->leading_rss = cl_id;
595 	q->is_leading = true;
596 	q->sp_initialized = true;
597 }
598 
599 /* ask the pf to open a queue for the vf */
600 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
601 		       bool is_leading)
602 {
603 	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
604 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
605 	u8 fp_idx = fp->index;
606 	u16 tpa_agg_size = 0, flags = 0;
607 	int rc;
608 
609 	/* clear mailbox and prep first tlv */
610 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
611 
612 	/* select tpa mode to request */
613 	if (fp->mode != TPA_MODE_DISABLED) {
614 		flags |= VFPF_QUEUE_FLG_TPA;
615 		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
616 		if (fp->mode == TPA_MODE_GRO)
617 			flags |= VFPF_QUEUE_FLG_TPA_GRO;
618 		tpa_agg_size = TPA_AGG_SIZE;
619 	}
620 
621 	if (is_leading)
622 		flags |= VFPF_QUEUE_FLG_LEADING_RSS;
623 
624 	/* calculate queue flags */
625 	flags |= VFPF_QUEUE_FLG_STATS;
626 	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
627 	flags |= VFPF_QUEUE_FLG_VLAN;
628 
629 	/* Common */
630 	req->vf_qid = fp_idx;
631 	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
632 
633 	/* Rx */
634 	req->rxq.rcq_addr = fp->rx_comp_mapping;
635 	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
636 	req->rxq.rxq_addr = fp->rx_desc_mapping;
637 	req->rxq.sge_addr = fp->rx_sge_mapping;
638 	req->rxq.vf_sb = fp_idx;
639 	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
640 	req->rxq.hc_rate = bp->rx_ticks ? 1000000 / bp->rx_ticks : 0;
641 	req->rxq.mtu = bp->dev->mtu;
642 	req->rxq.buf_sz = fp->rx_buf_size;
643 	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
644 	req->rxq.tpa_agg_sz = tpa_agg_size;
645 	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
646 	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
647 			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
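	/* Worked example for the two statements above (numbers illustrative,
	 * assuming 4K SGE pages with PAGES_PER_SGE == 2, i.e.
	 * PAGES_PER_SGE_SHIFT == 1): for mtu == 9000,
	 * SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT == 3 pages, which rounds up
	 * to 4 pages and converts to 4 >> 1 == 2 SGEs per packet.
	 */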
648 	req->rxq.flags = flags;
649 	req->rxq.drop_flags = 0;
650 	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
651 	req->rxq.stat_id = -1; /* No stats at the moment */
652 
653 	/* Tx */
654 	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
655 	req->txq.vf_sb = fp_idx;
656 	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
657 	req->txq.hc_rate = bp->tx_ticks ? 1000000 / bp->tx_ticks : 0;
658 	req->txq.flags = flags;
659 	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
660 
661 	/* add list termination tlv */
662 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
663 		      sizeof(struct channel_list_end_tlv));
664 
665 	/* output tlvs list */
666 	bnx2x_dp_tlv_list(bp, req);
667 
668 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
669 	if (rc)
670 		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
671 			  fp_idx);
672 
673 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
674 		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
675 			  fp_idx, resp->hdr.status);
676 		rc = -EINVAL;
677 	}
678 
679 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
680 
681 	return rc;
682 }
683 
684 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
685 {
686 	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
687 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
688 	int rc;
689 
690 	/* clear mailbox and prep first tlv */
691 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
692 			sizeof(*req));
693 
694 	req->vf_qid = qidx;
695 
696 	/* add list termination tlv */
697 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
698 		      sizeof(struct channel_list_end_tlv));
699 
700 	/* output tlvs list */
701 	bnx2x_dp_tlv_list(bp, req);
702 
703 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
704 
705 	if (rc) {
706 		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
707 			  rc);
708 		goto out;
709 	}
710 
711 	/* PF failed the transaction */
712 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
713 		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
714 			  resp->hdr.status);
715 		rc = -EINVAL;
716 	}
717 
718 out:
719 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
720 
721 	return rc;
722 }
723 
724 /* request pf to add a mac for the vf */
725 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
726 {
727 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
728 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
729 	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
730 	int rc = 0;
731 
732 	/* clear mailbox and prep first tlv */
733 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
734 			sizeof(*req));
735 
736 	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
737 	req->vf_qid = vf_qid;
738 	req->n_mac_vlan_filters = 1;
739 
740 	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
741 	if (set)
742 		req->filters[0].flags |= VFPF_Q_FILTER_SET;
743 
744 	/* sample bulletin board for new mac */
745 	bnx2x_sample_bulletin(bp);
746 
747 	/* copy mac from device to request */
748 	memcpy(req->filters[0].mac, addr, ETH_ALEN);
749 
750 	/* add list termination tlv */
751 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
752 		      sizeof(struct channel_list_end_tlv));
753 
754 	/* output tlvs list */
755 	bnx2x_dp_tlv_list(bp, req);
756 
757 	/* send message to pf */
758 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
759 	if (rc) {
760 		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
761 		goto out;
762 	}
763 
764 	/* failure may mean PF was configured with a new mac for us */
765 	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
766 		DP(BNX2X_MSG_IOV,
767 		   "vfpf SET MAC failed. Check bulletin board for new posts\n");
768 
769 		/* copy mac from bulletin to device */
770 		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
771 
772 		/* check if bulletin board was updated */
773 		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
774 			/* copy mac from device to request */
775 			memcpy(req->filters[0].mac, bp->dev->dev_addr,
776 			       ETH_ALEN);
777 
778 			/* send message to pf */
779 			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
780 					       bp->vf2pf_mbox_mapping);
781 		} else {
782 			/* no new info in bulletin */
783 			break;
784 		}
785 	}
786 
787 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
788 		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
789 		rc = -EINVAL;
790 	}
791 out:
792 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
793 
794 	return rc;
795 }
796 
797 /* request pf to config rss table for vf queues */
798 int bnx2x_vfpf_config_rss(struct bnx2x *bp,
799 			  struct bnx2x_config_rss_params *params)
800 {
801 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
802 	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
803 	int rc = 0;
804 
805 	/* clear mailbox and prep first tlv */
806 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
807 			sizeof(*req));
808 
809 	/* add list termination tlv */
810 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
811 		      sizeof(struct channel_list_end_tlv));
812 
813 	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
814 	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
815 	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
816 	req->rss_key_size = T_ETH_RSS_KEY;
817 	req->rss_result_mask = params->rss_result_mask;
818 
819 	/* flags handled individually for backward/forward compatibility */
820 	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
821 		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
822 	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
823 		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
824 	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
825 		req->rss_flags |= VFPF_RSS_SET_SRCH;
826 	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
827 		req->rss_flags |= VFPF_RSS_IPV4;
828 	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
829 		req->rss_flags |= VFPF_RSS_IPV4_TCP;
830 	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
831 		req->rss_flags |= VFPF_RSS_IPV4_UDP;
832 	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
833 		req->rss_flags |= VFPF_RSS_IPV6;
834 	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
835 		req->rss_flags |= VFPF_RSS_IPV6_TCP;
836 	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
837 		req->rss_flags |= VFPF_RSS_IPV6_UDP;
838 
839 	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
840 
841 	/* output tlvs list */
842 	bnx2x_dp_tlv_list(bp, req);
843 
844 	/* send message to pf */
845 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
846 	if (rc) {
847 		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
848 		goto out;
849 	}
850 
851 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
852 		/* Since older drivers don't support this feature (and VF has
853 		 * no way of knowing other than failing this), don't propagate
854 		 * an error in this case.
855 		 */
856 		DP(BNX2X_MSG_IOV,
857 		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
858 		   resp->hdr.status);
859 	}
860 out:
861 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
862 
863 	return rc;
864 }
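/* Minimal caller sketch (values hypothetical, including the 0x7f result
 * mask): a caller fills a bnx2x_config_rss_params and hands it here; only
 * the flag bits translated above make the trip to the PF:
 *
 *	struct bnx2x_config_rss_params params = {0};
 *
 *	params.rss_flags = (1 << BNX2X_RSS_MODE_REGULAR) |
 *			   (1 << BNX2X_RSS_IPV4) | (1 << BNX2X_RSS_IPV4_TCP);
 *	params.rss_result_mask = 0x7f;
 *	... fill params.ind_table[] and params.rss_key[] ...
 *	rc = bnx2x_vfpf_config_rss(bp, &params);
 */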
865 
866 int bnx2x_vfpf_set_mcast(struct net_device *dev)
867 {
868 	struct bnx2x *bp = netdev_priv(dev);
869 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
870 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
871 	int rc = 0, i = 0;
872 	struct netdev_hw_addr *ha;
873 
874 	if (bp->state != BNX2X_STATE_OPEN) {
875 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
876 		return -EINVAL;
877 	}
878 
879 	/* clear mailbox and prep first tlv */
880 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
881 			sizeof(*req));
882 
883 	/* Get Rx mode requested */
884 	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
885 
886 	/* We support at most PFVF_MAX_MULTICAST_PER_VF mcast addresses */
887 	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
888 		DP(NETIF_MSG_IFUP,
889 		   "VF supports no more than %d multicast MAC addresses\n",
890 		   PFVF_MAX_MULTICAST_PER_VF);
891 		rc = -EINVAL;
892 		goto out;
893 	}
894 
895 	netdev_for_each_mc_addr(ha, dev) {
896 		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
897 		   bnx2x_mc_addr(ha));
898 		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
899 		i++;
900 	}
901 
902 	req->n_multicast = i;
903 	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
904 	req->vf_qid = 0;
905 
906 	/* add list termination tlv */
907 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
908 		      sizeof(struct channel_list_end_tlv));
909 
910 	/* output tlvs list */
911 	bnx2x_dp_tlv_list(bp, req);
912 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
913 	if (rc) {
914 		BNX2X_ERR("Sending a message failed: %d\n", rc);
915 		goto out;
916 	}
917 
918 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
919 		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
920 			  resp->hdr.status);
921 		rc = -EINVAL;
922 	}
923 out:
924 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
925 
926 	return rc;
927 }
928 
929 /* request pf to add a vlan for the vf */
930 int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
931 {
932 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
933 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
934 	int rc = 0;
935 
936 	if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
937 		DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
938 		return 0;
939 	}
940 
941 	/* clear mailbox and prep first tlv */
942 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
943 			sizeof(*req));
944 
945 	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
946 	req->vf_qid = vf_qid;
947 	req->n_mac_vlan_filters = 1;
948 
949 	req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
950 
951 	if (add)
952 		req->filters[0].flags |= VFPF_Q_FILTER_SET;
953 
954 	/* sample bulletin board for hypervisor vlan */
955 	bnx2x_sample_bulletin(bp);
956 
957 	if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
958 		BNX2X_ERR("Hypervisor will decline the request, aborting\n");
959 		rc = -EINVAL;
960 		goto out;
961 	}
962 
963 	req->filters[0].vlan_tag = vid;
964 
965 	/* add list termination tlv */
966 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
967 		      sizeof(struct channel_list_end_tlv));
968 
969 	/* output tlvs list */
970 	bnx2x_dp_tlv_list(bp, req);
971 
972 	/* send message to pf */
973 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
974 	if (rc) {
975 		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
976 		goto out;
977 	}
978 
979 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
980 		BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
981 			  vid);
982 		rc = -EINVAL;
983 	}
984 out:
985 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
986 
987 	return rc;
988 }
989 
990 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
991 {
992 	int mode = bp->rx_mode;
993 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
994 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
995 	int rc;
996 
997 	/* clear mailbox and prep first tlv */
998 	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
999 			sizeof(*req));
1000 
1001 	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
1002 
1003 	/* Ignore everything except MODE_NONE */
1004 	if (mode == BNX2X_RX_MODE_NONE) {
1005 		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
1006 	} else {
1007 		/* Current PF driver will not look at the specific flags,
1008 		 * but they are required when working with older drivers on hv.
1009 		 */
1010 		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
1011 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
1012 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
1013 		if (mode == BNX2X_RX_MODE_PROMISC)
1014 			req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
1015 	}
1016 
1017 	if (bp->accept_any_vlan)
1018 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
1019 
1020 	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
1021 	req->vf_qid = 0;
1022 
1023 	/* add list termination tlv */
1024 	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
1025 		      sizeof(struct channel_list_end_tlv));
1026 
1027 	/* output tlvs list */
1028 	bnx2x_dp_tlv_list(bp, req);
1029 
1030 	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
1031 	if (rc)
1032 		BNX2X_ERR("Sending a message failed: %d\n", rc);
1033 
1034 	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
1035 		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
1036 		rc = -EINVAL;
1037 	}
1038 
1039 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
1040 
1041 	return rc;
1042 }
1043 
1044 /* General service functions */
1045 static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
1046 {
1047 	u32 addr = BAR_CSTRORM_INTMEM +
1048 		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
1049 
1050 	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
1051 }
1052 
1053 static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
1054 {
1055 	u32 addr = BAR_CSTRORM_INTMEM +
1056 		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
1057 
1058 	REG_WR8(bp, addr, 1);
1059 }
1060 
1061 /* enable vf_pf mailbox (aka vf-pf-channel) */
1062 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
1063 {
1064 	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
1065 
1066 	/* enable the mailbox in the FW */
1067 	storm_memset_vf_mbx_ack(bp, abs_vfid);
1068 	storm_memset_vf_mbx_valid(bp, abs_vfid);
1069 
1070 	/* enable the VF access to the mailbox */
1071 	bnx2x_vf_enable_access(bp, abs_vfid);
1072 }
1073 
1074 /* this works only on !E1x */
1075 static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
1076 				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
1077 				u32 vf_addr_lo, u32 len32)
1078 {
1079 	struct dmae_command dmae;
1080 
1081 	if (CHIP_IS_E1x(bp)) {
1082 		BNX2X_ERR("Chip revision does not support VFs\n");
1083 		return DMAE_NOT_RDY;
1084 	}
1085 
1086 	if (!bp->dmae_ready) {
1087 		BNX2X_ERR("DMAE is not ready, cannot copy\n");
1088 		return DMAE_NOT_RDY;
1089 	}
1090 
1091 	/* set opcode and fixed command fields */
1092 	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
1093 
1094 	if (from_vf) {
1095 		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
1096 			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
1097 			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
1098 
1099 		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
1100 
1101 		dmae.src_addr_lo = vf_addr_lo;
1102 		dmae.src_addr_hi = vf_addr_hi;
1103 		dmae.dst_addr_lo = U64_LO(pf_addr);
1104 		dmae.dst_addr_hi = U64_HI(pf_addr);
1105 	} else {
1106 		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
1107 			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
1108 			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
1109 
1110 		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
1111 
1112 		dmae.src_addr_lo = U64_LO(pf_addr);
1113 		dmae.src_addr_hi = U64_HI(pf_addr);
1114 		dmae.dst_addr_lo = vf_addr_lo;
1115 		dmae.dst_addr_hi = vf_addr_hi;
1116 	}
1117 	dmae.len = len32;
1118 
1119 	/* issue the command and wait for completion */
1120 	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
1121 }
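/* Example (this is how the response path below uses it): copying a
 * PF-resident buffer into VF memory takes from_vf == false, with the
 * length given in 32-bit words:
 *
 *	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
 *				  U64_HI(vf_addr), U64_LO(vf_addr),
 *				  byte_len / 4);
 */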
1122 
1123 static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
1124 					 struct bnx2x_virtf *vf)
1125 {
1126 	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
1127 	u16 length, type;
1128 
1129 	/* prepare response */
1130 	type = mbx->first_tlv.tl.type;
1131 	length = type == CHANNEL_TLV_ACQUIRE ?
1132 		sizeof(struct pfvf_acquire_resp_tlv) :
1133 		sizeof(struct pfvf_general_resp_tlv);
1134 	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
1135 	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
1136 		      sizeof(struct channel_list_end_tlv));
1137 }
1138 
1139 static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
1140 				       struct bnx2x_virtf *vf,
1141 				       int vf_rc)
1142 {
1143 	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
1144 	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
1145 	dma_addr_t pf_addr;
1146 	u64 vf_addr;
1147 	int rc;
1148 
1149 	bnx2x_dp_tlv_list(bp, resp);
1150 	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
1151 	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
1152 
1153 	resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
1154 
1155 	/* send response */
1156 	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
1157 		  mbx->first_tlv.resp_msg_offset;
1158 	pf_addr = mbx->msg_mapping +
1159 		  offsetof(struct bnx2x_vf_mbx_msg, resp);
1160 
1161 	/* Copy the response body first. The leading u64, which holds the
1162 	 * header the VF polls on, is written afterwards.
1163 	 */
1164 	vf_addr += sizeof(u64);
1165 	pf_addr += sizeof(u64);
1166 	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
1167 				  U64_HI(vf_addr),
1168 				  U64_LO(vf_addr),
1169 				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
1170 	if (rc) {
1171 		BNX2X_ERR("Failed to copy response body to VF %d\n",
1172 			  vf->abs_vfid);
1173 		goto mbx_error;
1174 	}
1175 	vf_addr -= sizeof(u64);
1176 	pf_addr -= sizeof(u64);
1177 
1178 	/* ack the FW */
1179 	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1180 	mmiowb();
1181 
1182 	/* copy the response header including status-done field,
1183 	 * must be last dmae, must be after FW is acked
1184 	 */
1185 	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
1186 				  U64_HI(vf_addr),
1187 				  U64_LO(vf_addr),
1188 				  sizeof(u64)/4);
1189 
1190 	/* unlock channel mutex */
1191 	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
1192 
1193 	if (rc) {
1194 		BNX2X_ERR("Failed to copy response status to VF %d\n",
1195 			  vf->abs_vfid);
1196 		goto mbx_error;
1197 	}
1198 	return;
1199 
1200 mbx_error:
1201 	bnx2x_vf_release(bp, vf);
1202 }
1203 
1204 static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
1205 			      struct bnx2x_virtf *vf,
1206 			      int rc)
1207 {
1208 	bnx2x_vf_mbx_resp_single_tlv(bp, vf);
1209 	bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
1210 }
1211 
1212 static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
1213 					struct bnx2x_virtf *vf,
1214 					void *buffer,
1215 					u16 *offset)
1216 {
1217 	struct vfpf_port_phys_id_resp_tlv *port_id;
1218 
1219 	if (!(bp->flags & HAS_PHYS_PORT_ID))
1220 		return;
1221 
1222 	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
1223 		      sizeof(struct vfpf_port_phys_id_resp_tlv));
1224 
1225 	port_id = (struct vfpf_port_phys_id_resp_tlv *)
1226 		  (((u8 *)buffer) + *offset);
1227 	memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
1228 
1229 	/* Offset should continue representing the offset to the tail
1230 	 * of TLV data (outside this function scope)
1231 	 */
1232 	*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
1233 }
1234 
1235 static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
1236 					 struct bnx2x_virtf *vf,
1237 					 void *buffer,
1238 					 u16 *offset)
1239 {
1240 	struct vfpf_fp_hsi_resp_tlv *fp_hsi;
1241 
1242 	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
1243 		      sizeof(struct vfpf_fp_hsi_resp_tlv));
1244 
1245 	fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
1246 		 (((u8 *)buffer) + *offset);
1247 	fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;
1248 
1249 	/* Offset should continue representing the offset to the tail
1250 	 * of TLV data (outside this function scope)
1251 	 */
1252 	*offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
1253 }
1254 
1255 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1256 				      struct bnx2x_vf_mbx *mbx, int vfop_status)
1257 {
1258 	int i;
1259 	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
1260 	struct pf_vf_resc *resc = &resp->resc;
1261 	u8 status = bnx2x_pfvf_status_codes(vfop_status);
1262 	u16 length;
1263 
1264 	memset(resp, 0, sizeof(*resp));
1265 
1266 	/* fill in pfdev info */
1267 	resp->pfdev_info.chip_num = bp->common.chip_id;
1268 	resp->pfdev_info.db_size = bp->db_size;
1269 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
1270 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
1271 				   PFVF_CAP_TPA |
1272 				   PFVF_CAP_TPA_UPDATE |
1273 				   PFVF_CAP_VLAN_FILTER);
1274 	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
1275 			  sizeof(resp->pfdev_info.fw_ver));
1276 
1277 	if (status == PFVF_STATUS_NO_RESOURCE ||
1278 	    status == PFVF_STATUS_SUCCESS) {
1279 		/* set resource numbers; if status equals NO_RESOURCE these
1280 		 * are the max possible numbers
1281 		 */
1282 		resc->num_rxqs = vf_rxq_count(vf) ? :
1283 			bnx2x_vf_max_queue_cnt(bp, vf);
1284 		resc->num_txqs = vf_txq_count(vf) ? :
1285 			bnx2x_vf_max_queue_cnt(bp, vf);
1286 		resc->num_sbs = vf_sb_count(vf);
1287 		resc->num_mac_filters = vf_mac_rules_cnt(vf);
1288 		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
1289 		resc->num_mc_filters = 0;
1290 
1291 		if (status == PFVF_STATUS_SUCCESS) {
1292 			/* fill in the allocated resources */
1293 			struct pf_vf_bulletin_content *bulletin =
1294 				BP_VF_BULLETIN(bp, vf->index);
1295 
1296 			for_each_vfq(vf, i)
1297 				resc->hw_qid[i] =
1298 					vfq_qzone_id(vf, vfq_get(vf, i));
1299 
1300 			for_each_vf_sb(vf, i) {
1301 				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
1302 				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
1303 			}
1304 
1305 			/* if a mac has been set for this vf, supply it */
1306 			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
1307 				memcpy(resc->current_mac_addr, bulletin->mac,
1308 				       ETH_ALEN);
1309 			}
1310 		}
1311 	}
1312 
1313 	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
1314 	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
1315 	   vf->abs_vfid,
1316 	   resp->pfdev_info.chip_num,
1317 	   resp->pfdev_info.db_size,
1318 	   resp->pfdev_info.indices_per_sb,
1319 	   resp->pfdev_info.pf_cap,
1320 	   resc->num_rxqs,
1321 	   resc->num_txqs,
1322 	   resc->num_sbs,
1323 	   resc->num_mac_filters,
1324 	   resc->num_vlan_filters,
1325 	   resc->num_mc_filters,
1326 	   resp->pfdev_info.fw_ver);
1327 
1328 	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
1329 	for (i = 0; i < vf_rxq_count(vf); i++)
1330 		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
1331 	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
1332 	for (i = 0; i < vf_sb_count(vf); i++)
1333 		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
1334 			resc->hw_sbs[i].hw_sb_id,
1335 			resc->hw_sbs[i].sb_qid);
1336 	DP_CONT(BNX2X_MSG_IOV, "]\n");
1337 
1338 	/* prepare response */
1339 	length = sizeof(struct pfvf_acquire_resp_tlv);
1340 	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
1341 
1342 	/* Handle possible VF requests for physical port identifiers.
1343 	 * 'length' should continue to indicate the offset of the first empty
1344 	 * place in the buffer (i.e., where next TLV should be inserted)
1345 	 */
1346 	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1347 				  CHANNEL_TLV_PHYS_PORT_ID))
1348 		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
1349 
1350 	/* `New' vfs will want to know whether the fastpath HSI is supported;
1351 	 * if it is not, they can log that their driver version must be
1352 	 * updated.
1353 	 */
1354 	bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);
1355 
1356 	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
1357 		      sizeof(struct channel_list_end_tlv));
1358 
1359 	/* send the response */
1360 	bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
1361 }
1362 
1363 static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
1364 				       struct vfpf_acquire_tlv *acquire)
1365 {
1366 	/* Windows driver does one of three things:
1367 	 * 1. Old driver doesn't have bulletin board address set.
1368 	 * 2. 'Middle' driver sends mc_num == 32.
1369 	 * 3. New driver sets the OS field.
1370 	 */
1371 	if (!acquire->bulletin_addr ||
1372 	    acquire->resc_request.num_mc_filters == 32 ||
1373 	    ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
1374 	     VF_OS_WINDOWS))
1375 		return true;
1376 
1377 	return false;
1378 }
1379 
1380 static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
1381 					 struct bnx2x_virtf *vf,
1382 					 struct bnx2x_vf_mbx *mbx)
1383 {
1384 	/* Linux drivers which correctly set the doorbell size also
1385 	 * send a physical port request
1386 	 */
1387 	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1388 				  CHANNEL_TLV_PHYS_PORT_ID))
1389 		return 0;
1390 
1391 	/* The issue does not exist in Windows VMs */
1392 	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
1393 		return 0;
1394 
1395 	return -EOPNOTSUPP;
1396 }
1397 
1398 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1399 				 struct bnx2x_vf_mbx *mbx)
1400 {
1401 	int rc;
1402 	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
1403 
1404 	/* log vfdef info */
1405 	DP(BNX2X_MSG_IOV,
1406 	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
1407 	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
1408 	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
1409 	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
1410 	   acquire->resc_request.num_vlan_filters,
1411 	   acquire->resc_request.num_mc_filters);
1412 
1413 	/* Prevent VFs with old drivers from loading: they calculate CIDs
1414 	 * incorrectly and require a VF-flr [VM reboot] in order to recover
1415 	 * when being upgraded.
1416 	 */
1417 	rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
1418 	if (rc) {
1419 		DP(BNX2X_MSG_IOV,
1420 		   "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
1421 		   vf->abs_vfid);
1422 		goto out;
1423 	}
1424 
1425 	/* Verify the VF fastpath HSI can be supported by the loaded FW.
1426 	 * Linux vfs should be oblivious to changes between v0 and v2.
1427 	 */
1428 	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
1429 		vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
1430 	else
1431 		vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
1432 				   ETH_FP_HSI_VER_2);
1433 	if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
1434 		DP(BNX2X_MSG_IOV,
1435 		   "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
1436 		   vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
1437 		   ETH_FP_HSI_VERSION);
1438 		rc = -EINVAL;
1439 		goto out;
1440 	}
1441 
1442 	/* acquire the resources */
1443 	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
1444 
1445 	/* store address of vf's bulletin board */
1446 	vf->bulletin_map = acquire->bulletin_addr;
1447 	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
1448 		DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
1449 		   vf->abs_vfid);
1450 		vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
1451 	} else {
1452 		vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
1453 	}
1454 
1455 	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
1456 		DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
1457 		   vf->abs_vfid);
1458 		vf->cfg_flags |= VF_CFG_VLAN_FILTER;
1459 	} else {
1460 		vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
1461 	}
1462 
1463 out:
1464 	/* response */
1465 	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
1466 }
1467 
1468 static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1469 				 struct bnx2x_vf_mbx *mbx)
1470 {
1471 	struct vfpf_init_tlv *init = &mbx->msg->req.init;
1472 	int rc;
1473 
1474 	/* record ghost addresses from vf message */
1475 	vf->fw_stat_map = init->stats_addr;
1476 	vf->stats_stride = init->stats_stride;
1477 	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
1478 
1479 	/* set VF multiqueue statistics collection mode */
1480 	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
1481 		vf->cfg_flags |= VF_CFG_STATS_COALESCE;
1482 
1483 	/* Update VF's view of link state */
1484 	if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
1485 		bnx2x_iov_link_update_vf(bp, vf->index);
1486 
1487 	/* response */
1488 	bnx2x_vf_mbx_resp(bp, vf, rc);
1489 }
1490 
1491 /* convert MBX queue-flags to standard SP queue-flags */
1492 static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1493 				     unsigned long *sp_q_flags)
1494 {
1495 	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
1496 		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
1497 	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
1498 		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
1499 	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
1500 		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
1501 	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
1502 		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
1503 	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
1504 		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
1505 	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
1506 		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
1507 	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
1508 		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1509 	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1510 		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1511 	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
1512 		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
1513 
1514 	/* outer vlan removal is set according to PF's multi function mode */
1515 	if (IS_MF_SD(bp))
1516 		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
1517 }
1518 
1519 static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1520 				 struct bnx2x_vf_mbx *mbx)
1521 {
1522 	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
1523 	struct bnx2x_vf_queue_construct_params qctor;
1524 	int rc = 0;
1525 
1526 	/* verify vf_qid */
1527 	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
1528 		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
1529 			  setup_q->vf_qid, vf_rxq_count(vf));
1530 		rc = -EINVAL;
1531 		goto response;
1532 	}
1533 
1534 	/* tx queues must be set up alongside rx queues, thus if the rx queue
1535 	 * is not marked as valid there's nothing to do.
1536 	 */
1537 	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
1538 		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
1539 		unsigned long q_type = 0;
1540 
1541 		struct bnx2x_queue_init_params *init_p;
1542 		struct bnx2x_queue_setup_params *setup_p;
1543 
1544 		if (bnx2x_vfq_is_leading(q))
1545 			bnx2x_leading_vfq_init(bp, vf, q);
1546 
1547 		/* re-init the VF operation context */
1548 		memset(&qctor, 0,
1549 		       sizeof(struct bnx2x_vf_queue_construct_params));
1550 		setup_p = &qctor.prep_qsetup;
1551 		init_p =  &qctor.qstate.params.init;
1552 
1553 		/* activate immediately */
1554 		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
1555 
1556 		if (setup_q->param_valid & VFPF_TXQ_VALID) {
1557 			struct bnx2x_txq_setup_params *txq_params =
1558 				&setup_p->txq_params;
1559 
1560 			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1561 
1562 			/* save sb resource index */
1563 			q->sb_idx = setup_q->txq.vf_sb;
1564 
1565 			/* tx init */
1566 			init_p->tx.hc_rate = setup_q->txq.hc_rate;
1567 			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
1568 
1569 			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1570 						 &init_p->tx.flags);
1571 
1572 			/* tx setup - flags */
1573 			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1574 						 &setup_p->flags);
1575 
1576 			/* tx setup - general, nothing */
1577 
1578 			/* tx setup - tx */
1579 			txq_params->dscr_map = setup_q->txq.txq_addr;
1580 			txq_params->sb_cq_index = setup_q->txq.sb_index;
1581 			txq_params->traffic_type = setup_q->txq.traffic_type;
1582 
1583 			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
1584 						 q->index, q->sb_idx);
1585 		}
1586 
1587 		if (setup_q->param_valid & VFPF_RXQ_VALID) {
1588 			struct bnx2x_rxq_setup_params *rxq_params =
1589 							&setup_p->rxq_params;
1590 
1591 			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1592 
1593 			/* Note: there is no support for different SBs
1594 			 * for TX and RX
1595 			 */
1596 			q->sb_idx = setup_q->rxq.vf_sb;
1597 
1598 			/* rx init */
1599 			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
1600 			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
1601 			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1602 						 &init_p->rx.flags);
1603 
1604 			/* rx setup - flags */
1605 			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1606 						 &setup_p->flags);
1607 
1608 			/* rx setup - general */
1609 			setup_p->gen_params.mtu = setup_q->rxq.mtu;
1610 
1611 			/* rx setup - rx */
1612 			rxq_params->drop_flags = setup_q->rxq.drop_flags;
1613 			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
1614 			rxq_params->sge_map = setup_q->rxq.sge_addr;
1615 			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
1616 			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
1617 			rxq_params->buf_sz = setup_q->rxq.buf_sz;
1618 			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
1619 			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
1620 			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
1621 			rxq_params->cache_line_log =
1622 				setup_q->rxq.cache_line_log;
1623 			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
1624 
1625 			/* rx setup - multicast engine */
1626 			if (bnx2x_vfq_is_leading(q)) {
1627 				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);
1628 
1629 				rxq_params->mcast_engine_id = mcast_id;
1630 				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
1631 			}
1632 
1633 			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
1634 						 q->index, q->sb_idx);
1635 		}
1636 		/* complete the preparations */
1637 		bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
1638 
1639 		rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
1640 		if (rc)
1641 			goto response;
1642 	}
1643 response:
1644 	bnx2x_vf_mbx_resp(bp, vf, rc);
1645 }
1646 
1647 static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
1648 				     struct bnx2x_virtf *vf,
1649 				     struct vfpf_set_q_filters_tlv *tlv,
1650 				     struct bnx2x_vf_mac_vlan_filters **pfl,
1651 				     u32 type_flag)
1652 {
1653 	int i, j;
1654 	struct bnx2x_vf_mac_vlan_filters *fl = NULL;
1655 	size_t fsz;
1656 
1657 	fsz = tlv->n_mac_vlan_filters *
1658 	      sizeof(struct bnx2x_vf_mac_vlan_filter) +
1659 	      sizeof(struct bnx2x_vf_mac_vlan_filters);
1660 
1661 	fl = kzalloc(fsz, GFP_KERNEL);
1662 	if (!fl)
1663 		return -ENOMEM;
1664 
1665 	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
1666 		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
1667 
1668 		if ((msg_filter->flags & type_flag) != type_flag)
1669 			continue;
1670 		memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
1671 		if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
1672 			fl->filters[j].mac = msg_filter->mac;
1673 			fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
1674 		}
1675 		if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
1676 			fl->filters[j].vid = msg_filter->vlan_tag;
1677 			fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
1678 		}
1679 		fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
1680 		fl->count++;
1681 		j++;
1682 	}
1683 	if (!fl->count)
1684 		kfree(fl);
1685 	else
1686 		*pfl = fl;
1687 
1688 	return 0;
1689 }
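/* A note on the fsz computation above: if bnx2x_vf_mac_vlan_filters ends
 * in a flexible array member, the overflow-checked helper from
 * <linux/overflow.h> expresses the same size (sketch, assuming that
 * layout):
 *
 *	fsz = struct_size(fl, filters, tlv->n_mac_vlan_filters);
 */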
1690 
1691 static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
1692 				    u32 flags)
1693 {
1694 	int i, cnt = 0;
1695 
1696 	for (i = 0; i < filters->n_mac_vlan_filters; i++)
		if ((filters->filters[i].flags & flags) == flags)
1698 			cnt++;
1699 
1700 	return cnt;
1701 }
1702 
static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
				     struct vfpf_q_mac_vlan_filter *filter)
{
	/* no trailing newline here - DP_CONT below continues this line */
	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x", idx, filter->flags);
	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
	DP_CONT(msglvl, "\n");
}
1713 
1714 static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
1715 				       struct vfpf_set_q_filters_tlv *filters)
1716 {
1717 	int i;
1718 
1719 	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
1720 		for (i = 0; i < filters->n_mac_vlan_filters; i++)
1721 			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
1722 						 &filters->filters[i]);
1723 
1724 	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
1725 		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
1726 
1727 	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
1728 		for (i = 0; i < filters->n_multicast; i++)
1729 			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
1730 }
1731 
1732 #define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
1733 #define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
1734 #define VFPF_VLAN_MAC_FILTER	(VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
1735 
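/* Apply a VF's SET_Q_FILTERS request. Mac/vlan changes are applied in three
 * passes - paired vlan-mac filters, then mac-only, then vlan-only - each
 * pass building its own list via bnx2x_vf_mbx_macvlan_list(). Rx-mode and
 * multicast updates follow, each gated by its own CHANGED flag.
 */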
1736 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1737 {
	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
	int rc = 0;
1742 
1743 	/* check for any mac/vlan changes */
1744 	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
1745 		struct bnx2x_vf_mac_vlan_filters *fl = NULL;
1746 
1747 		/* build vlan-mac list */
1748 		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1749 					       VFPF_VLAN_MAC_FILTER);
1750 		if (rc)
1751 			goto op_err;
1752 
		if (fl) {
			/* set vlan-mac list */
1756 			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1757 							   msg->vf_qid,
1758 							   false);
1759 			if (rc)
1760 				goto op_err;
1761 		}
1762 
1763 		/* build mac list */
1764 		fl = NULL;
1765 
1766 		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1767 					       VFPF_MAC_FILTER);
1768 		if (rc)
1769 			goto op_err;
1770 
1771 		if (fl) {
1772 			/* set mac list */
1773 			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1774 							   msg->vf_qid,
1775 							   false);
1776 			if (rc)
1777 				goto op_err;
1778 		}
1779 
1780 		/* build vlan list */
1781 		fl = NULL;
1782 
1783 		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
1784 					       VFPF_VLAN_FILTER);
1785 		if (rc)
1786 			goto op_err;
1787 
1788 		if (fl) {
1789 			/* set vlan list */
1790 			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
1791 							   msg->vf_qid,
1792 							   false);
1793 			if (rc)
1794 				goto op_err;
1795 		}
1796 
1797 	}
1798 
1799 	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1800 		unsigned long accept = 0;
1801 		struct pf_vf_bulletin_content *bulletin =
1802 					BP_VF_BULLETIN(bp, vf->index);
1803 
1804 		/* Ignore VF requested mode; instead set a regular mode */
		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
1806 			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
1807 			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
1808 			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1809 		}
1810 
		/* any_vlan is not configured when the hypervisor is forcing
		 * a VLAN. Otherwise it is configured if either
		 *   1. the VF does not support vlan filtering, OR
		 *   2. the VF supports vlan filtering and explicitly
		 *      requested it
		 */
1817 		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
1818 		    (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
1819 		     msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
1820 			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1821 
1822 		/* set rx-mode */
1823 		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
1824 		if (rc)
1825 			goto op_err;
1826 	}
1827 
1828 	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
1829 		/* set mcasts */
1830 		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
1831 				    msg->n_multicast, false);
1832 		if (rc)
1833 			goto op_err;
1834 	}
1835 op_err:
1836 	if (rc)
1837 		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
1838 			  vf->abs_vfid, msg->vf_qid, rc);
1839 	return rc;
1840 }
1841 
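/* reject mac filter requests that conflict with a mac forced through the
 * set_vf_mac ndo (reflected in the VF's bulletin board)
 */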
1842 static int bnx2x_filters_validate_mac(struct bnx2x *bp,
1843 				      struct bnx2x_virtf *vf,
1844 				      struct vfpf_set_q_filters_tlv *filters)
1845 {
1846 	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1847 	int rc = 0;
1848 
	/* If a mac was already set for this VF via the set_vf_mac ndo, only
	 * accept configurations of that mac. Why accept them at all? Because
	 * the PF may have been unable to configure the mac at the time, since
	 * the queue was not yet set up.
	 */
1854 	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
1855 		struct vfpf_q_mac_vlan_filter *filter = NULL;
1856 		int i;
1857 
1858 		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
1859 			if (!(filters->filters[i].flags &
1860 			      VFPF_Q_FILTER_DEST_MAC_VALID))
1861 				continue;
1862 
			/* once a mac was set by the ndo, we can only
			 * accept a single mac...
			 */
1866 			if (filter) {
1867 				BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
1868 					  vf->abs_vfid,
1869 					  filters->n_mac_vlan_filters);
1870 				rc = -EPERM;
1871 				goto response;
1872 			}
1873 
1874 			filter = &filters->filters[i];
1875 		}
1876 
1877 		/* ...and only the mac set by the ndo */
1878 		if (filter &&
1879 		    !ether_addr_equal(filter->mac, bulletin->mac)) {
1880 			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
1881 				  vf->abs_vfid);
1882 
1883 			rc = -EPERM;
1884 			goto response;
1885 		}
1886 	}
1887 
1888 response:
1889 	return rc;
1890 }
1891 
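/* reject vlan filter requests when the hypervisor has forced a vlan, and
 * verify that the addressed queue exists
 */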
1892 static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
1893 				       struct bnx2x_virtf *vf,
1894 				       struct vfpf_set_q_filters_tlv *filters)
1895 {
1896 	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1897 	int rc = 0;
1898 
1899 	/* if vlan was set by hypervisor we don't allow guest to config vlan */
1900 	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
		/* search for vlan filters */
		if (bnx2x_vf_filters_contain(filters,
1904 					     VFPF_Q_FILTER_VLAN_TAG_VALID)) {
1905 			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1906 				  vf->abs_vfid);
1907 			rc = -EPERM;
1908 			goto response;
1909 		}
1910 	}
1911 
1912 	/* verify vf_qid */
1913 	if (filters->vf_qid > vf_rxq_count(vf)) {
1914 		rc = -EPERM;
1915 		goto response;
1916 	}
1917 
1918 response:
1919 	return rc;
1920 }
1921 
1922 static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1923 				       struct bnx2x_virtf *vf,
1924 				       struct bnx2x_vf_mbx *mbx)
1925 {
1926 	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
1927 	int rc;
1928 
1929 	rc = bnx2x_filters_validate_mac(bp, vf, filters);
1930 	if (rc)
1931 		goto response;
1932 
1933 	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
1934 	if (rc)
1935 		goto response;
1936 
1937 	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
1938 	   vf->abs_vfid,
1939 	   filters->vf_qid);
1940 
1941 	/* print q_filter message */
1942 	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
1943 
1944 	rc = bnx2x_vf_mbx_qfilters(bp, vf);
1945 response:
1946 	bnx2x_vf_mbx_resp(bp, vf, rc);
1947 }
1948 
1949 static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1950 				    struct bnx2x_vf_mbx *mbx)
1951 {
1952 	int qid = mbx->msg->req.q_op.vf_qid;
1953 	int rc;
1954 
1955 	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
1956 	   vf->abs_vfid, qid);
1957 
1958 	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
1959 	bnx2x_vf_mbx_resp(bp, vf, rc);
1960 }
1961 
1962 static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1963 				  struct bnx2x_vf_mbx *mbx)
1964 {
1965 	int rc;
1966 
1967 	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
1968 
1969 	rc = bnx2x_vf_close(bp, vf);
1970 	bnx2x_vf_mbx_resp(bp, vf, rc);
1971 }
1972 
1973 static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1974 				    struct bnx2x_vf_mbx *mbx)
1975 {
1976 	int rc;
1977 
1978 	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
1979 
1980 	rc = bnx2x_vf_free(bp, vf);
1981 	bnx2x_vf_mbx_resp(bp, vf, rc);
1982 }
1983 
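/* Translate a VF's UPDATE_RSS TLV into ramrod parameters and apply them.
 * Flags are translated bit by bit rather than copied wholesale to preserve
 * backward/forward compatibility between VF and PF driver versions.
 */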
1984 static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1985 				    struct bnx2x_vf_mbx *mbx)
1986 {
1987 	struct bnx2x_config_rss_params rss;
1988 	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
1989 	int rc = 0;
1990 
1991 	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
1992 	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
1993 		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
1994 			  vf->index);
1995 		rc = -EINVAL;
1996 		goto mbx_resp;
1997 	}
1998 
1999 	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
2000 
2001 	/* set vfop params according to rss tlv */
2002 	memcpy(rss.ind_table, rss_tlv->ind_table,
2003 	       T_ETH_INDIRECTION_TABLE_SIZE);
2004 	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
2005 	rss.rss_obj = &vf->rss_conf_obj;
2006 	rss.rss_result_mask = rss_tlv->rss_result_mask;
2007 
2008 	/* flags handled individually for backward/forward compatibility */
2009 	rss.rss_flags = 0;
2010 	rss.ramrod_flags = 0;
2011 
2012 	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
2013 		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
2014 	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
2015 		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
2016 	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
2017 		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
2018 	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
2019 		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
2020 	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
2021 		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
2022 	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
2023 		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
2024 	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
2025 		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
2026 	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
2027 		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
2028 	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
2029 		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
2030 
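	/* The FW asserts if UDP RSS is enabled without TCP RSS for the same
	 * IP version; reject such a combination up front.
	 */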
2031 	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
2032 	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
2033 	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
2034 	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
2035 		BNX2X_ERR("about to hit a FW assert. aborting...\n");
2036 		rc = -EINVAL;
2037 		goto mbx_resp;
2038 	}
2039 
2040 	rc = bnx2x_vf_rss_update(bp, vf, &rss);
2041 mbx_resp:
2042 	bnx2x_vf_mbx_resp(bp, vf, rc);
2043 }
2044 
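/* sanity-check the TPA parameters supplied by the VF against HW/PF limits */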
2045 static int bnx2x_validate_tpa_params(struct bnx2x *bp,
2046 				       struct vfpf_tpa_tlv *tpa_tlv)
2047 {
2048 	int rc = 0;
2049 
2050 	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
2051 	    U_ETH_MAX_SGES_FOR_PACKET) {
2052 		rc = -EINVAL;
2053 		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
2054 			  tpa_tlv->tpa_client_info.max_sges_for_packet,
2055 			  U_ETH_MAX_SGES_FOR_PACKET);
2056 	}
2057 
2058 	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
2059 		rc = -EINVAL;
2060 		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
2061 			  tpa_tlv->tpa_client_info.max_tpa_queues,
2062 			  MAX_AGG_QS(bp));
2063 	}
2064 
2065 	return rc;
2066 }
2067 
2068 static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
2069 				    struct bnx2x_vf_mbx *mbx)
2070 {
2071 	struct bnx2x_queue_update_tpa_params vf_op_params;
2072 	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
2073 	int rc = 0;
2074 
2075 	memset(&vf_op_params, 0, sizeof(vf_op_params));
2076 
	/* report validation failures back to the VF instead of silently
	 * acking with success
	 */
	rc = bnx2x_validate_tpa_params(bp, tpa_tlv);
	if (rc)
		goto mbx_resp;
2079 
2080 	vf_op_params.complete_on_both_clients =
2081 		tpa_tlv->tpa_client_info.complete_on_both_clients;
2082 	vf_op_params.dont_verify_thr =
2083 		tpa_tlv->tpa_client_info.dont_verify_thr;
2084 	vf_op_params.max_agg_sz =
2085 		tpa_tlv->tpa_client_info.max_agg_size;
2086 	vf_op_params.max_sges_pkt =
2087 		tpa_tlv->tpa_client_info.max_sges_for_packet;
2088 	vf_op_params.max_tpa_queues =
2089 		tpa_tlv->tpa_client_info.max_tpa_queues;
2090 	vf_op_params.sge_buff_sz =
2091 		tpa_tlv->tpa_client_info.sge_buff_size;
2092 	vf_op_params.sge_pause_thr_high =
2093 		tpa_tlv->tpa_client_info.sge_pause_thr_high;
2094 	vf_op_params.sge_pause_thr_low =
2095 		tpa_tlv->tpa_client_info.sge_pause_thr_low;
2096 	vf_op_params.tpa_mode =
2097 		tpa_tlv->tpa_client_info.tpa_mode;
2098 	vf_op_params.update_ipv4 =
2099 		tpa_tlv->tpa_client_info.update_ipv4;
2100 	vf_op_params.update_ipv6 =
2101 		tpa_tlv->tpa_client_info.update_ipv6;
2102 
2103 	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
2104 
2105 mbx_resp:
2106 	bnx2x_vf_mbx_resp(bp, vf, rc);
2107 }
2108 
/* dispatch a VF request: take the vf-pf channel lock, route by TLV type and
 * let the handler send the response (which releases the lock)
 */
2110 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
2111 				  struct bnx2x_vf_mbx *mbx)
2112 {
2113 	int i;
2114 
2115 	/* check if tlv type is known */
2116 	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
2117 		/* Lock the per vf op mutex and note the locker's identity.
2118 		 * The unlock will take place in mbx response.
2119 		 */
2120 		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
2121 
2122 		/* switch on the opcode */
2123 		switch (mbx->first_tlv.tl.type) {
2124 		case CHANNEL_TLV_ACQUIRE:
2125 			bnx2x_vf_mbx_acquire(bp, vf, mbx);
2126 			return;
2127 		case CHANNEL_TLV_INIT:
2128 			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
2129 			return;
2130 		case CHANNEL_TLV_SETUP_Q:
2131 			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
2132 			return;
2133 		case CHANNEL_TLV_SET_Q_FILTERS:
2134 			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
2135 			return;
2136 		case CHANNEL_TLV_TEARDOWN_Q:
2137 			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
2138 			return;
2139 		case CHANNEL_TLV_CLOSE:
2140 			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
2141 			return;
2142 		case CHANNEL_TLV_RELEASE:
2143 			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
2144 			return;
2145 		case CHANNEL_TLV_UPDATE_RSS:
2146 			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
2147 			return;
2148 		case CHANNEL_TLV_UPDATE_TPA:
2149 			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
2150 			return;
2151 		}
2152 
2153 	} else {
		/* unknown TLV - it may belong to a VF driver from the future,
		 * i.e. a version written after this PF driver, supporting
		 * features we know nothing about and therefore cannot honor.
		 * Alternatively, a buggy VF driver may simply be sending
		 * garbage over the channel.
		 */
2160 		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
2161 			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
2162 			  vf->state);
2163 		for (i = 0; i < 20; i++)
2164 			DP_CONT(BNX2X_MSG_IOV, "%x ",
2165 				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
2166 	}
2167 
2168 	/* can we respond to VF (do we have an address for it?) */
2169 	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
2170 		/* notify the VF that we do not support this request */
2171 		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
2172 	} else {
2173 		/* can't send a response since this VF is unknown to us
2174 		 * just ack the FW to release the mailbox and unlock
2175 		 * the channel.
2176 		 */
2177 		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
2178 		/* Firmware ack should be written before unlocking channel */
2179 		mmiowb();
2180 		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
2181 	}
2182 }
2183 
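/* Record the mailbox address of a pending VF request and schedule the IOV
 * task; the request itself is copied from VF memory and processed later,
 * in bnx2x_vf_mbx().
 */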
2184 void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
2185 			   struct vf_pf_event_data *vfpf_event)
2186 {
2187 	u8 vf_idx;
2188 
2189 	DP(BNX2X_MSG_IOV,
2190 	   "vf pf event received: vfid %d, address_hi %x, address lo %x",
2191 	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
	/* Sanity checks; consider removing later */
2193 
2194 	/* check if the vf_id is valid */
2195 	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
2196 	    BNX2X_NR_VIRTFN(bp)) {
2197 		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
2198 			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
2199 		return;
2200 	}
2201 
2202 	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
2203 
2204 	/* Update VFDB with current message and schedule its handling */
2205 	mutex_lock(&BP_VFDB(bp)->event_mutex);
2206 	BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
2207 		le32_to_cpu(vfpf_event->msg_addr_hi);
2208 	BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
2209 		le32_to_cpu(vfpf_event->msg_addr_lo);
2210 	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
2211 	mutex_unlock(&BP_VFDB(bp)->event_mutex);
2212 
2213 	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
2214 }
2215 
2216 /* handle new vf-pf messages */
2217 void bnx2x_vf_mbx(struct bnx2x *bp)
2218 {
2219 	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
2220 	u64 events;
2221 	u8 vf_idx;
2222 	int rc;
2223 
2224 	if (!vfdb)
2225 		return;
2226 
2227 	mutex_lock(&vfdb->event_mutex);
2228 	events = vfdb->event_occur;
2229 	vfdb->event_occur = 0;
2230 	mutex_unlock(&vfdb->event_mutex);
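	/* The pending-event bits were snapshotted and cleared above; events
	 * arriving from here on set fresh bits and are handled on the next
	 * run of the IOV task.
	 */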
2231 
2232 	for_each_vf(bp, vf_idx) {
2233 		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
2234 		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2235 
2236 		/* Handle VFs which have pending events */
2237 		if (!(events & (1ULL << vf_idx)))
2238 			continue;
2239 
2240 		DP(BNX2X_MSG_IOV,
2241 		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
2242 		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
2243 		   mbx->first_tlv.resp_msg_offset);
2244 
2245 		/* dmae to get the VF request */
2246 		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
2247 					  vf->abs_vfid, mbx->vf_addr_hi,
2248 					  mbx->vf_addr_lo,
2249 					  sizeof(union vfpf_tlvs)/4);
2250 		if (rc) {
2251 			BNX2X_ERR("Failed to copy request VF %d\n",
2252 				  vf->abs_vfid);
2253 			bnx2x_vf_release(bp, vf);
2254 			return;
2255 		}
2256 
2257 		/* process the VF message header */
2258 		mbx->first_tlv = mbx->msg->req.first_tlv;
2259 
		/* Clean the response buffer so that stale TLVs from a
		 * previous exchange are not mistaken for a chained reply.
		 */
2263 		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
2264 
2265 		/* dispatch the request (will prepare the response) */
2266 		bnx2x_vf_mbx_request(bp, vf, mbx);
2267 	}
2268 }
2269 
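/* Set the bulletin board length according to the VF's capabilities and
 * (re)compute its CRC. The VF side is expected to re-sample the board until
 * it reads a consistent snapshot, using the CRC to detect torn reads (see
 * bnx2x_sample_bulletin()), so the CRC must be computed over the final
 * contents.
 */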
2270 void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
2271 				bool support_long)
2272 {
2273 	/* Older VFs contain a bug where they can't check CRC for bulletin
2274 	 * boards of length greater than legacy size.
2275 	 */
2276 	bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
2277 					  BULLETIN_CONTENT_LEGACY_SIZE;
2278 	bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
2279 }
2280 
2281 /* propagate local bulletin board to vf */
2282 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
2283 {
2284 	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
2285 	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
2286 		vf * BULLETIN_CONTENT_SIZE;
2287 	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
2288 	int rc;
2289 
2290 	/* can only update vf after init took place */
2291 	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
2292 	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
2293 		return 0;
2294 
2295 	/* increment bulletin board version and compute crc */
2296 	bulletin->version++;
	bnx2x_vf_bulletin_finalize(bulletin,
				   !!(bnx2x_vf(bp, vf, cfg_flags) &
				      VF_CFG_EXT_BULLETIN));
2300 
2301 	/* propagate bulletin board via dmae to vm memory */
2302 	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
2303 				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
2304 				  U64_LO(vf_addr), bulletin->length / 4);
2305 	return rc;
2306 }
2307