1 /* bnx2x_cmn.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/ip.h>
26 #include <linux/crash_dump.h>
27 #include <net/tcp.h>
28 #include <net/gro.h>
29 #include <net/ipv6.h>
30 #include <net/ip6_checksum.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
34 #include "bnx2x_sp.h"
35 
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
40 
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 {
43 	int i;
44 
45 	/* Add NAPI objects */
46 	for_each_rx_queue_cnic(bp, i) {
47 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
48 	}
49 }
50 
51 static void bnx2x_add_all_napi(struct bnx2x *bp)
52 {
53 	int i;
54 
55 	/* Add NAPI objects */
56 	for_each_eth_queue(bp, i) {
57 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
58 	}
59 }
60 
61 static int bnx2x_calc_num_queues(struct bnx2x *bp)
62 {
63 	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
64 
65 	/* Reduce memory usage in kdump environment by using only one queue */
66 	if (is_kdump_kernel())
67 		nq = 1;
68 
69 	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
70 	return nq;
71 }
72 
73 /**
74  * bnx2x_move_fp - move content of the fastpath structure.
75  *
76  * @bp:		driver handle
77  * @from:	source FP index
78  * @to:		destination FP index
79  *
80  * Makes sure the contents of the bp->fp[to].napi is kept
81  * intact. This is done by first copying the napi struct from
82  * the target to the source, and then mem copying the entire
83  * source onto the target. Update txdata pointers and related
84  * content.
85  */
86 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
87 {
88 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
89 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
90 	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
91 	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
92 	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
93 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
94 	int old_max_eth_txqs, new_max_eth_txqs;
95 	int old_txdata_index = 0, new_txdata_index = 0;
96 	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
97 
98 	/* Copy the NAPI object as it has been already initialized */
99 	from_fp->napi = to_fp->napi;
100 
101 	/* Move bnx2x_fastpath contents */
102 	memcpy(to_fp, from_fp, sizeof(*to_fp));
103 	to_fp->index = to;
104 
105 	/* Retain the tpa_info of the original `to' version as we don't want
106 	 * 2 FPs to contain the same tpa_info pointer.
107 	 */
108 	to_fp->tpa_info = old_tpa_info;
109 
110 	/* move sp_objs contents as well, as their indices match fp ones */
111 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
112 
113 	/* move fp_stats contents as well, as their indices match fp ones */
114 	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
115 
116 	/* Update txdata pointers in fp and move txdata content accordingly:
117 	 * Each fp consumes 'max_cos' txdata structures, so the index should be
118 	 * decremented by max_cos x delta.
119 	 */
120 
121 	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
122 	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
123 				(bp)->max_cos;
124 	if (from == FCOE_IDX(bp)) {
125 		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
126 		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127 	}
128 
129 	memcpy(&bp->bnx2x_txq[new_txdata_index],
130 	       &bp->bnx2x_txq[old_txdata_index],
131 	       sizeof(struct bnx2x_fp_txdata));
132 	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
133 }
134 
135 /**
136  * bnx2x_fill_fw_str - Fill buffer with FW version string.
137  *
138  * @bp:        driver handle
139  * @buf:       character buffer to fill with the fw name
140  * @buf_len:   length of the above buffer
141  *
142  */
143 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
144 {
145 	if (IS_PF(bp)) {
146 		u8 phy_fw_ver[PHY_FW_VER_LEN];
147 
148 		phy_fw_ver[0] = '\0';
149 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
150 					     phy_fw_ver, PHY_FW_VER_LEN);
151 		strscpy(buf, bp->fw_ver, buf_len);
152 		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
153 			 "bc %d.%d.%d%s%s",
154 			 (bp->common.bc_ver & 0xff0000) >> 16,
155 			 (bp->common.bc_ver & 0xff00) >> 8,
156 			 (bp->common.bc_ver & 0xff),
157 			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
158 	} else {
159 		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
160 	}
161 }
162 
163 /**
164  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
165  *
166  * @bp:	driver handle
167  * @delta:	number of eth queues which were not allocated
168  */
169 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
170 {
171 	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
172 
173 	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
174 	 * backward along the array could cause memory to be overridden
175 	 */
176 	for (cos = 1; cos < bp->max_cos; cos++) {
177 		for (i = 0; i < old_eth_num - delta; i++) {
178 			struct bnx2x_fastpath *fp = &bp->fp[i];
179 			int new_idx = cos * (old_eth_num - delta) + i;
180 
181 			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
182 			       sizeof(struct bnx2x_fp_txdata));
183 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
184 		}
185 	}
186 }
187 
188 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
189 
190 /* free skb in the packet ring at pos idx
191  * return idx of last bd freed
192  */
193 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
194 			     u16 idx, unsigned int *pkts_compl,
195 			     unsigned int *bytes_compl)
196 {
197 	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
198 	struct eth_tx_start_bd *tx_start_bd;
199 	struct eth_tx_bd *tx_data_bd;
200 	struct sk_buff *skb = tx_buf->skb;
201 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
202 	int nbd;
203 	u16 split_bd_len = 0;
204 
205 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
206 	prefetch(&skb->end);
207 
208 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
209 	   txdata->txq_index, idx, tx_buf, skb);
210 
211 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
212 
213 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
214 #ifdef BNX2X_STOP_ON_ERROR
215 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
216 		BNX2X_ERR("BAD nbd!\n");
217 		bnx2x_panic();
218 	}
219 #endif
220 	new_cons = nbd + tx_buf->first_bd;
221 
222 	/* Get the next bd */
223 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
224 
225 	/* Skip a parse bd... */
226 	--nbd;
227 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
228 
229 	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
230 		/* Skip second parse bd... */
231 		--nbd;
232 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
233 	}
234 
235 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
236 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
237 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
238 		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
239 		--nbd;
240 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
241 	}
242 
243 	/* unmap first bd */
244 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
245 			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
246 			 DMA_TO_DEVICE);
247 
248 	/* now free frags */
249 	while (nbd > 0) {
250 
251 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
252 		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
253 			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
254 		if (--nbd)
255 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
256 	}
257 
258 	/* release skb */
259 	WARN_ON(!skb);
260 	if (likely(skb)) {
261 		(*pkts_compl)++;
262 		(*bytes_compl) += skb->len;
263 		dev_kfree_skb_any(skb);
264 	}
265 
266 	tx_buf->first_bd = 0;
267 	tx_buf->skb = NULL;
268 
269 	return new_cons;
270 }
271 
272 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
273 {
274 	struct netdev_queue *txq;
275 	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
276 	unsigned int pkts_compl = 0, bytes_compl = 0;
277 
278 #ifdef BNX2X_STOP_ON_ERROR
279 	if (unlikely(bp->panic))
280 		return -1;
281 #endif
282 
283 	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
284 	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
285 	sw_cons = txdata->tx_pkt_cons;
286 
287 	/* Ensure subsequent loads occur after hw_cons */
288 	smp_rmb();
289 
290 	while (sw_cons != hw_cons) {
291 		u16 pkt_cons;
292 
293 		pkt_cons = TX_BD(sw_cons);
294 
295 		DP(NETIF_MSG_TX_DONE,
296 		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
297 		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
298 
299 		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
300 					    &pkts_compl, &bytes_compl);
301 
302 		sw_cons++;
303 	}
304 
305 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
306 
307 	txdata->tx_pkt_cons = sw_cons;
308 	txdata->tx_bd_cons = bd_cons;
309 
310 	/* Need to make the tx_bd_cons update visible to start_xmit()
311 	 * before checking for netif_tx_queue_stopped().  Without the
312 	 * memory barrier, there is a small possibility that
313 	 * start_xmit() will miss it and cause the queue to be stopped
314 	 * forever.
315 	 * On the other hand we need an rmb() here to ensure the proper
316 	 * ordering of bit testing in the following
317 	 * netif_tx_queue_stopped(txq) call.
318 	 */
319 	smp_mb();
320 
321 	if (unlikely(netif_tx_queue_stopped(txq))) {
322 		/* Taking tx_lock() is needed to prevent re-enabling the queue
323 		 * while it's empty. This could have happen if rx_action() gets
324 		 * suspended in bnx2x_tx_int() after the condition before
325 		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
326 		 *
327 		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
328 		 * sends some packets consuming the whole queue again->
329 		 * stops the queue
330 		 */
331 
332 		__netif_tx_lock(txq, smp_processor_id());
333 
334 		if ((netif_tx_queue_stopped(txq)) &&
335 		    (bp->state == BNX2X_STATE_OPEN) &&
336 		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
337 			netif_tx_wake_queue(txq);
338 
339 		__netif_tx_unlock(txq);
340 	}
341 	return 0;
342 }
343 
344 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
345 					     u16 idx)
346 {
347 	u16 last_max = fp->last_max_sge;
348 
349 	if (SUB_S16(idx, last_max) > 0)
350 		fp->last_max_sge = idx;
351 }
352 
353 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
354 					 u16 sge_len,
355 					 struct eth_end_agg_rx_cqe *cqe)
356 {
357 	struct bnx2x *bp = fp->bp;
358 	u16 last_max, last_elem, first_elem;
359 	u16 delta = 0;
360 	u16 i;
361 
362 	if (!sge_len)
363 		return;
364 
365 	/* First mark all used pages */
366 	for (i = 0; i < sge_len; i++)
367 		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
368 			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
369 
370 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
371 	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
372 
373 	/* Here we assume that the last SGE index is the biggest */
374 	prefetch((void *)(fp->sge_mask));
375 	bnx2x_update_last_max_sge(fp,
376 		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
377 
378 	last_max = RX_SGE(fp->last_max_sge);
379 	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
380 	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
381 
382 	/* If ring is not full */
383 	if (last_elem + 1 != first_elem)
384 		last_elem++;
385 
386 	/* Now update the prod */
387 	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
388 		if (likely(fp->sge_mask[i]))
389 			break;
390 
391 		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
392 		delta += BIT_VEC64_ELEM_SZ;
393 	}
394 
395 	if (delta > 0) {
396 		fp->rx_sge_prod += delta;
397 		/* clear page-end entries */
398 		bnx2x_clear_sge_mask_next_elems(fp);
399 	}
400 
401 	DP(NETIF_MSG_RX_STATUS,
402 	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
403 	   fp->last_max_sge, fp->rx_sge_prod);
404 }
405 
406 /* Get Toeplitz hash value in the skb using the value from the
407  * CQE (calculated by HW).
408  */
409 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
410 			    const struct eth_fast_path_rx_cqe *cqe,
411 			    enum pkt_hash_types *rxhash_type)
412 {
413 	/* Get Toeplitz hash from CQE */
414 	if ((bp->dev->features & NETIF_F_RXHASH) &&
415 	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
416 		enum eth_rss_hash_type htype;
417 
418 		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
419 		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
420 				(htype == TCP_IPV6_HASH_TYPE)) ?
421 			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
422 
423 		return le32_to_cpu(cqe->rss_hash_result);
424 	}
425 	*rxhash_type = PKT_HASH_TYPE_NONE;
426 	return 0;
427 }
428 
429 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
430 			    u16 cons, u16 prod,
431 			    struct eth_fast_path_rx_cqe *cqe)
432 {
433 	struct bnx2x *bp = fp->bp;
434 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
435 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
436 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
437 	dma_addr_t mapping;
438 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
439 	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
440 
441 	/* print error if current state != stop */
442 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
443 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
444 
445 	/* Try to map an empty data buffer from the aggregation info  */
446 	mapping = dma_map_single(&bp->pdev->dev,
447 				 first_buf->data + NET_SKB_PAD,
448 				 fp->rx_buf_size, DMA_FROM_DEVICE);
449 	/*
450 	 *  ...if it fails - move the skb from the consumer to the producer
451 	 *  and set the current aggregation state as ERROR to drop it
452 	 *  when TPA_STOP arrives.
453 	 */
454 
455 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
456 		/* Move the BD from the consumer to the producer */
457 		bnx2x_reuse_rx_data(fp, cons, prod);
458 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
459 		return;
460 	}
461 
462 	/* move empty data from pool to prod */
463 	prod_rx_buf->data = first_buf->data;
464 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
465 	/* point prod_bd to new data */
466 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
467 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
468 
469 	/* move partial skb from cons to pool (don't unmap yet) */
470 	*first_buf = *cons_rx_buf;
471 
472 	/* mark bin state as START */
473 	tpa_info->parsing_flags =
474 		le16_to_cpu(cqe->pars_flags.flags);
475 	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
476 	tpa_info->tpa_state = BNX2X_TPA_START;
477 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
478 	tpa_info->placement_offset = cqe->placement_offset;
479 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
480 	if (fp->mode == TPA_MODE_GRO) {
481 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
482 		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
483 		tpa_info->gro_size = gro_size;
484 	}
485 
486 #ifdef BNX2X_STOP_ON_ERROR
487 	fp->tpa_queue_used |= (1 << queue);
488 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
489 	   fp->tpa_queue_used);
490 #endif
491 }
492 
493 /* Timestamp option length allowed for TPA aggregation:
494  *
495  *		nop nop kind length echo val
496  */
497 #define TPA_TSTAMP_OPT_LEN	12
498 /**
499  * bnx2x_set_gro_params - compute GRO values
500  *
501  * @skb:		packet skb
502  * @parsing_flags:	parsing flags from the START CQE
503  * @len_on_bd:		total length of the first packet for the
504  *			aggregation.
505  * @pkt_len:		length of all segments
506  * @num_of_coalesced_segs: count of segments
507  *
508  * Approximate value of the MSS for this aggregation calculated using
509  * the first packet of it.
510  * Compute number of aggregated segments, and gso_type.
511  */
512 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
513 				 u16 len_on_bd, unsigned int pkt_len,
514 				 u16 num_of_coalesced_segs)
515 {
516 	/* TPA aggregation won't have either IP options or TCP options
517 	 * other than timestamp or IPv6 extension headers.
518 	 */
519 	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
520 
521 	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
522 	    PRS_FLAG_OVERETH_IPV6) {
523 		hdrs_len += sizeof(struct ipv6hdr);
524 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
525 	} else {
526 		hdrs_len += sizeof(struct iphdr);
527 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
528 	}
529 
530 	/* Check if there was a TCP timestamp, if there is it's will
531 	 * always be 12 bytes length: nop nop kind length echo val.
532 	 *
533 	 * Otherwise FW would close the aggregation.
534 	 */
535 	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
536 		hdrs_len += TPA_TSTAMP_OPT_LEN;
537 
538 	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
539 
540 	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
541 	 * to skb_shinfo(skb)->gso_segs
542 	 */
543 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
544 }
545 
546 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
547 			      u16 index, gfp_t gfp_mask)
548 {
549 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
550 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
551 	struct bnx2x_alloc_pool *pool = &fp->page_pool;
552 	dma_addr_t mapping;
553 
554 	if (!pool->page) {
555 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
556 		if (unlikely(!pool->page))
557 			return -ENOMEM;
558 
559 		pool->offset = 0;
560 	}
561 
562 	mapping = dma_map_page(&bp->pdev->dev, pool->page,
563 			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
564 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
565 		BNX2X_ERR("Can't map sge\n");
566 		return -ENOMEM;
567 	}
568 
569 	sw_buf->page = pool->page;
570 	sw_buf->offset = pool->offset;
571 
572 	dma_unmap_addr_set(sw_buf, mapping, mapping);
573 
574 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
575 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
576 
577 	pool->offset += SGE_PAGE_SIZE;
578 	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
579 		get_page(pool->page);
580 	else
581 		pool->page = NULL;
582 	return 0;
583 }
584 
585 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
586 			       struct bnx2x_agg_info *tpa_info,
587 			       u16 pages,
588 			       struct sk_buff *skb,
589 			       struct eth_end_agg_rx_cqe *cqe,
590 			       u16 cqe_idx)
591 {
592 	struct sw_rx_page *rx_pg, old_rx_pg;
593 	u32 i, frag_len, frag_size;
594 	int err, j, frag_id = 0;
595 	u16 len_on_bd = tpa_info->len_on_bd;
596 	u16 full_page = 0, gro_size = 0;
597 
598 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
599 
600 	if (fp->mode == TPA_MODE_GRO) {
601 		gro_size = tpa_info->gro_size;
602 		full_page = tpa_info->full_page;
603 	}
604 
605 	/* This is needed in order to enable forwarding support */
606 	if (frag_size)
607 		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
608 				     le16_to_cpu(cqe->pkt_len),
609 				     le16_to_cpu(cqe->num_of_coalesced_segs));
610 
611 #ifdef BNX2X_STOP_ON_ERROR
612 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
613 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
614 			  pages, cqe_idx);
615 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
616 		bnx2x_panic();
617 		return -EINVAL;
618 	}
619 #endif
620 
621 	/* Run through the SGL and compose the fragmented skb */
622 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
623 		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
624 
625 		/* FW gives the indices of the SGE as if the ring is an array
626 		   (meaning that "next" element will consume 2 indices) */
627 		if (fp->mode == TPA_MODE_GRO)
628 			frag_len = min_t(u32, frag_size, (u32)full_page);
629 		else /* LRO */
630 			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
631 
632 		rx_pg = &fp->rx_page_ring[sge_idx];
633 		old_rx_pg = *rx_pg;
634 
635 		/* If we fail to allocate a substitute page, we simply stop
636 		   where we are and drop the whole packet */
637 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
638 		if (unlikely(err)) {
639 			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
640 			return err;
641 		}
642 
643 		dma_unmap_page(&bp->pdev->dev,
644 			       dma_unmap_addr(&old_rx_pg, mapping),
645 			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
646 		/* Add one frag and update the appropriate fields in the skb */
647 		if (fp->mode == TPA_MODE_LRO)
648 			skb_fill_page_desc(skb, j, old_rx_pg.page,
649 					   old_rx_pg.offset, frag_len);
650 		else { /* GRO */
651 			int rem;
652 			int offset = 0;
653 			for (rem = frag_len; rem > 0; rem -= gro_size) {
654 				int len = rem > gro_size ? gro_size : rem;
655 				skb_fill_page_desc(skb, frag_id++,
656 						   old_rx_pg.page,
657 						   old_rx_pg.offset + offset,
658 						   len);
659 				if (offset)
660 					get_page(old_rx_pg.page);
661 				offset += len;
662 			}
663 		}
664 
665 		skb->data_len += frag_len;
666 		skb->truesize += SGE_PAGES;
667 		skb->len += frag_len;
668 
669 		frag_size -= frag_len;
670 	}
671 
672 	return 0;
673 }
674 
675 static struct sk_buff *
676 bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
677 {
678 	struct sk_buff *skb;
679 
680 	if (fp->rx_frag_size)
681 		skb = build_skb(data, fp->rx_frag_size);
682 	else
683 		skb = slab_build_skb(data);
684 	return skb;
685 }
686 
687 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
688 {
689 	if (fp->rx_frag_size)
690 		skb_free_frag(data);
691 	else
692 		kfree(data);
693 }
694 
695 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
696 {
697 	if (fp->rx_frag_size) {
698 		/* GFP_KERNEL allocations are used only during initialization */
699 		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
700 			return (void *)__get_free_page(gfp_mask);
701 
702 		return napi_alloc_frag(fp->rx_frag_size);
703 	}
704 
705 	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
706 }
707 
708 #ifdef CONFIG_INET
709 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
710 {
711 	const struct iphdr *iph = ip_hdr(skb);
712 	struct tcphdr *th;
713 
714 	skb_set_transport_header(skb, sizeof(struct iphdr));
715 	th = tcp_hdr(skb);
716 
717 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
718 				  iph->saddr, iph->daddr, 0);
719 }
720 
721 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
722 {
723 	struct ipv6hdr *iph = ipv6_hdr(skb);
724 	struct tcphdr *th;
725 
726 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
727 	th = tcp_hdr(skb);
728 
729 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
730 				  &iph->saddr, &iph->daddr, 0);
731 }
732 
733 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
734 			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
735 {
736 	skb_reset_network_header(skb);
737 	gro_func(bp, skb);
738 	tcp_gro_complete(skb);
739 }
740 #endif
741 
742 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
743 			       struct sk_buff *skb)
744 {
745 #ifdef CONFIG_INET
746 	if (skb_shinfo(skb)->gso_size) {
747 		switch (be16_to_cpu(skb->protocol)) {
748 		case ETH_P_IP:
749 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
750 			break;
751 		case ETH_P_IPV6:
752 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
753 			break;
754 		default:
755 			netdev_WARN_ONCE(bp->dev,
756 					 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
757 					 be16_to_cpu(skb->protocol));
758 		}
759 	}
760 #endif
761 	skb_record_rx_queue(skb, fp->rx_queue);
762 	napi_gro_receive(&fp->napi, skb);
763 }
764 
765 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
766 			   struct bnx2x_agg_info *tpa_info,
767 			   u16 pages,
768 			   struct eth_end_agg_rx_cqe *cqe,
769 			   u16 cqe_idx)
770 {
771 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
772 	u8 pad = tpa_info->placement_offset;
773 	u16 len = tpa_info->len_on_bd;
774 	struct sk_buff *skb = NULL;
775 	u8 *new_data, *data = rx_buf->data;
776 	u8 old_tpa_state = tpa_info->tpa_state;
777 
778 	tpa_info->tpa_state = BNX2X_TPA_STOP;
779 
780 	/* If we there was an error during the handling of the TPA_START -
781 	 * drop this aggregation.
782 	 */
783 	if (old_tpa_state == BNX2X_TPA_ERROR)
784 		goto drop;
785 
786 	/* Try to allocate the new data */
787 	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
788 	/* Unmap skb in the pool anyway, as we are going to change
789 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
790 	   fails. */
791 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
792 			 fp->rx_buf_size, DMA_FROM_DEVICE);
793 	if (likely(new_data))
794 		skb = bnx2x_build_skb(fp, data);
795 
796 	if (likely(skb)) {
797 #ifdef BNX2X_STOP_ON_ERROR
798 		if (pad + len > fp->rx_buf_size) {
799 			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
800 				  pad, len, fp->rx_buf_size);
801 			bnx2x_panic();
802 			bnx2x_frag_free(fp, new_data);
803 			return;
804 		}
805 #endif
806 
807 		skb_reserve(skb, pad + NET_SKB_PAD);
808 		skb_put(skb, len);
809 		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
810 
811 		skb->protocol = eth_type_trans(skb, bp->dev);
812 		skb->ip_summed = CHECKSUM_UNNECESSARY;
813 
814 		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
815 					 skb, cqe, cqe_idx)) {
816 			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
817 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
818 			bnx2x_gro_receive(bp, fp, skb);
819 		} else {
820 			DP(NETIF_MSG_RX_STATUS,
821 			   "Failed to allocate new pages - dropping packet!\n");
822 			dev_kfree_skb_any(skb);
823 		}
824 
825 		/* put new data in bin */
826 		rx_buf->data = new_data;
827 
828 		return;
829 	}
830 	if (new_data)
831 		bnx2x_frag_free(fp, new_data);
832 drop:
833 	/* drop the packet and keep the buffer in the bin */
834 	DP(NETIF_MSG_RX_STATUS,
835 	   "Failed to allocate or map a new skb - dropping packet!\n");
836 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
837 }
838 
839 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
840 			       u16 index, gfp_t gfp_mask)
841 {
842 	u8 *data;
843 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
844 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
845 	dma_addr_t mapping;
846 
847 	data = bnx2x_frag_alloc(fp, gfp_mask);
848 	if (unlikely(data == NULL))
849 		return -ENOMEM;
850 
851 	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
852 				 fp->rx_buf_size,
853 				 DMA_FROM_DEVICE);
854 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
855 		bnx2x_frag_free(fp, data);
856 		BNX2X_ERR("Can't map rx data\n");
857 		return -ENOMEM;
858 	}
859 
860 	rx_buf->data = data;
861 	dma_unmap_addr_set(rx_buf, mapping, mapping);
862 
863 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
864 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
865 
866 	return 0;
867 }
868 
869 static
870 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
871 				 struct bnx2x_fastpath *fp,
872 				 struct bnx2x_eth_q_stats *qstats)
873 {
874 	/* Do nothing if no L4 csum validation was done.
875 	 * We do not check whether IP csum was validated. For IPv4 we assume
876 	 * that if the card got as far as validating the L4 csum, it also
877 	 * validated the IP csum. IPv6 has no IP csum.
878 	 */
879 	if (cqe->fast_path_cqe.status_flags &
880 	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
881 		return;
882 
883 	/* If L4 validation was done, check if an error was found. */
884 
885 	if (cqe->fast_path_cqe.type_error_flags &
886 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
887 	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
888 		qstats->hw_csum_err++;
889 	else
890 		skb->ip_summed = CHECKSUM_UNNECESSARY;
891 }
892 
893 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
894 {
895 	struct bnx2x *bp = fp->bp;
896 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
897 	u16 sw_comp_cons, sw_comp_prod;
898 	int rx_pkt = 0;
899 	union eth_rx_cqe *cqe;
900 	struct eth_fast_path_rx_cqe *cqe_fp;
901 
902 #ifdef BNX2X_STOP_ON_ERROR
903 	if (unlikely(bp->panic))
904 		return 0;
905 #endif
906 	if (budget <= 0)
907 		return rx_pkt;
908 
909 	bd_cons = fp->rx_bd_cons;
910 	bd_prod = fp->rx_bd_prod;
911 	bd_prod_fw = bd_prod;
912 	sw_comp_cons = fp->rx_comp_cons;
913 	sw_comp_prod = fp->rx_comp_prod;
914 
915 	comp_ring_cons = RCQ_BD(sw_comp_cons);
916 	cqe = &fp->rx_comp_ring[comp_ring_cons];
917 	cqe_fp = &cqe->fast_path_cqe;
918 
919 	DP(NETIF_MSG_RX_STATUS,
920 	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
921 
922 	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
923 		struct sw_rx_bd *rx_buf = NULL;
924 		struct sk_buff *skb;
925 		u8 cqe_fp_flags;
926 		enum eth_rx_cqe_type cqe_fp_type;
927 		u16 len, pad, queue;
928 		u8 *data;
929 		u32 rxhash;
930 		enum pkt_hash_types rxhash_type;
931 
932 #ifdef BNX2X_STOP_ON_ERROR
933 		if (unlikely(bp->panic))
934 			return 0;
935 #endif
936 
937 		bd_prod = RX_BD(bd_prod);
938 		bd_cons = RX_BD(bd_cons);
939 
940 		/* A rmb() is required to ensure that the CQE is not read
941 		 * before it is written by the adapter DMA.  PCI ordering
942 		 * rules will make sure the other fields are written before
943 		 * the marker at the end of struct eth_fast_path_rx_cqe
944 		 * but without rmb() a weakly ordered processor can process
945 		 * stale data.  Without the barrier TPA state-machine might
946 		 * enter inconsistent state and kernel stack might be
947 		 * provided with incorrect packet description - these lead
948 		 * to various kernel crashed.
949 		 */
950 		rmb();
951 
952 		cqe_fp_flags = cqe_fp->type_error_flags;
953 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
954 
955 		DP(NETIF_MSG_RX_STATUS,
956 		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
957 		   CQE_TYPE(cqe_fp_flags),
958 		   cqe_fp_flags, cqe_fp->status_flags,
959 		   le32_to_cpu(cqe_fp->rss_hash_result),
960 		   le16_to_cpu(cqe_fp->vlan_tag),
961 		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
962 
963 		/* is this a slowpath msg? */
964 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
965 			bnx2x_sp_event(fp, cqe);
966 			goto next_cqe;
967 		}
968 
969 		rx_buf = &fp->rx_buf_ring[bd_cons];
970 		data = rx_buf->data;
971 
972 		if (!CQE_TYPE_FAST(cqe_fp_type)) {
973 			struct bnx2x_agg_info *tpa_info;
974 			u16 frag_size, pages;
975 #ifdef BNX2X_STOP_ON_ERROR
976 			/* sanity check */
977 			if (fp->mode == TPA_MODE_DISABLED &&
978 			    (CQE_TYPE_START(cqe_fp_type) ||
979 			     CQE_TYPE_STOP(cqe_fp_type)))
980 				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
981 					  CQE_TYPE(cqe_fp_type));
982 #endif
983 
984 			if (CQE_TYPE_START(cqe_fp_type)) {
985 				u16 queue = cqe_fp->queue_index;
986 				DP(NETIF_MSG_RX_STATUS,
987 				   "calling tpa_start on queue %d\n",
988 				   queue);
989 
990 				bnx2x_tpa_start(fp, queue,
991 						bd_cons, bd_prod,
992 						cqe_fp);
993 
994 				goto next_rx;
995 			}
996 			queue = cqe->end_agg_cqe.queue_index;
997 			tpa_info = &fp->tpa_info[queue];
998 			DP(NETIF_MSG_RX_STATUS,
999 			   "calling tpa_stop on queue %d\n",
1000 			   queue);
1001 
1002 			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
1003 				    tpa_info->len_on_bd;
1004 
1005 			if (fp->mode == TPA_MODE_GRO)
1006 				pages = (frag_size + tpa_info->full_page - 1) /
1007 					 tpa_info->full_page;
1008 			else
1009 				pages = SGE_PAGE_ALIGN(frag_size) >>
1010 					SGE_PAGE_SHIFT;
1011 
1012 			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1013 				       &cqe->end_agg_cqe, comp_ring_cons);
1014 #ifdef BNX2X_STOP_ON_ERROR
1015 			if (bp->panic)
1016 				return 0;
1017 #endif
1018 
1019 			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1020 			goto next_cqe;
1021 		}
1022 		/* non TPA */
1023 		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1024 		pad = cqe_fp->placement_offset;
1025 		dma_sync_single_for_cpu(&bp->pdev->dev,
1026 					dma_unmap_addr(rx_buf, mapping),
1027 					pad + RX_COPY_THRESH,
1028 					DMA_FROM_DEVICE);
1029 		pad += NET_SKB_PAD;
1030 		prefetch(data + pad); /* speedup eth_type_trans() */
1031 		/* is this an error packet? */
1032 		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1033 			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1034 			   "ERROR  flags %x  rx packet %u\n",
1035 			   cqe_fp_flags, sw_comp_cons);
1036 			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1037 			goto reuse_rx;
1038 		}
1039 
1040 		/* Since we don't have a jumbo ring
1041 		 * copy small packets if mtu > 1500
1042 		 */
1043 		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1044 		    (len <= RX_COPY_THRESH)) {
1045 			skb = napi_alloc_skb(&fp->napi, len);
1046 			if (skb == NULL) {
1047 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1048 				   "ERROR  packet dropped because of alloc failure\n");
1049 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1050 				goto reuse_rx;
1051 			}
1052 			memcpy(skb->data, data + pad, len);
1053 			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1054 		} else {
1055 			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1056 						       GFP_ATOMIC) == 0)) {
1057 				dma_unmap_single(&bp->pdev->dev,
1058 						 dma_unmap_addr(rx_buf, mapping),
1059 						 fp->rx_buf_size,
1060 						 DMA_FROM_DEVICE);
1061 				skb = bnx2x_build_skb(fp, data);
1062 				if (unlikely(!skb)) {
1063 					bnx2x_frag_free(fp, data);
1064 					bnx2x_fp_qstats(bp, fp)->
1065 							rx_skb_alloc_failed++;
1066 					goto next_rx;
1067 				}
1068 				skb_reserve(skb, pad);
1069 			} else {
1070 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1071 				   "ERROR  packet dropped because of alloc failure\n");
1072 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1073 reuse_rx:
1074 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1075 				goto next_rx;
1076 			}
1077 		}
1078 
1079 		skb_put(skb, len);
1080 		skb->protocol = eth_type_trans(skb, bp->dev);
1081 
1082 		/* Set Toeplitz hash for a none-LRO skb */
1083 		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1084 		skb_set_hash(skb, rxhash, rxhash_type);
1085 
1086 		skb_checksum_none_assert(skb);
1087 
1088 		if (bp->dev->features & NETIF_F_RXCSUM)
1089 			bnx2x_csum_validate(skb, cqe, fp,
1090 					    bnx2x_fp_qstats(bp, fp));
1091 
1092 		skb_record_rx_queue(skb, fp->rx_queue);
1093 
1094 		/* Check if this packet was timestamped */
1095 		if (unlikely(cqe->fast_path_cqe.type_error_flags &
1096 			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1097 			bnx2x_set_rx_ts(bp, skb);
1098 
1099 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1100 		    PARSING_FLAGS_VLAN)
1101 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1102 					       le16_to_cpu(cqe_fp->vlan_tag));
1103 
1104 		napi_gro_receive(&fp->napi, skb);
1105 next_rx:
1106 		rx_buf->data = NULL;
1107 
1108 		bd_cons = NEXT_RX_IDX(bd_cons);
1109 		bd_prod = NEXT_RX_IDX(bd_prod);
1110 		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1111 		rx_pkt++;
1112 next_cqe:
1113 		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1114 		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1115 
1116 		/* mark CQE as free */
1117 		BNX2X_SEED_CQE(cqe_fp);
1118 
1119 		if (rx_pkt == budget)
1120 			break;
1121 
1122 		comp_ring_cons = RCQ_BD(sw_comp_cons);
1123 		cqe = &fp->rx_comp_ring[comp_ring_cons];
1124 		cqe_fp = &cqe->fast_path_cqe;
1125 	} /* while */
1126 
1127 	fp->rx_bd_cons = bd_cons;
1128 	fp->rx_bd_prod = bd_prod_fw;
1129 	fp->rx_comp_cons = sw_comp_cons;
1130 	fp->rx_comp_prod = sw_comp_prod;
1131 
1132 	/* Update producers */
1133 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1134 			     fp->rx_sge_prod);
1135 
1136 	return rx_pkt;
1137 }
1138 
1139 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1140 {
1141 	struct bnx2x_fastpath *fp = fp_cookie;
1142 	struct bnx2x *bp = fp->bp;
1143 	u8 cos;
1144 
1145 	DP(NETIF_MSG_INTR,
1146 	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1147 	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
1148 
1149 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1150 
1151 #ifdef BNX2X_STOP_ON_ERROR
1152 	if (unlikely(bp->panic))
1153 		return IRQ_HANDLED;
1154 #endif
1155 
1156 	/* Handle Rx and Tx according to MSI-X vector */
1157 	for_each_cos_in_tx_queue(fp, cos)
1158 		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1159 
1160 	prefetch(&fp->sb_running_index[SM_RX_ID]);
1161 	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1162 
1163 	return IRQ_HANDLED;
1164 }
1165 
1166 /* HW Lock for shared dual port PHYs */
1167 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1168 {
1169 	mutex_lock(&bp->port.phy_mutex);
1170 
1171 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1172 }
1173 
1174 void bnx2x_release_phy_lock(struct bnx2x *bp)
1175 {
1176 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1177 
1178 	mutex_unlock(&bp->port.phy_mutex);
1179 }
1180 
1181 /* calculates MF speed according to current linespeed and MF configuration */
1182 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1183 {
1184 	u16 line_speed = bp->link_vars.line_speed;
1185 	if (IS_MF(bp)) {
1186 		u16 maxCfg = bnx2x_extract_max_cfg(bp,
1187 						   bp->mf_config[BP_VN(bp)]);
1188 
1189 		/* Calculate the current MAX line speed limit for the MF
1190 		 * devices
1191 		 */
1192 		if (IS_MF_PERCENT_BW(bp))
1193 			line_speed = (line_speed * maxCfg) / 100;
1194 		else { /* SD mode */
1195 			u16 vn_max_rate = maxCfg * 100;
1196 
1197 			if (vn_max_rate < line_speed)
1198 				line_speed = vn_max_rate;
1199 		}
1200 	}
1201 
1202 	return line_speed;
1203 }
1204 
1205 /**
1206  * bnx2x_fill_report_data - fill link report data to report
1207  *
1208  * @bp:		driver handle
1209  * @data:	link state to update
1210  *
1211  * It uses a none-atomic bit operations because is called under the mutex.
1212  */
1213 static void bnx2x_fill_report_data(struct bnx2x *bp,
1214 				   struct bnx2x_link_report_data *data)
1215 {
1216 	memset(data, 0, sizeof(*data));
1217 
1218 	if (IS_PF(bp)) {
1219 		/* Fill the report data: effective line speed */
1220 		data->line_speed = bnx2x_get_mf_speed(bp);
1221 
1222 		/* Link is down */
1223 		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1224 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1225 				  &data->link_report_flags);
1226 
1227 		if (!BNX2X_NUM_ETH_QUEUES(bp))
1228 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1229 				  &data->link_report_flags);
1230 
1231 		/* Full DUPLEX */
1232 		if (bp->link_vars.duplex == DUPLEX_FULL)
1233 			__set_bit(BNX2X_LINK_REPORT_FD,
1234 				  &data->link_report_flags);
1235 
1236 		/* Rx Flow Control is ON */
1237 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1238 			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1239 				  &data->link_report_flags);
1240 
1241 		/* Tx Flow Control is ON */
1242 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1243 			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1244 				  &data->link_report_flags);
1245 	} else { /* VF */
1246 		*data = bp->vf_link_vars;
1247 	}
1248 }
1249 
1250 /**
1251  * bnx2x_link_report - report link status to OS.
1252  *
1253  * @bp:		driver handle
1254  *
1255  * Calls the __bnx2x_link_report() under the same locking scheme
1256  * as a link/PHY state managing code to ensure a consistent link
1257  * reporting.
1258  */
1259 
1260 void bnx2x_link_report(struct bnx2x *bp)
1261 {
1262 	bnx2x_acquire_phy_lock(bp);
1263 	__bnx2x_link_report(bp);
1264 	bnx2x_release_phy_lock(bp);
1265 }
1266 
1267 /**
1268  * __bnx2x_link_report - report link status to OS.
1269  *
1270  * @bp:		driver handle
1271  *
1272  * None atomic implementation.
1273  * Should be called under the phy_lock.
1274  */
1275 void __bnx2x_link_report(struct bnx2x *bp)
1276 {
1277 	struct bnx2x_link_report_data cur_data;
1278 
1279 	if (bp->force_link_down) {
1280 		bp->link_vars.link_up = 0;
1281 		return;
1282 	}
1283 
1284 	/* reread mf_cfg */
1285 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
1286 		bnx2x_read_mf_cfg(bp);
1287 
1288 	/* Read the current link report info */
1289 	bnx2x_fill_report_data(bp, &cur_data);
1290 
1291 	/* Don't report link down or exactly the same link status twice */
1292 	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1293 	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1294 		      &bp->last_reported_link.link_report_flags) &&
1295 	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1296 		      &cur_data.link_report_flags)))
1297 		return;
1298 
1299 	bp->link_cnt++;
1300 
1301 	/* We are going to report a new link parameters now -
1302 	 * remember the current data for the next time.
1303 	 */
1304 	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1305 
1306 	/* propagate status to VFs */
1307 	if (IS_PF(bp))
1308 		bnx2x_iov_link_update(bp);
1309 
1310 	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1311 		     &cur_data.link_report_flags)) {
1312 		netif_carrier_off(bp->dev);
1313 		netdev_err(bp->dev, "NIC Link is Down\n");
1314 		return;
1315 	} else {
1316 		const char *duplex;
1317 		const char *flow;
1318 
1319 		netif_carrier_on(bp->dev);
1320 
1321 		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1322 				       &cur_data.link_report_flags))
1323 			duplex = "full";
1324 		else
1325 			duplex = "half";
1326 
1327 		/* Handle the FC at the end so that only these flags would be
1328 		 * possibly set. This way we may easily check if there is no FC
1329 		 * enabled.
1330 		 */
1331 		if (cur_data.link_report_flags) {
1332 			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1333 				     &cur_data.link_report_flags)) {
1334 				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1335 				     &cur_data.link_report_flags))
1336 					flow = "ON - receive & transmit";
1337 				else
1338 					flow = "ON - receive";
1339 			} else {
1340 				flow = "ON - transmit";
1341 			}
1342 		} else {
1343 			flow = "none";
1344 		}
1345 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1346 			    cur_data.line_speed, duplex, flow);
1347 	}
1348 }
1349 
1350 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1351 {
1352 	int i;
1353 
1354 	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1355 		struct eth_rx_sge *sge;
1356 
1357 		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1358 		sge->addr_hi =
1359 			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1360 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1361 
1362 		sge->addr_lo =
1363 			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1364 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1365 	}
1366 }
1367 
1368 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1369 				struct bnx2x_fastpath *fp, int last)
1370 {
1371 	int i;
1372 
1373 	for (i = 0; i < last; i++) {
1374 		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1375 		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1376 		u8 *data = first_buf->data;
1377 
1378 		if (data == NULL) {
1379 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1380 			continue;
1381 		}
1382 		if (tpa_info->tpa_state == BNX2X_TPA_START)
1383 			dma_unmap_single(&bp->pdev->dev,
1384 					 dma_unmap_addr(first_buf, mapping),
1385 					 fp->rx_buf_size, DMA_FROM_DEVICE);
1386 		bnx2x_frag_free(fp, data);
1387 		first_buf->data = NULL;
1388 	}
1389 }
1390 
1391 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1392 {
1393 	int j;
1394 
1395 	for_each_rx_queue_cnic(bp, j) {
1396 		struct bnx2x_fastpath *fp = &bp->fp[j];
1397 
1398 		fp->rx_bd_cons = 0;
1399 
1400 		/* Activate BD ring */
1401 		/* Warning!
1402 		 * this will generate an interrupt (to the TSTORM)
1403 		 * must only be done after chip is initialized
1404 		 */
1405 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1406 				     fp->rx_sge_prod);
1407 	}
1408 }
1409 
1410 void bnx2x_init_rx_rings(struct bnx2x *bp)
1411 {
1412 	int func = BP_FUNC(bp);
1413 	u16 ring_prod;
1414 	int i, j;
1415 
1416 	/* Allocate TPA resources */
1417 	for_each_eth_queue(bp, j) {
1418 		struct bnx2x_fastpath *fp = &bp->fp[j];
1419 
1420 		DP(NETIF_MSG_IFUP,
1421 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1422 
1423 		if (fp->mode != TPA_MODE_DISABLED) {
1424 			/* Fill the per-aggregation pool */
1425 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
1426 				struct bnx2x_agg_info *tpa_info =
1427 					&fp->tpa_info[i];
1428 				struct sw_rx_bd *first_buf =
1429 					&tpa_info->first_buf;
1430 
1431 				first_buf->data =
1432 					bnx2x_frag_alloc(fp, GFP_KERNEL);
1433 				if (!first_buf->data) {
1434 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1435 						  j);
1436 					bnx2x_free_tpa_pool(bp, fp, i);
1437 					fp->mode = TPA_MODE_DISABLED;
1438 					break;
1439 				}
1440 				dma_unmap_addr_set(first_buf, mapping, 0);
1441 				tpa_info->tpa_state = BNX2X_TPA_STOP;
1442 			}
1443 
1444 			/* "next page" elements initialization */
1445 			bnx2x_set_next_page_sgl(fp);
1446 
1447 			/* set SGEs bit mask */
1448 			bnx2x_init_sge_ring_bit_mask(fp);
1449 
1450 			/* Allocate SGEs and initialize the ring elements */
1451 			for (i = 0, ring_prod = 0;
1452 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1453 
1454 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1455 						       GFP_KERNEL) < 0) {
1456 					BNX2X_ERR("was only able to allocate %d rx sges\n",
1457 						  i);
1458 					BNX2X_ERR("disabling TPA for queue[%d]\n",
1459 						  j);
1460 					/* Cleanup already allocated elements */
1461 					bnx2x_free_rx_sge_range(bp, fp,
1462 								ring_prod);
1463 					bnx2x_free_tpa_pool(bp, fp,
1464 							    MAX_AGG_QS(bp));
1465 					fp->mode = TPA_MODE_DISABLED;
1466 					ring_prod = 0;
1467 					break;
1468 				}
1469 				ring_prod = NEXT_SGE_IDX(ring_prod);
1470 			}
1471 
1472 			fp->rx_sge_prod = ring_prod;
1473 		}
1474 	}
1475 
1476 	for_each_eth_queue(bp, j) {
1477 		struct bnx2x_fastpath *fp = &bp->fp[j];
1478 
1479 		fp->rx_bd_cons = 0;
1480 
1481 		/* Activate BD ring */
1482 		/* Warning!
1483 		 * this will generate an interrupt (to the TSTORM)
1484 		 * must only be done after chip is initialized
1485 		 */
1486 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1487 				     fp->rx_sge_prod);
1488 
1489 		if (j != 0)
1490 			continue;
1491 
1492 		if (CHIP_IS_E1(bp)) {
1493 			REG_WR(bp, BAR_USTRORM_INTMEM +
1494 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1495 			       U64_LO(fp->rx_comp_mapping));
1496 			REG_WR(bp, BAR_USTRORM_INTMEM +
1497 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1498 			       U64_HI(fp->rx_comp_mapping));
1499 		}
1500 	}
1501 }
1502 
1503 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1504 {
1505 	u8 cos;
1506 	struct bnx2x *bp = fp->bp;
1507 
1508 	for_each_cos_in_tx_queue(fp, cos) {
1509 		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1510 		unsigned pkts_compl = 0, bytes_compl = 0;
1511 
1512 		u16 sw_prod = txdata->tx_pkt_prod;
1513 		u16 sw_cons = txdata->tx_pkt_cons;
1514 
1515 		while (sw_cons != sw_prod) {
1516 			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1517 					  &pkts_compl, &bytes_compl);
1518 			sw_cons++;
1519 		}
1520 
1521 		netdev_tx_reset_queue(
1522 			netdev_get_tx_queue(bp->dev,
1523 					    txdata->txq_index));
1524 	}
1525 }
1526 
1527 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1528 {
1529 	int i;
1530 
1531 	for_each_tx_queue_cnic(bp, i) {
1532 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1533 	}
1534 }
1535 
1536 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1537 {
1538 	int i;
1539 
1540 	for_each_eth_queue(bp, i) {
1541 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1542 	}
1543 }
1544 
1545 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1546 {
1547 	struct bnx2x *bp = fp->bp;
1548 	int i;
1549 
1550 	/* ring wasn't allocated */
1551 	if (fp->rx_buf_ring == NULL)
1552 		return;
1553 
1554 	for (i = 0; i < NUM_RX_BD; i++) {
1555 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1556 		u8 *data = rx_buf->data;
1557 
1558 		if (data == NULL)
1559 			continue;
1560 		dma_unmap_single(&bp->pdev->dev,
1561 				 dma_unmap_addr(rx_buf, mapping),
1562 				 fp->rx_buf_size, DMA_FROM_DEVICE);
1563 
1564 		rx_buf->data = NULL;
1565 		bnx2x_frag_free(fp, data);
1566 	}
1567 }
1568 
1569 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1570 {
1571 	int j;
1572 
1573 	for_each_rx_queue_cnic(bp, j) {
1574 		bnx2x_free_rx_bds(&bp->fp[j]);
1575 	}
1576 }
1577 
1578 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1579 {
1580 	int j;
1581 
1582 	for_each_eth_queue(bp, j) {
1583 		struct bnx2x_fastpath *fp = &bp->fp[j];
1584 
1585 		bnx2x_free_rx_bds(fp);
1586 
1587 		if (fp->mode != TPA_MODE_DISABLED)
1588 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1589 	}
1590 }
1591 
1592 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1593 {
1594 	bnx2x_free_tx_skbs_cnic(bp);
1595 	bnx2x_free_rx_skbs_cnic(bp);
1596 }
1597 
1598 void bnx2x_free_skbs(struct bnx2x *bp)
1599 {
1600 	bnx2x_free_tx_skbs(bp);
1601 	bnx2x_free_rx_skbs(bp);
1602 }
1603 
1604 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1605 {
1606 	/* load old values */
1607 	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1608 
1609 	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1610 		/* leave all but MAX value */
1611 		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1612 
1613 		/* set new MAX value */
1614 		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1615 				& FUNC_MF_CFG_MAX_BW_MASK;
1616 
1617 		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1618 	}
1619 }
1620 
1621 /**
1622  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1623  *
1624  * @bp:		driver handle
1625  * @nvecs:	number of vectors to be released
1626  */
1627 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1628 {
1629 	int i, offset = 0;
1630 
1631 	if (nvecs == offset)
1632 		return;
1633 
1634 	/* VFs don't have a default SB */
1635 	if (IS_PF(bp)) {
1636 		free_irq(bp->msix_table[offset].vector, bp->dev);
1637 		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1638 		   bp->msix_table[offset].vector);
1639 		offset++;
1640 	}
1641 
1642 	if (CNIC_SUPPORT(bp)) {
1643 		if (nvecs == offset)
1644 			return;
1645 		offset++;
1646 	}
1647 
1648 	for_each_eth_queue(bp, i) {
1649 		if (nvecs == offset)
1650 			return;
1651 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1652 		   i, bp->msix_table[offset].vector);
1653 
1654 		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1655 	}
1656 }
1657 
1658 void bnx2x_free_irq(struct bnx2x *bp)
1659 {
1660 	if (bp->flags & USING_MSIX_FLAG &&
1661 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1662 		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1663 
1664 		/* vfs don't have a default status block */
1665 		if (IS_PF(bp))
1666 			nvecs++;
1667 
1668 		bnx2x_free_msix_irqs(bp, nvecs);
1669 	} else {
1670 		free_irq(bp->dev->irq, bp->dev);
1671 	}
1672 }
1673 
1674 int bnx2x_enable_msix(struct bnx2x *bp)
1675 {
1676 	int msix_vec = 0, i, rc;
1677 
1678 	/* VFs don't have a default status block */
1679 	if (IS_PF(bp)) {
1680 		bp->msix_table[msix_vec].entry = msix_vec;
1681 		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1682 			       bp->msix_table[0].entry);
1683 		msix_vec++;
1684 	}
1685 
1686 	/* Cnic requires an msix vector for itself */
1687 	if (CNIC_SUPPORT(bp)) {
1688 		bp->msix_table[msix_vec].entry = msix_vec;
1689 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1690 			       msix_vec, bp->msix_table[msix_vec].entry);
1691 		msix_vec++;
1692 	}
1693 
1694 	/* We need separate vectors for ETH queues only (not FCoE) */
1695 	for_each_eth_queue(bp, i) {
1696 		bp->msix_table[msix_vec].entry = msix_vec;
1697 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1698 			       msix_vec, msix_vec, i);
1699 		msix_vec++;
1700 	}
1701 
1702 	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1703 	   msix_vec);
1704 
1705 	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1706 				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1707 	/*
1708 	 * reconfigure number of tx/rx queues according to available
1709 	 * MSI-X vectors
1710 	 */
1711 	if (rc == -ENOSPC) {
1712 		/* Get by with single vector */
1713 		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1714 		if (rc < 0) {
1715 			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1716 				       rc);
1717 			goto no_msix;
1718 		}
1719 
1720 		BNX2X_DEV_INFO("Using single MSI-X vector\n");
1721 		bp->flags |= USING_SINGLE_MSIX_FLAG;
1722 
1723 		BNX2X_DEV_INFO("set number of queues to 1\n");
1724 		bp->num_ethernet_queues = 1;
1725 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1726 	} else if (rc < 0) {
1727 		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1728 		goto no_msix;
1729 	} else if (rc < msix_vec) {
1730 		/* how less vectors we will have? */
1731 		int diff = msix_vec - rc;
1732 
1733 		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1734 
1735 		/*
1736 		 * decrease number of queues by number of unallocated entries
1737 		 */
1738 		bp->num_ethernet_queues -= diff;
1739 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1740 
1741 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
1742 			       bp->num_queues);
1743 	}
1744 
1745 	bp->flags |= USING_MSIX_FLAG;
1746 
1747 	return 0;
1748 
1749 no_msix:
1750 	/* fall to INTx if not enough memory */
1751 	if (rc == -ENOMEM)
1752 		bp->flags |= DISABLE_MSI_FLAG;
1753 
1754 	return rc;
1755 }
1756 
1757 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1758 {
1759 	int i, rc, offset = 0;
1760 
1761 	/* no default status block for vf */
1762 	if (IS_PF(bp)) {
1763 		rc = request_irq(bp->msix_table[offset++].vector,
1764 				 bnx2x_msix_sp_int, 0,
1765 				 bp->dev->name, bp->dev);
1766 		if (rc) {
1767 			BNX2X_ERR("request sp irq failed\n");
1768 			return -EBUSY;
1769 		}
1770 	}
1771 
1772 	if (CNIC_SUPPORT(bp))
1773 		offset++;
1774 
1775 	for_each_eth_queue(bp, i) {
1776 		struct bnx2x_fastpath *fp = &bp->fp[i];
1777 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1778 			 bp->dev->name, i);
1779 
1780 		rc = request_irq(bp->msix_table[offset].vector,
1781 				 bnx2x_msix_fp_int, 0, fp->name, fp);
1782 		if (rc) {
1783 			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1784 			      bp->msix_table[offset].vector, rc);
1785 			bnx2x_free_msix_irqs(bp, offset);
1786 			return -EBUSY;
1787 		}
1788 
1789 		offset++;
1790 	}
1791 
1792 	i = BNX2X_NUM_ETH_QUEUES(bp);
1793 	if (IS_PF(bp)) {
1794 		offset = 1 + CNIC_SUPPORT(bp);
1795 		netdev_info(bp->dev,
1796 			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1797 			    bp->msix_table[0].vector,
1798 			    0, bp->msix_table[offset].vector,
1799 			    i - 1, bp->msix_table[offset + i - 1].vector);
1800 	} else {
1801 		offset = CNIC_SUPPORT(bp);
1802 		netdev_info(bp->dev,
1803 			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1804 			    0, bp->msix_table[offset].vector,
1805 			    i - 1, bp->msix_table[offset + i - 1].vector);
1806 	}
1807 	return 0;
1808 }
1809 
1810 int bnx2x_enable_msi(struct bnx2x *bp)
1811 {
1812 	int rc;
1813 
1814 	rc = pci_enable_msi(bp->pdev);
1815 	if (rc) {
1816 		BNX2X_DEV_INFO("MSI is not attainable\n");
1817 		return -1;
1818 	}
1819 	bp->flags |= USING_MSI_FLAG;
1820 
1821 	return 0;
1822 }
1823 
1824 static int bnx2x_req_irq(struct bnx2x *bp)
1825 {
1826 	unsigned long flags;
1827 	unsigned int irq;
1828 
1829 	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1830 		flags = 0;
1831 	else
1832 		flags = IRQF_SHARED;
1833 
1834 	if (bp->flags & USING_MSIX_FLAG)
1835 		irq = bp->msix_table[0].vector;
1836 	else
1837 		irq = bp->pdev->irq;
1838 
1839 	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1840 }
1841 
1842 static int bnx2x_setup_irqs(struct bnx2x *bp)
1843 {
1844 	int rc = 0;
1845 	if (bp->flags & USING_MSIX_FLAG &&
1846 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1847 		rc = bnx2x_req_msix_irqs(bp);
1848 		if (rc)
1849 			return rc;
1850 	} else {
1851 		rc = bnx2x_req_irq(bp);
1852 		if (rc) {
1853 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1854 			return rc;
1855 		}
1856 		if (bp->flags & USING_MSI_FLAG) {
1857 			bp->dev->irq = bp->pdev->irq;
1858 			netdev_info(bp->dev, "using MSI IRQ %d\n",
1859 				    bp->dev->irq);
1860 		}
1861 		if (bp->flags & USING_MSIX_FLAG) {
1862 			bp->dev->irq = bp->msix_table[0].vector;
1863 			netdev_info(bp->dev, "using MSIX IRQ %d\n",
1864 				    bp->dev->irq);
1865 		}
1866 	}
1867 
1868 	return 0;
1869 }
1870 
1871 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1872 {
1873 	int i;
1874 
1875 	for_each_rx_queue_cnic(bp, i) {
1876 		napi_enable(&bnx2x_fp(bp, i, napi));
1877 	}
1878 }
1879 
1880 static void bnx2x_napi_enable(struct bnx2x *bp)
1881 {
1882 	int i;
1883 
1884 	for_each_eth_queue(bp, i) {
1885 		napi_enable(&bnx2x_fp(bp, i, napi));
1886 	}
1887 }
1888 
1889 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1890 {
1891 	int i;
1892 
1893 	for_each_rx_queue_cnic(bp, i) {
1894 		napi_disable(&bnx2x_fp(bp, i, napi));
1895 	}
1896 }
1897 
1898 static void bnx2x_napi_disable(struct bnx2x *bp)
1899 {
1900 	int i;
1901 
1902 	for_each_eth_queue(bp, i) {
1903 		napi_disable(&bnx2x_fp(bp, i, napi));
1904 	}
1905 }
1906 
1907 void bnx2x_netif_start(struct bnx2x *bp)
1908 {
1909 	if (netif_running(bp->dev)) {
1910 		bnx2x_napi_enable(bp);
1911 		if (CNIC_LOADED(bp))
1912 			bnx2x_napi_enable_cnic(bp);
1913 		bnx2x_int_enable(bp);
1914 		if (bp->state == BNX2X_STATE_OPEN)
1915 			netif_tx_wake_all_queues(bp->dev);
1916 	}
1917 }
1918 
1919 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1920 {
1921 	bnx2x_int_disable_sync(bp, disable_hw);
1922 	bnx2x_napi_disable(bp);
1923 	if (CNIC_LOADED(bp))
1924 		bnx2x_napi_disable_cnic(bp);
1925 }
1926 
1927 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1928 		       struct net_device *sb_dev)
1929 {
1930 	struct bnx2x *bp = netdev_priv(dev);
1931 
1932 	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1933 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
1934 		u16 ether_type = ntohs(hdr->h_proto);
1935 
1936 		/* Skip VLAN tag if present */
1937 		if (ether_type == ETH_P_8021Q) {
1938 			struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
1939 
1940 			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1941 		}
1942 
1943 		/* If ethertype is FCoE or FIP - use FCoE ring */
1944 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1945 			return bnx2x_fcoe_tx(bp, txq_index);
1946 	}
1947 
1948 	/* select a non-FCoE queue */
1949 	return netdev_pick_tx(dev, skb, NULL) %
1950 			(BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1951 }
1952 
1953 void bnx2x_set_num_queues(struct bnx2x *bp)
1954 {
1955 	/* RSS queues */
1956 	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1957 
1958 	/* override in STORAGE SD modes */
1959 	if (IS_MF_STORAGE_ONLY(bp))
1960 		bp->num_ethernet_queues = 1;
1961 
1962 	/* Add special queues */
1963 	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1964 	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1965 
1966 	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1967 }
1968 
1969 /**
1970  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1971  *
1972  * @bp:		Driver handle
1973  * @include_cnic: handle cnic case
1974  *
1975  * We currently support for at most 16 Tx queues for each CoS thus we will
1976  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1977  * bp->max_cos.
1978  *
1979  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1980  * index after all ETH L2 indices.
1981  *
1982  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1983  * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1984  * 16..31,...) with indices that are not coupled with any real Tx queue.
1985  *
1986  * The proper configuration of skb->queue_mapping is handled by
1987  * bnx2x_select_queue() and __skb_tx_hash().
1988  *
1989  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1990  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1991  */
1992 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1993 {
1994 	int rc, tx, rx;
1995 
1996 	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1997 	rx = BNX2X_NUM_ETH_QUEUES(bp);
1998 
1999 /* account for fcoe queue */
2000 	if (include_cnic && !NO_FCOE(bp)) {
2001 		rx++;
2002 		tx++;
2003 	}
2004 
2005 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
2006 	if (rc) {
2007 		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2008 		return rc;
2009 	}
2010 	rc = netif_set_real_num_rx_queues(bp->dev, rx);
2011 	if (rc) {
2012 		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2013 		return rc;
2014 	}
2015 
2016 	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2017 			  tx, rx);
2018 
2019 	return rc;
2020 }
2021 
2022 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2023 {
2024 	int i;
2025 
2026 	for_each_queue(bp, i) {
2027 		struct bnx2x_fastpath *fp = &bp->fp[i];
2028 		u32 mtu;
2029 
2030 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
2031 		if (IS_FCOE_IDX(i))
2032 			/*
2033 			 * Although there are no IP frames expected to arrive to
2034 			 * this ring we still want to add an
2035 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2036 			 * overrun attack.
2037 			 */
2038 			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2039 		else
2040 			mtu = bp->dev->mtu;
2041 		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2042 				  IP_HEADER_ALIGNMENT_PADDING +
2043 				  ETH_OVERHEAD +
2044 				  mtu +
2045 				  BNX2X_FW_RX_ALIGN_END;
2046 		fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2047 		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2048 		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2049 			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2050 		else
2051 			fp->rx_frag_size = 0;
2052 	}
2053 }
2054 
2055 static int bnx2x_init_rss(struct bnx2x *bp)
2056 {
2057 	int i;
2058 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2059 
2060 	/* Prepare the initial contents for the indirection table if RSS is
2061 	 * enabled
2062 	 */
2063 	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2064 		bp->rss_conf_obj.ind_table[i] =
2065 			bp->fp->cl_id +
2066 			ethtool_rxfh_indir_default(i, num_eth_queues);
2067 
2068 	/*
2069 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2070 	 * per-port, so if explicit configuration is needed , do it only
2071 	 * for a PMF.
2072 	 *
2073 	 * For 57712 and newer on the other hand it's a per-function
2074 	 * configuration.
2075 	 */
2076 	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2077 }
2078 
2079 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2080 	      bool config_hash, bool enable)
2081 {
2082 	struct bnx2x_config_rss_params params = {NULL};
2083 
2084 	/* Although RSS is meaningless when there is a single HW queue we
2085 	 * still need it enabled in order to have HW Rx hash generated.
2086 	 *
2087 	 * if (!is_eth_multi(bp))
2088 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2089 	 */
2090 
2091 	params.rss_obj = rss_obj;
2092 
2093 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2094 
2095 	if (enable) {
2096 		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2097 
2098 		/* RSS configuration */
2099 		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2100 		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2101 		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2102 		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2103 		if (rss_obj->udp_rss_v4)
2104 			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2105 		if (rss_obj->udp_rss_v6)
2106 			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2107 
2108 		if (!CHIP_IS_E1x(bp)) {
2109 			/* valid only for TUNN_MODE_VXLAN tunnel mode */
2110 			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2111 			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2112 
2113 			/* valid only for TUNN_MODE_GRE tunnel mode */
2114 			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2115 		}
2116 	} else {
2117 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2118 	}
2119 
2120 	/* Hash bits */
2121 	params.rss_result_mask = MULTI_MASK;
2122 
2123 	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2124 
2125 	if (config_hash) {
2126 		/* RSS keys */
2127 		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2128 		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2129 	}
2130 
2131 	if (IS_PF(bp))
2132 		return bnx2x_config_rss(bp, &params);
2133 	else
2134 		return bnx2x_vfpf_config_rss(bp, &params);
2135 }
2136 
2137 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2138 {
2139 	struct bnx2x_func_state_params func_params = {NULL};
2140 
2141 	/* Prepare parameters for function state transitions */
2142 	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2143 
2144 	func_params.f_obj = &bp->func_obj;
2145 	func_params.cmd = BNX2X_F_CMD_HW_INIT;
2146 
2147 	func_params.params.hw_init.load_phase = load_code;
2148 
2149 	return bnx2x_func_state_change(bp, &func_params);
2150 }
2151 
2152 /*
2153  * Cleans the object that have internal lists without sending
2154  * ramrods. Should be run when interrupts are disabled.
2155  */
2156 void bnx2x_squeeze_objects(struct bnx2x *bp)
2157 {
2158 	int rc;
2159 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2160 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2161 	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2162 
2163 	/***************** Cleanup MACs' object first *************************/
2164 
2165 	/* Wait for completion of requested */
2166 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2167 	/* Perform a dry cleanup */
2168 	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2169 
2170 	/* Clean ETH primary MAC */
2171 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2172 	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2173 				 &ramrod_flags);
2174 	if (rc != 0)
2175 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2176 
2177 	/* Cleanup UC list */
2178 	vlan_mac_flags = 0;
2179 	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2180 	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2181 				 &ramrod_flags);
2182 	if (rc != 0)
2183 		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2184 
2185 	/***************** Now clean mcast object *****************************/
2186 	rparam.mcast_obj = &bp->mcast_obj;
2187 	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2188 
2189 	/* Add a DEL command... - Since we're doing a driver cleanup only,
2190 	 * we take a lock surrounding both the initial send and the CONTs,
2191 	 * as we don't want a true completion to disrupt us in the middle.
2192 	 */
2193 	netif_addr_lock_bh(bp->dev);
2194 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2195 	if (rc < 0)
2196 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2197 			  rc);
2198 
2199 	/* ...and wait until all pending commands are cleared */
2200 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2201 	while (rc != 0) {
2202 		if (rc < 0) {
2203 			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2204 				  rc);
2205 			netif_addr_unlock_bh(bp->dev);
2206 			return;
2207 		}
2208 
2209 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2210 	}
2211 	netif_addr_unlock_bh(bp->dev);
2212 }
2213 
2214 #ifndef BNX2X_STOP_ON_ERROR
2215 #define LOAD_ERROR_EXIT(bp, label) \
2216 	do { \
2217 		(bp)->state = BNX2X_STATE_ERROR; \
2218 		goto label; \
2219 	} while (0)
2220 
2221 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2222 	do { \
2223 		bp->cnic_loaded = false; \
2224 		goto label; \
2225 	} while (0)
2226 #else /*BNX2X_STOP_ON_ERROR*/
2227 #define LOAD_ERROR_EXIT(bp, label) \
2228 	do { \
2229 		(bp)->state = BNX2X_STATE_ERROR; \
2230 		(bp)->panic = 1; \
2231 		return -EBUSY; \
2232 	} while (0)
2233 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2234 	do { \
2235 		bp->cnic_loaded = false; \
2236 		(bp)->panic = 1; \
2237 		return -EBUSY; \
2238 	} while (0)
2239 #endif /*BNX2X_STOP_ON_ERROR*/
2240 
2241 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2242 {
2243 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2244 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2245 	return;
2246 }
2247 
2248 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2249 {
2250 	int num_groups, vf_headroom = 0;
2251 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2252 
2253 	/* number of queues for statistics is number of eth queues + FCoE */
2254 	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2255 
2256 	/* Total number of FW statistics requests =
2257 	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2258 	 * and fcoe l2 queue) stats + num of queues (which includes another 1
2259 	 * for fcoe l2 queue if applicable)
2260 	 */
2261 	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2262 
2263 	/* vf stats appear in the request list, but their data is allocated by
2264 	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2265 	 * it is used to determine where to place the vf stats queries in the
2266 	 * request struct
2267 	 */
2268 	if (IS_SRIOV(bp))
2269 		vf_headroom = bnx2x_vf_headroom(bp);
2270 
2271 	/* Request is built from stats_query_header and an array of
2272 	 * stats_query_cmd_group each of which contains
2273 	 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2274 	 * configured in the stats_query_header.
2275 	 */
2276 	num_groups =
2277 		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2278 		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2279 		 1 : 0));
2280 
2281 	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2282 	   bp->fw_stats_num, vf_headroom, num_groups);
2283 	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2284 		num_groups * sizeof(struct stats_query_cmd_group);
2285 
2286 	/* Data for statistics requests + stats_counter
2287 	 * stats_counter holds per-STORM counters that are incremented
2288 	 * when STORM has finished with the current request.
2289 	 * memory for FCoE offloaded statistics are counted anyway,
2290 	 * even if they will not be sent.
2291 	 * VF stats are not accounted for here as the data of VF stats is stored
2292 	 * in memory allocated by the VF, not here.
2293 	 */
2294 	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2295 		sizeof(struct per_pf_stats) +
2296 		sizeof(struct fcoe_statistics_params) +
2297 		sizeof(struct per_queue_stats) * num_queue_stats +
2298 		sizeof(struct stats_counter);
2299 
2300 	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2301 				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2302 	if (!bp->fw_stats)
2303 		goto alloc_mem_err;
2304 
2305 	/* Set shortcuts */
2306 	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2307 	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2308 	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2309 		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2310 	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2311 		bp->fw_stats_req_sz;
2312 
2313 	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2314 	   U64_HI(bp->fw_stats_req_mapping),
2315 	   U64_LO(bp->fw_stats_req_mapping));
2316 	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2317 	   U64_HI(bp->fw_stats_data_mapping),
2318 	   U64_LO(bp->fw_stats_data_mapping));
2319 	return 0;
2320 
2321 alloc_mem_err:
2322 	bnx2x_free_fw_stats_mem(bp);
2323 	BNX2X_ERR("Can't allocate FW stats memory\n");
2324 	return -ENOMEM;
2325 }
2326 
2327 /* send load request to mcp and analyze response */
2328 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2329 {
2330 	u32 param;
2331 
2332 	/* init fw_seq */
2333 	bp->fw_seq =
2334 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2335 		 DRV_MSG_SEQ_NUMBER_MASK);
2336 	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2337 
2338 	/* Get current FW pulse sequence */
2339 	bp->fw_drv_pulse_wr_seq =
2340 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2341 		 DRV_PULSE_SEQ_MASK);
2342 	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2343 
2344 	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2345 
2346 	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2347 		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2348 
2349 	/* load request */
2350 	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2351 
2352 	/* if mcp fails to respond we must abort */
2353 	if (!(*load_code)) {
2354 		BNX2X_ERR("MCP response failure, aborting\n");
2355 		return -EBUSY;
2356 	}
2357 
2358 	/* If mcp refused (e.g. other port is in diagnostic mode) we
2359 	 * must abort
2360 	 */
2361 	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2362 		BNX2X_ERR("MCP refused load request, aborting\n");
2363 		return -EBUSY;
2364 	}
2365 	return 0;
2366 }
2367 
2368 /* check whether another PF has already loaded FW to chip. In
2369  * virtualized environments a pf from another VM may have already
2370  * initialized the device including loading FW
2371  */
2372 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2373 {
2374 	/* is another pf loaded on this engine? */
2375 	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2376 	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2377 		u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
2378 		u32 loaded_fw;
2379 
2380 		/* read loaded FW from chip */
2381 		loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2382 
2383 		loaded_fw_major = loaded_fw & 0xff;
2384 		loaded_fw_minor = (loaded_fw >> 8) & 0xff;
2385 		loaded_fw_rev = (loaded_fw >> 16) & 0xff;
2386 		loaded_fw_eng = (loaded_fw >> 24) & 0xff;
2387 
2388 		DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
2389 		   loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
2390 
2391 		/* abort nic load if version mismatch */
2392 		if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
2393 		    loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
2394 		    loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
2395 		    loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
2396 			if (print_err)
2397 				BNX2X_ERR("loaded FW incompatible. Aborting\n");
2398 			else
2399 				BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
2400 
2401 			return -EBUSY;
2402 		}
2403 	}
2404 	return 0;
2405 }
2406 
2407 /* returns the "mcp load_code" according to global load_count array */
2408 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2409 {
2410 	int path = BP_PATH(bp);
2411 
2412 	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2413 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2414 	   bnx2x_load_count[path][2]);
2415 	bnx2x_load_count[path][0]++;
2416 	bnx2x_load_count[path][1 + port]++;
2417 	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2418 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2419 	   bnx2x_load_count[path][2]);
2420 	if (bnx2x_load_count[path][0] == 1)
2421 		return FW_MSG_CODE_DRV_LOAD_COMMON;
2422 	else if (bnx2x_load_count[path][1 + port] == 1)
2423 		return FW_MSG_CODE_DRV_LOAD_PORT;
2424 	else
2425 		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2426 }
2427 
2428 /* mark PMF if applicable */
2429 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2430 {
2431 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2432 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2433 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2434 		bp->port.pmf = 1;
2435 		/* We need the barrier to ensure the ordering between the
2436 		 * writing to bp->port.pmf here and reading it from the
2437 		 * bnx2x_periodic_task().
2438 		 */
2439 		smp_mb();
2440 	} else {
2441 		bp->port.pmf = 0;
2442 	}
2443 
2444 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2445 }
2446 
2447 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2448 {
2449 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2450 	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2451 	    (bp->common.shmem2_base)) {
2452 		if (SHMEM2_HAS(bp, dcc_support))
2453 			SHMEM2_WR(bp, dcc_support,
2454 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2455 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2456 		if (SHMEM2_HAS(bp, afex_driver_support))
2457 			SHMEM2_WR(bp, afex_driver_support,
2458 				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2459 	}
2460 
2461 	/* Set AFEX default VLAN tag to an invalid value */
2462 	bp->afex_def_vlan_tag = -1;
2463 }
2464 
2465 /**
2466  * bnx2x_bz_fp - zero content of the fastpath structure.
2467  *
2468  * @bp:		driver handle
2469  * @index:	fastpath index to be zeroed
2470  *
2471  * Makes sure the contents of the bp->fp[index].napi is kept
2472  * intact.
2473  */
2474 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2475 {
2476 	struct bnx2x_fastpath *fp = &bp->fp[index];
2477 	int cos;
2478 	struct napi_struct orig_napi = fp->napi;
2479 	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2480 
2481 	/* bzero bnx2x_fastpath contents */
2482 	if (fp->tpa_info)
2483 		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2484 		       sizeof(struct bnx2x_agg_info));
2485 	memset(fp, 0, sizeof(*fp));
2486 
2487 	/* Restore the NAPI object as it has been already initialized */
2488 	fp->napi = orig_napi;
2489 	fp->tpa_info = orig_tpa_info;
2490 	fp->bp = bp;
2491 	fp->index = index;
2492 	if (IS_ETH_FP(fp))
2493 		fp->max_cos = bp->max_cos;
2494 	else
2495 		/* Special queues support only one CoS */
2496 		fp->max_cos = 1;
2497 
2498 	/* Init txdata pointers */
2499 	if (IS_FCOE_FP(fp))
2500 		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2501 	if (IS_ETH_FP(fp))
2502 		for_each_cos_in_tx_queue(fp, cos)
2503 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2504 				BNX2X_NUM_ETH_QUEUES(bp) + index];
2505 
2506 	/* set the tpa flag for each queue. The tpa flag determines the queue
2507 	 * minimal size so it must be set prior to queue memory allocation
2508 	 */
2509 	if (bp->dev->features & NETIF_F_LRO)
2510 		fp->mode = TPA_MODE_LRO;
2511 	else if (bp->dev->features & NETIF_F_GRO_HW)
2512 		fp->mode = TPA_MODE_GRO;
2513 	else
2514 		fp->mode = TPA_MODE_DISABLED;
2515 
2516 	/* We don't want TPA if it's disabled in bp
2517 	 * or if this is an FCoE L2 ring.
2518 	 */
2519 	if (bp->disable_tpa || IS_FCOE_FP(fp))
2520 		fp->mode = TPA_MODE_DISABLED;
2521 }
2522 
2523 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2524 {
2525 	u32 cur;
2526 
2527 	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2528 		return;
2529 
2530 	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2531 	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2532 	   cur, state);
2533 
2534 	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2535 }
2536 
2537 int bnx2x_load_cnic(struct bnx2x *bp)
2538 {
2539 	int i, rc, port = BP_PORT(bp);
2540 
2541 	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2542 
2543 	mutex_init(&bp->cnic_mutex);
2544 
2545 	if (IS_PF(bp)) {
2546 		rc = bnx2x_alloc_mem_cnic(bp);
2547 		if (rc) {
2548 			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2549 			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2550 		}
2551 	}
2552 
2553 	rc = bnx2x_alloc_fp_mem_cnic(bp);
2554 	if (rc) {
2555 		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2556 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2557 	}
2558 
2559 	/* Update the number of queues with the cnic queues */
2560 	rc = bnx2x_set_real_num_queues(bp, 1);
2561 	if (rc) {
2562 		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2563 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2564 	}
2565 
2566 	/* Add all CNIC NAPI objects */
2567 	bnx2x_add_all_napi_cnic(bp);
2568 	DP(NETIF_MSG_IFUP, "cnic napi added\n");
2569 	bnx2x_napi_enable_cnic(bp);
2570 
2571 	rc = bnx2x_init_hw_func_cnic(bp);
2572 	if (rc)
2573 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2574 
2575 	bnx2x_nic_init_cnic(bp);
2576 
2577 	if (IS_PF(bp)) {
2578 		/* Enable Timer scan */
2579 		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2580 
2581 		/* setup cnic queues */
2582 		for_each_cnic_queue(bp, i) {
2583 			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2584 			if (rc) {
2585 				BNX2X_ERR("Queue setup failed\n");
2586 				LOAD_ERROR_EXIT(bp, load_error_cnic2);
2587 			}
2588 		}
2589 	}
2590 
2591 	/* Initialize Rx filter. */
2592 	bnx2x_set_rx_mode_inner(bp);
2593 
2594 	/* re-read iscsi info */
2595 	bnx2x_get_iscsi_info(bp);
2596 	bnx2x_setup_cnic_irq_info(bp);
2597 	bnx2x_setup_cnic_info(bp);
2598 	bp->cnic_loaded = true;
2599 	if (bp->state == BNX2X_STATE_OPEN)
2600 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2601 
2602 	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2603 
2604 	return 0;
2605 
2606 #ifndef BNX2X_STOP_ON_ERROR
2607 load_error_cnic2:
2608 	/* Disable Timer scan */
2609 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2610 
2611 load_error_cnic1:
2612 	bnx2x_napi_disable_cnic(bp);
2613 	/* Update the number of queues without the cnic queues */
2614 	if (bnx2x_set_real_num_queues(bp, 0))
2615 		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2616 load_error_cnic0:
2617 	BNX2X_ERR("CNIC-related load failed\n");
2618 	bnx2x_free_fp_mem_cnic(bp);
2619 	bnx2x_free_mem_cnic(bp);
2620 	return rc;
2621 #endif /* ! BNX2X_STOP_ON_ERROR */
2622 }
2623 
2624 /* must be called with rtnl_lock */
2625 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2626 {
2627 	int port = BP_PORT(bp);
2628 	int i, rc = 0, load_code = 0;
2629 
2630 	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2631 	DP(NETIF_MSG_IFUP,
2632 	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2633 
2634 #ifdef BNX2X_STOP_ON_ERROR
2635 	if (unlikely(bp->panic)) {
2636 		BNX2X_ERR("Can't load NIC when there is panic\n");
2637 		return -EPERM;
2638 	}
2639 #endif
2640 
2641 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2642 
2643 	/* zero the structure w/o any lock, before SP handler is initialized */
2644 	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2645 	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2646 		&bp->last_reported_link.link_report_flags);
2647 
2648 	if (IS_PF(bp))
2649 		/* must be called before memory allocation and HW init */
2650 		bnx2x_ilt_set_info(bp);
2651 
2652 	/*
2653 	 * Zero fastpath structures preserving invariants like napi, which are
2654 	 * allocated only once, fp index, max_cos, bp pointer.
2655 	 * Also set fp->mode and txdata_ptr.
2656 	 */
2657 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2658 	for_each_queue(bp, i)
2659 		bnx2x_bz_fp(bp, i);
2660 	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2661 				  bp->num_cnic_queues) *
2662 				  sizeof(struct bnx2x_fp_txdata));
2663 
2664 	bp->fcoe_init = false;
2665 
2666 	/* Set the receive queues buffer size */
2667 	bnx2x_set_rx_buf_size(bp);
2668 
2669 	if (IS_PF(bp)) {
2670 		rc = bnx2x_alloc_mem(bp);
2671 		if (rc) {
2672 			BNX2X_ERR("Unable to allocate bp memory\n");
2673 			return rc;
2674 		}
2675 	}
2676 
2677 	/* need to be done after alloc mem, since it's self adjusting to amount
2678 	 * of memory available for RSS queues
2679 	 */
2680 	rc = bnx2x_alloc_fp_mem(bp);
2681 	if (rc) {
2682 		BNX2X_ERR("Unable to allocate memory for fps\n");
2683 		LOAD_ERROR_EXIT(bp, load_error0);
2684 	}
2685 
2686 	/* Allocated memory for FW statistics  */
2687 	rc = bnx2x_alloc_fw_stats_mem(bp);
2688 	if (rc)
2689 		LOAD_ERROR_EXIT(bp, load_error0);
2690 
2691 	/* request pf to initialize status blocks */
2692 	if (IS_VF(bp)) {
2693 		rc = bnx2x_vfpf_init(bp);
2694 		if (rc)
2695 			LOAD_ERROR_EXIT(bp, load_error0);
2696 	}
2697 
2698 	/* As long as bnx2x_alloc_mem() may possibly update
2699 	 * bp->num_queues, bnx2x_set_real_num_queues() should always
2700 	 * come after it. At this stage cnic queues are not counted.
2701 	 */
2702 	rc = bnx2x_set_real_num_queues(bp, 0);
2703 	if (rc) {
2704 		BNX2X_ERR("Unable to set real_num_queues\n");
2705 		LOAD_ERROR_EXIT(bp, load_error0);
2706 	}
2707 
2708 	/* configure multi cos mappings in kernel.
2709 	 * this configuration may be overridden by a multi class queue
2710 	 * discipline or by a dcbx negotiation result.
2711 	 */
2712 	bnx2x_setup_tc(bp->dev, bp->max_cos);
2713 
2714 	/* Add all NAPI objects */
2715 	bnx2x_add_all_napi(bp);
2716 	DP(NETIF_MSG_IFUP, "napi added\n");
2717 	bnx2x_napi_enable(bp);
2718 
2719 	if (IS_PF(bp)) {
2720 		/* set pf load just before approaching the MCP */
2721 		bnx2x_set_pf_load(bp);
2722 
2723 		/* if mcp exists send load request and analyze response */
2724 		if (!BP_NOMCP(bp)) {
2725 			/* attempt to load pf */
2726 			rc = bnx2x_nic_load_request(bp, &load_code);
2727 			if (rc)
2728 				LOAD_ERROR_EXIT(bp, load_error1);
2729 
2730 			/* what did mcp say? */
2731 			rc = bnx2x_compare_fw_ver(bp, load_code, true);
2732 			if (rc) {
2733 				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2734 				LOAD_ERROR_EXIT(bp, load_error2);
2735 			}
2736 		} else {
2737 			load_code = bnx2x_nic_load_no_mcp(bp, port);
2738 		}
2739 
2740 		/* mark pmf if applicable */
2741 		bnx2x_nic_load_pmf(bp, load_code);
2742 
2743 		/* Init Function state controlling object */
2744 		bnx2x__init_func_obj(bp);
2745 
2746 		/* Initialize HW */
2747 		rc = bnx2x_init_hw(bp, load_code);
2748 		if (rc) {
2749 			BNX2X_ERR("HW init failed, aborting\n");
2750 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2751 			LOAD_ERROR_EXIT(bp, load_error2);
2752 		}
2753 	}
2754 
2755 	bnx2x_pre_irq_nic_init(bp);
2756 
2757 	/* Connect to IRQs */
2758 	rc = bnx2x_setup_irqs(bp);
2759 	if (rc) {
2760 		BNX2X_ERR("setup irqs failed\n");
2761 		if (IS_PF(bp))
2762 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2763 		LOAD_ERROR_EXIT(bp, load_error2);
2764 	}
2765 
2766 	/* Init per-function objects */
2767 	if (IS_PF(bp)) {
2768 		/* Setup NIC internals and enable interrupts */
2769 		bnx2x_post_irq_nic_init(bp, load_code);
2770 
2771 		bnx2x_init_bp_objs(bp);
2772 		bnx2x_iov_nic_init(bp);
2773 
2774 		/* Set AFEX default VLAN tag to an invalid value */
2775 		bp->afex_def_vlan_tag = -1;
2776 		bnx2x_nic_load_afex_dcc(bp, load_code);
2777 		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2778 		rc = bnx2x_func_start(bp);
2779 		if (rc) {
2780 			BNX2X_ERR("Function start failed!\n");
2781 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2782 
2783 			LOAD_ERROR_EXIT(bp, load_error3);
2784 		}
2785 
2786 		/* Send LOAD_DONE command to MCP */
2787 		if (!BP_NOMCP(bp)) {
2788 			load_code = bnx2x_fw_command(bp,
2789 						     DRV_MSG_CODE_LOAD_DONE, 0);
2790 			if (!load_code) {
2791 				BNX2X_ERR("MCP response failure, aborting\n");
2792 				rc = -EBUSY;
2793 				LOAD_ERROR_EXIT(bp, load_error3);
2794 			}
2795 		}
2796 
2797 		/* initialize FW coalescing state machines in RAM */
2798 		bnx2x_update_coalesce(bp);
2799 	}
2800 
2801 	/* setup the leading queue */
2802 	rc = bnx2x_setup_leading(bp);
2803 	if (rc) {
2804 		BNX2X_ERR("Setup leading failed!\n");
2805 		LOAD_ERROR_EXIT(bp, load_error3);
2806 	}
2807 
2808 	/* set up the rest of the queues */
2809 	for_each_nondefault_eth_queue(bp, i) {
2810 		if (IS_PF(bp))
2811 			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2812 		else /* VF */
2813 			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2814 		if (rc) {
2815 			BNX2X_ERR("Queue %d setup failed\n", i);
2816 			LOAD_ERROR_EXIT(bp, load_error3);
2817 		}
2818 	}
2819 
2820 	/* setup rss */
2821 	rc = bnx2x_init_rss(bp);
2822 	if (rc) {
2823 		BNX2X_ERR("PF RSS init failed\n");
2824 		LOAD_ERROR_EXIT(bp, load_error3);
2825 	}
2826 
2827 	/* Now when Clients are configured we are ready to work */
2828 	bp->state = BNX2X_STATE_OPEN;
2829 
2830 	/* Configure a ucast MAC */
2831 	if (IS_PF(bp))
2832 		rc = bnx2x_set_eth_mac(bp, true);
2833 	else /* vf */
2834 		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2835 					   true);
2836 	if (rc) {
2837 		BNX2X_ERR("Setting Ethernet MAC failed\n");
2838 		LOAD_ERROR_EXIT(bp, load_error3);
2839 	}
2840 
2841 	if (IS_PF(bp) && bp->pending_max) {
2842 		bnx2x_update_max_mf_config(bp, bp->pending_max);
2843 		bp->pending_max = 0;
2844 	}
2845 
2846 	bp->force_link_down = false;
2847 	if (bp->port.pmf) {
2848 		rc = bnx2x_initial_phy_init(bp, load_mode);
2849 		if (rc)
2850 			LOAD_ERROR_EXIT(bp, load_error3);
2851 	}
2852 	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2853 
2854 	/* Start fast path */
2855 
2856 	/* Re-configure vlan filters */
2857 	rc = bnx2x_vlan_reconfigure_vid(bp);
2858 	if (rc)
2859 		LOAD_ERROR_EXIT(bp, load_error3);
2860 
2861 	/* Initialize Rx filter. */
2862 	bnx2x_set_rx_mode_inner(bp);
2863 
2864 	if (bp->flags & PTP_SUPPORTED) {
2865 		bnx2x_register_phc(bp);
2866 		bnx2x_init_ptp(bp);
2867 		bnx2x_configure_ptp_filters(bp);
2868 	}
2869 	/* Start Tx */
2870 	switch (load_mode) {
2871 	case LOAD_NORMAL:
2872 		/* Tx queue should be only re-enabled */
2873 		netif_tx_wake_all_queues(bp->dev);
2874 		break;
2875 
2876 	case LOAD_OPEN:
2877 		netif_tx_start_all_queues(bp->dev);
2878 		smp_mb__after_atomic();
2879 		break;
2880 
2881 	case LOAD_DIAG:
2882 	case LOAD_LOOPBACK_EXT:
2883 		bp->state = BNX2X_STATE_DIAG;
2884 		break;
2885 
2886 	default:
2887 		break;
2888 	}
2889 
2890 	if (bp->port.pmf)
2891 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2892 	else
2893 		bnx2x__link_status_update(bp);
2894 
2895 	/* start the timer */
2896 	mod_timer(&bp->timer, jiffies + bp->current_interval);
2897 
2898 	if (CNIC_ENABLED(bp))
2899 		bnx2x_load_cnic(bp);
2900 
2901 	if (IS_PF(bp))
2902 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2903 
2904 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2905 		/* mark driver is loaded in shmem2 */
2906 		u32 val;
2907 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2908 		val &= ~DRV_FLAGS_MTU_MASK;
2909 		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2910 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2911 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2912 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
2913 	}
2914 
2915 	/* Wait for all pending SP commands to complete */
2916 	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2917 		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2918 		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2919 		return -EBUSY;
2920 	}
2921 
2922 	/* Update driver data for On-Chip MFW dump. */
2923 	if (IS_PF(bp))
2924 		bnx2x_update_mfw_dump(bp);
2925 
2926 	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2927 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2928 		bnx2x_dcbx_init(bp, false);
2929 
2930 	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2931 		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2932 
2933 	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2934 
2935 	return 0;
2936 
2937 #ifndef BNX2X_STOP_ON_ERROR
2938 load_error3:
2939 	if (IS_PF(bp)) {
2940 		bnx2x_int_disable_sync(bp, 1);
2941 
2942 		/* Clean queueable objects */
2943 		bnx2x_squeeze_objects(bp);
2944 	}
2945 
2946 	/* Free SKBs, SGEs, TPA pool and driver internals */
2947 	bnx2x_free_skbs(bp);
2948 	for_each_rx_queue(bp, i)
2949 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2950 
2951 	/* Release IRQs */
2952 	bnx2x_free_irq(bp);
2953 load_error2:
2954 	if (IS_PF(bp) && !BP_NOMCP(bp)) {
2955 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2956 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2957 	}
2958 
2959 	bp->port.pmf = 0;
2960 load_error1:
2961 	bnx2x_napi_disable(bp);
2962 	bnx2x_del_all_napi(bp);
2963 
2964 	/* clear pf_load status, as it was already set */
2965 	if (IS_PF(bp))
2966 		bnx2x_clear_pf_load(bp);
2967 load_error0:
2968 	bnx2x_free_fw_stats_mem(bp);
2969 	bnx2x_free_fp_mem(bp);
2970 	bnx2x_free_mem(bp);
2971 
2972 	return rc;
2973 #endif /* ! BNX2X_STOP_ON_ERROR */
2974 }
2975 
2976 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2977 {
2978 	u8 rc = 0, cos, i;
2979 
2980 	/* Wait until tx fastpath tasks complete */
2981 	for_each_tx_queue(bp, i) {
2982 		struct bnx2x_fastpath *fp = &bp->fp[i];
2983 
2984 		for_each_cos_in_tx_queue(fp, cos)
2985 			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2986 		if (rc)
2987 			return rc;
2988 	}
2989 	return 0;
2990 }
2991 
2992 /* must be called with rtnl_lock */
2993 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2994 {
2995 	int i;
2996 	bool global = false;
2997 
2998 	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2999 
3000 	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
3001 		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
3002 
3003 	/* mark driver is unloaded in shmem2 */
3004 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
3005 		u32 val;
3006 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
3007 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
3008 			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
3009 	}
3010 
3011 	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
3012 	    (bp->state == BNX2X_STATE_CLOSED ||
3013 	     bp->state == BNX2X_STATE_ERROR)) {
3014 		/* We can get here if the driver has been unloaded
3015 		 * during parity error recovery and is either waiting for a
3016 		 * leader to complete or for other functions to unload and
3017 		 * then ifdown has been issued. In this case we want to
3018 		 * unload and let other functions to complete a recovery
3019 		 * process.
3020 		 */
3021 		bp->recovery_state = BNX2X_RECOVERY_DONE;
3022 		bp->is_leader = 0;
3023 		bnx2x_release_leader_lock(bp);
3024 		smp_mb();
3025 
3026 		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3027 		BNX2X_ERR("Can't unload in closed or error state\n");
3028 		return -EINVAL;
3029 	}
3030 
3031 	/* Nothing to do during unload if previous bnx2x_nic_load()
3032 	 * have not completed successfully - all resources are released.
3033 	 *
3034 	 * we can get here only after unsuccessful ndo_* callback, during which
3035 	 * dev->IFF_UP flag is still on.
3036 	 */
3037 	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3038 		return 0;
3039 
3040 	/* It's important to set the bp->state to the value different from
3041 	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3042 	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3043 	 */
3044 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3045 	smp_mb();
3046 
3047 	/* indicate to VFs that the PF is going down */
3048 	bnx2x_iov_channel_down(bp);
3049 
3050 	if (CNIC_LOADED(bp))
3051 		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3052 
3053 	/* Stop Tx */
3054 	bnx2x_tx_disable(bp);
3055 	netdev_reset_tc(bp->dev);
3056 
3057 	bp->rx_mode = BNX2X_RX_MODE_NONE;
3058 
3059 	del_timer_sync(&bp->timer);
3060 
3061 	if (IS_PF(bp) && !BP_NOMCP(bp)) {
3062 		/* Set ALWAYS_ALIVE bit in shmem */
3063 		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3064 		bnx2x_drv_pulse(bp);
3065 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3066 		bnx2x_save_statistics(bp);
3067 	}
3068 
3069 	/* wait till consumers catch up with producers in all queues.
3070 	 * If we're recovering, FW can't write to host so no reason
3071 	 * to wait for the queues to complete all Tx.
3072 	 */
3073 	if (unload_mode != UNLOAD_RECOVERY)
3074 		bnx2x_drain_tx_queues(bp);
3075 
3076 	/* if VF indicate to PF this function is going down (PF will delete sp
3077 	 * elements and clear initializations
3078 	 */
3079 	if (IS_VF(bp)) {
3080 		bnx2x_clear_vlan_info(bp);
3081 		bnx2x_vfpf_close_vf(bp);
3082 	} else if (unload_mode != UNLOAD_RECOVERY) {
3083 		/* if this is a normal/close unload need to clean up chip*/
3084 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3085 	} else {
3086 		/* Send the UNLOAD_REQUEST to the MCP */
3087 		bnx2x_send_unload_req(bp, unload_mode);
3088 
3089 		/* Prevent transactions to host from the functions on the
3090 		 * engine that doesn't reset global blocks in case of global
3091 		 * attention once global blocks are reset and gates are opened
3092 		 * (the engine which leader will perform the recovery
3093 		 * last).
3094 		 */
3095 		if (!CHIP_IS_E1x(bp))
3096 			bnx2x_pf_disable(bp);
3097 
3098 		/* Disable HW interrupts, NAPI */
3099 		bnx2x_netif_stop(bp, 1);
3100 		/* Delete all NAPI objects */
3101 		bnx2x_del_all_napi(bp);
3102 		if (CNIC_LOADED(bp))
3103 			bnx2x_del_all_napi_cnic(bp);
3104 		/* Release IRQs */
3105 		bnx2x_free_irq(bp);
3106 
3107 		/* Report UNLOAD_DONE to MCP */
3108 		bnx2x_send_unload_done(bp, false);
3109 	}
3110 
3111 	/*
3112 	 * At this stage no more interrupts will arrive so we may safely clean
3113 	 * the queueable objects here in case they failed to get cleaned so far.
3114 	 */
3115 	if (IS_PF(bp))
3116 		bnx2x_squeeze_objects(bp);
3117 
3118 	/* There should be no more pending SP commands at this stage */
3119 	bp->sp_state = 0;
3120 
3121 	bp->port.pmf = 0;
3122 
3123 	/* clear pending work in rtnl task */
3124 	bp->sp_rtnl_state = 0;
3125 	smp_mb();
3126 
3127 	/* Free SKBs, SGEs, TPA pool and driver internals */
3128 	bnx2x_free_skbs(bp);
3129 	if (CNIC_LOADED(bp))
3130 		bnx2x_free_skbs_cnic(bp);
3131 	for_each_rx_queue(bp, i)
3132 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3133 
3134 	bnx2x_free_fp_mem(bp);
3135 	if (CNIC_LOADED(bp))
3136 		bnx2x_free_fp_mem_cnic(bp);
3137 
3138 	if (IS_PF(bp)) {
3139 		if (CNIC_LOADED(bp))
3140 			bnx2x_free_mem_cnic(bp);
3141 	}
3142 	bnx2x_free_mem(bp);
3143 
3144 	bp->state = BNX2X_STATE_CLOSED;
3145 	bp->cnic_loaded = false;
3146 
3147 	/* Clear driver version indication in shmem */
3148 	if (IS_PF(bp) && !BP_NOMCP(bp))
3149 		bnx2x_update_mng_version(bp);
3150 
3151 	/* Check if there are pending parity attentions. If there are - set
3152 	 * RECOVERY_IN_PROGRESS.
3153 	 */
3154 	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3155 		bnx2x_set_reset_in_progress(bp);
3156 
3157 		/* Set RESET_IS_GLOBAL if needed */
3158 		if (global)
3159 			bnx2x_set_reset_global(bp);
3160 	}
3161 
3162 	/* The last driver must disable a "close the gate" if there is no
3163 	 * parity attention or "process kill" pending.
3164 	 */
3165 	if (IS_PF(bp) &&
3166 	    !bnx2x_clear_pf_load(bp) &&
3167 	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
3168 		bnx2x_disable_close_the_gate(bp);
3169 
3170 	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3171 
3172 	return 0;
3173 }
3174 
3175 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3176 {
3177 	u16 pmcsr;
3178 
3179 	/* If there is no power capability, silently succeed */
3180 	if (!bp->pdev->pm_cap) {
3181 		BNX2X_DEV_INFO("No power capability. Breaking.\n");
3182 		return 0;
3183 	}
3184 
3185 	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3186 
3187 	switch (state) {
3188 	case PCI_D0:
3189 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3190 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3191 				       PCI_PM_CTRL_PME_STATUS));
3192 
3193 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3194 			/* delay required during transition out of D3hot */
3195 			msleep(20);
3196 		break;
3197 
3198 	case PCI_D3hot:
3199 		/* If there are other clients above don't
3200 		   shut down the power */
3201 		if (atomic_read(&bp->pdev->enable_cnt) != 1)
3202 			return 0;
3203 		/* Don't shut down the power for emulation and FPGA */
3204 		if (CHIP_REV_IS_SLOW(bp))
3205 			return 0;
3206 
3207 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3208 		pmcsr |= 3;
3209 
3210 		if (bp->wol)
3211 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3212 
3213 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3214 				      pmcsr);
3215 
3216 		/* No more memory access after this point until
3217 		* device is brought back to D0.
3218 		*/
3219 		break;
3220 
3221 	default:
3222 		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3223 		return -EINVAL;
3224 	}
3225 	return 0;
3226 }
3227 
3228 /*
3229  * net_device service functions
3230  */
3231 static int bnx2x_poll(struct napi_struct *napi, int budget)
3232 {
3233 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3234 						 napi);
3235 	struct bnx2x *bp = fp->bp;
3236 	int rx_work_done;
3237 	u8 cos;
3238 
3239 #ifdef BNX2X_STOP_ON_ERROR
3240 	if (unlikely(bp->panic)) {
3241 		napi_complete(napi);
3242 		return 0;
3243 	}
3244 #endif
3245 	for_each_cos_in_tx_queue(fp, cos)
3246 		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3247 			bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3248 
3249 	rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3250 
3251 	if (rx_work_done < budget) {
3252 		/* No need to update SB for FCoE L2 ring as long as
3253 		 * it's connected to the default SB and the SB
3254 		 * has been updated when NAPI was scheduled.
3255 		 */
3256 		if (IS_FCOE_FP(fp)) {
3257 			napi_complete_done(napi, rx_work_done);
3258 		} else {
3259 			bnx2x_update_fpsb_idx(fp);
3260 			/* bnx2x_has_rx_work() reads the status block,
3261 			 * thus we need to ensure that status block indices
3262 			 * have been actually read (bnx2x_update_fpsb_idx)
3263 			 * prior to this check (bnx2x_has_rx_work) so that
3264 			 * we won't write the "newer" value of the status block
3265 			 * to IGU (if there was a DMA right after
3266 			 * bnx2x_has_rx_work and if there is no rmb, the memory
3267 			 * reading (bnx2x_update_fpsb_idx) may be postponed
3268 			 * to right before bnx2x_ack_sb). In this case there
3269 			 * will never be another interrupt until there is
3270 			 * another update of the status block, while there
3271 			 * is still unhandled work.
3272 			 */
3273 			rmb();
3274 
3275 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3276 				if (napi_complete_done(napi, rx_work_done)) {
3277 					/* Re-enable interrupts */
3278 					DP(NETIF_MSG_RX_STATUS,
3279 					   "Update index to %d\n", fp->fp_hc_idx);
3280 					bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3281 						     le16_to_cpu(fp->fp_hc_idx),
3282 						     IGU_INT_ENABLE, 1);
3283 				}
3284 			} else {
3285 				rx_work_done = budget;
3286 			}
3287 		}
3288 	}
3289 
3290 	return rx_work_done;
3291 }
3292 
3293 /* we split the first BD into headers and data BDs
3294  * to ease the pain of our fellow microcode engineers
3295  * we use one mapping for both BDs
3296  */
3297 static u16 bnx2x_tx_split(struct bnx2x *bp,
3298 			  struct bnx2x_fp_txdata *txdata,
3299 			  struct sw_tx_bd *tx_buf,
3300 			  struct eth_tx_start_bd **tx_bd, u16 hlen,
3301 			  u16 bd_prod)
3302 {
3303 	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3304 	struct eth_tx_bd *d_tx_bd;
3305 	dma_addr_t mapping;
3306 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
3307 
3308 	/* first fix first BD */
3309 	h_tx_bd->nbytes = cpu_to_le16(hlen);
3310 
3311 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x)\n",
3312 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3313 
3314 	/* now get a new data BD
3315 	 * (after the pbd) and fill it */
3316 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3317 	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3318 
3319 	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3320 			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3321 
3322 	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3323 	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3324 	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3325 
3326 	/* this marks the BD as one that has no individual mapping */
3327 	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3328 
3329 	DP(NETIF_MSG_TX_QUEUED,
3330 	   "TSO split data size is %d (%x:%x)\n",
3331 	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3332 
3333 	/* update tx_bd */
3334 	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3335 
3336 	return bd_prod;
3337 }
3338 
3339 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3340 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3341 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3342 {
3343 	__sum16 tsum = (__force __sum16) csum;
3344 
3345 	if (fix > 0)
3346 		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3347 				  csum_partial(t_header - fix, fix, 0)));
3348 
3349 	else if (fix < 0)
3350 		tsum = ~csum_fold(csum_add((__force __wsum) csum,
3351 				  csum_partial(t_header, -fix, 0)));
3352 
3353 	return bswab16(tsum);
3354 }
3355 
3356 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3357 {
3358 	u32 rc;
3359 	__u8 prot = 0;
3360 	__be16 protocol;
3361 
3362 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3363 		return XMIT_PLAIN;
3364 
3365 	protocol = vlan_get_protocol(skb);
3366 	if (protocol == htons(ETH_P_IPV6)) {
3367 		rc = XMIT_CSUM_V6;
3368 		prot = ipv6_hdr(skb)->nexthdr;
3369 	} else {
3370 		rc = XMIT_CSUM_V4;
3371 		prot = ip_hdr(skb)->protocol;
3372 	}
3373 
3374 	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3375 		if (inner_ip_hdr(skb)->version == 6) {
3376 			rc |= XMIT_CSUM_ENC_V6;
3377 			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3378 				rc |= XMIT_CSUM_TCP;
3379 		} else {
3380 			rc |= XMIT_CSUM_ENC_V4;
3381 			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3382 				rc |= XMIT_CSUM_TCP;
3383 		}
3384 	}
3385 	if (prot == IPPROTO_TCP)
3386 		rc |= XMIT_CSUM_TCP;
3387 
3388 	if (skb_is_gso(skb)) {
3389 		if (skb_is_gso_v6(skb)) {
3390 			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3391 			if (rc & XMIT_CSUM_ENC)
3392 				rc |= XMIT_GSO_ENC_V6;
3393 		} else {
3394 			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3395 			if (rc & XMIT_CSUM_ENC)
3396 				rc |= XMIT_GSO_ENC_V4;
3397 		}
3398 	}
3399 
3400 	return rc;
3401 }
3402 
3403 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3404 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3405 
3406 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3407 #define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3408 
3409 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3410 /* check if packet requires linearization (packet is too fragmented)
3411    no need to check fragmentation if page size > 8K (there will be no
3412    violation to FW restrictions) */
3413 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3414 			     u32 xmit_type)
3415 {
3416 	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3417 	int to_copy = 0, hlen = 0;
3418 
3419 	if (xmit_type & XMIT_GSO_ENC)
3420 		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3421 
3422 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3423 		if (xmit_type & XMIT_GSO) {
3424 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3425 			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3426 			/* Number of windows to check */
3427 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3428 			int wnd_idx = 0;
3429 			int frag_idx = 0;
3430 			u32 wnd_sum = 0;
3431 
3432 			/* Headers length */
3433 			if (xmit_type & XMIT_GSO_ENC)
3434 				hlen = skb_inner_tcp_all_headers(skb);
3435 			else
3436 				hlen = skb_tcp_all_headers(skb);
3437 
3438 			/* Amount of data (w/o headers) on linear part of SKB*/
3439 			first_bd_sz = skb_headlen(skb) - hlen;
3440 
3441 			wnd_sum  = first_bd_sz;
3442 
3443 			/* Calculate the first sum - it's special */
3444 			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3445 				wnd_sum +=
3446 					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3447 
3448 			/* If there was data on linear skb data - check it */
3449 			if (first_bd_sz > 0) {
3450 				if (unlikely(wnd_sum < lso_mss)) {
3451 					to_copy = 1;
3452 					goto exit_lbl;
3453 				}
3454 
3455 				wnd_sum -= first_bd_sz;
3456 			}
3457 
3458 			/* Others are easier: run through the frag list and
3459 			   check all windows */
3460 			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3461 				wnd_sum +=
3462 			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3463 
3464 				if (unlikely(wnd_sum < lso_mss)) {
3465 					to_copy = 1;
3466 					break;
3467 				}
3468 				wnd_sum -=
3469 					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3470 			}
3471 		} else {
3472 			/* in non-LSO too fragmented packet should always
3473 			   be linearized */
3474 			to_copy = 1;
3475 		}
3476 	}
3477 
3478 exit_lbl:
3479 	if (unlikely(to_copy))
3480 		DP(NETIF_MSG_TX_QUEUED,
3481 		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3482 		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3483 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3484 
3485 	return to_copy;
3486 }
3487 #endif
3488 
3489 /**
3490  * bnx2x_set_pbd_gso - update PBD in GSO case.
3491  *
3492  * @skb:	packet skb
3493  * @pbd:	parse BD
3494  * @xmit_type:	xmit flags
3495  */
3496 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3497 			      struct eth_tx_parse_bd_e1x *pbd,
3498 			      u32 xmit_type)
3499 {
3500 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3501 	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3502 	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3503 
3504 	if (xmit_type & XMIT_GSO_V4) {
3505 		pbd->ip_id = bswab16(ip_hdr(skb)->id);
3506 		pbd->tcp_pseudo_csum =
3507 			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3508 						   ip_hdr(skb)->daddr,
3509 						   0, IPPROTO_TCP, 0));
3510 	} else {
3511 		pbd->tcp_pseudo_csum =
3512 			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3513 						 &ipv6_hdr(skb)->daddr,
3514 						 0, IPPROTO_TCP, 0));
3515 	}
3516 
3517 	pbd->global_data |=
3518 		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3519 }
3520 
3521 /**
3522  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3523  *
3524  * @bp:			driver handle
3525  * @skb:		packet skb
3526  * @parsing_data:	data to be updated
3527  * @xmit_type:		xmit flags
3528  *
3529  * 57712/578xx related, when skb has encapsulation
3530  */
3531 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3532 				 u32 *parsing_data, u32 xmit_type)
3533 {
3534 	*parsing_data |=
3535 		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3536 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3537 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3538 
3539 	if (xmit_type & XMIT_CSUM_TCP) {
3540 		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3541 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3542 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3543 
3544 		return skb_inner_tcp_all_headers(skb);
3545 	}
3546 
3547 	/* We support checksum offload for TCP and UDP only.
3548 	 * No need to pass the UDP header length - it's a constant.
3549 	 */
3550 	return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
3551 }
3552 
3553 /**
3554  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3555  *
3556  * @bp:			driver handle
3557  * @skb:		packet skb
3558  * @parsing_data:	data to be updated
3559  * @xmit_type:		xmit flags
3560  *
3561  * 57712/578xx related
3562  */
3563 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3564 				u32 *parsing_data, u32 xmit_type)
3565 {
3566 	*parsing_data |=
3567 		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3568 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3569 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3570 
3571 	if (xmit_type & XMIT_CSUM_TCP) {
3572 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3573 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3574 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3575 
3576 		return skb_tcp_all_headers(skb);
3577 	}
3578 	/* We support checksum offload for TCP and UDP only.
3579 	 * No need to pass the UDP header length - it's a constant.
3580 	 */
3581 	return skb_transport_offset(skb) + sizeof(struct udphdr);
3582 }
3583 
3584 /* set FW indication according to inner or outer protocols if tunneled */
3585 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3586 			       struct eth_tx_start_bd *tx_start_bd,
3587 			       u32 xmit_type)
3588 {
3589 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3590 
3591 	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3592 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3593 
3594 	if (!(xmit_type & XMIT_CSUM_TCP))
3595 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3596 }
3597 
3598 /**
3599  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3600  *
3601  * @bp:		driver handle
3602  * @skb:	packet skb
3603  * @pbd:	parse BD to be updated
3604  * @xmit_type:	xmit flags
3605  */
3606 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3607 			     struct eth_tx_parse_bd_e1x *pbd,
3608 			     u32 xmit_type)
3609 {
3610 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3611 
3612 	/* for now NS flag is not used in Linux */
3613 	pbd->global_data =
3614 		cpu_to_le16(hlen |
3615 			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3616 			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3617 
3618 	pbd->ip_hlen_w = (skb_transport_header(skb) -
3619 			skb_network_header(skb)) >> 1;
3620 
3621 	hlen += pbd->ip_hlen_w;
3622 
3623 	/* We support checksum offload for TCP and UDP only */
3624 	if (xmit_type & XMIT_CSUM_TCP)
3625 		hlen += tcp_hdrlen(skb) / 2;
3626 	else
3627 		hlen += sizeof(struct udphdr) / 2;
3628 
3629 	pbd->total_hlen_w = cpu_to_le16(hlen);
3630 	hlen = hlen*2;
3631 
3632 	if (xmit_type & XMIT_CSUM_TCP) {
3633 		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3634 
3635 	} else {
3636 		s8 fix = SKB_CS_OFF(skb); /* signed! */
3637 
3638 		DP(NETIF_MSG_TX_QUEUED,
3639 		   "hlen %d  fix %d  csum before fix %x\n",
3640 		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3641 
3642 		/* HW bug: fixup the CSUM */
3643 		pbd->tcp_pseudo_csum =
3644 			bnx2x_csum_fix(skb_transport_header(skb),
3645 				       SKB_CS(skb), fix);
3646 
3647 		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3648 		   pbd->tcp_pseudo_csum);
3649 	}
3650 
3651 	return hlen;
3652 }
3653 
3654 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3655 				      struct eth_tx_parse_bd_e2 *pbd_e2,
3656 				      struct eth_tx_parse_2nd_bd *pbd2,
3657 				      u16 *global_data,
3658 				      u32 xmit_type)
3659 {
3660 	u16 hlen_w = 0;
3661 	u8 outerip_off, outerip_len = 0;
3662 
3663 	/* from outer IP to transport */
3664 	hlen_w = (skb_inner_transport_header(skb) -
3665 		  skb_network_header(skb)) >> 1;
3666 
3667 	/* transport len */
3668 	hlen_w += inner_tcp_hdrlen(skb) >> 1;
3669 
3670 	pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3671 
3672 	/* outer IP header info */
3673 	if (xmit_type & XMIT_CSUM_V4) {
3674 		struct iphdr *iph = ip_hdr(skb);
3675 		u32 csum = (__force u32)(~iph->check) -
3676 			   (__force u32)iph->tot_len -
3677 			   (__force u32)iph->frag_off;
3678 
3679 		outerip_len = iph->ihl << 1;
3680 
3681 		pbd2->fw_ip_csum_wo_len_flags_frag =
3682 			bswab16(csum_fold((__force __wsum)csum));
3683 	} else {
3684 		pbd2->fw_ip_hdr_to_payload_w =
3685 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3686 		pbd_e2->data.tunnel_data.flags |=
3687 			ETH_TUNNEL_DATA_IPV6_OUTER;
3688 	}
3689 
3690 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3691 
3692 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3693 
3694 	/* inner IP header info */
3695 	if (xmit_type & XMIT_CSUM_ENC_V4) {
3696 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3697 
3698 		pbd_e2->data.tunnel_data.pseudo_csum =
3699 			bswab16(~csum_tcpudp_magic(
3700 					inner_ip_hdr(skb)->saddr,
3701 					inner_ip_hdr(skb)->daddr,
3702 					0, IPPROTO_TCP, 0));
3703 	} else {
3704 		pbd_e2->data.tunnel_data.pseudo_csum =
3705 			bswab16(~csum_ipv6_magic(
3706 					&inner_ipv6_hdr(skb)->saddr,
3707 					&inner_ipv6_hdr(skb)->daddr,
3708 					0, IPPROTO_TCP, 0));
3709 	}
3710 
3711 	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3712 
3713 	*global_data |=
3714 		outerip_off |
3715 		(outerip_len <<
3716 			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3717 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3718 			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3719 
3720 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3721 		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3722 		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3723 	}
3724 }
3725 
3726 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3727 					 u32 xmit_type)
3728 {
3729 	struct ipv6hdr *ipv6;
3730 
3731 	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3732 		return;
3733 
3734 	if (xmit_type & XMIT_GSO_ENC_V6)
3735 		ipv6 = inner_ipv6_hdr(skb);
3736 	else /* XMIT_GSO_V6 */
3737 		ipv6 = ipv6_hdr(skb);
3738 
3739 	if (ipv6->nexthdr == NEXTHDR_IPV6)
3740 		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3741 }
3742 
3743 /* called with netif_tx_lock
3744  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3745  * netif_wake_queue()
3746  */
3747 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3748 {
3749 	struct bnx2x *bp = netdev_priv(dev);
3750 
3751 	struct netdev_queue *txq;
3752 	struct bnx2x_fp_txdata *txdata;
3753 	struct sw_tx_bd *tx_buf;
3754 	struct eth_tx_start_bd *tx_start_bd, *first_bd;
3755 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3756 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3757 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3758 	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3759 	u32 pbd_e2_parsing_data = 0;
3760 	u16 pkt_prod, bd_prod;
3761 	int nbd, txq_index;
3762 	dma_addr_t mapping;
3763 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
3764 	int i;
3765 	u8 hlen = 0;
3766 	__le16 pkt_size = 0;
3767 	struct ethhdr *eth;
3768 	u8 mac_type = UNICAST_ADDRESS;
3769 
3770 #ifdef BNX2X_STOP_ON_ERROR
3771 	if (unlikely(bp->panic))
3772 		return NETDEV_TX_BUSY;
3773 #endif
3774 
3775 	txq_index = skb_get_queue_mapping(skb);
3776 	txq = netdev_get_tx_queue(dev, txq_index);
3777 
3778 	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3779 
3780 	txdata = &bp->bnx2x_txq[txq_index];
3781 
3782 	/* enable this debug print to view the transmission queue being used
3783 	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3784 	   txq_index, fp_index, txdata_index); */
3785 
3786 	/* enable this debug print to view the transmission details
3787 	DP(NETIF_MSG_TX_QUEUED,
3788 	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3789 	   txdata->cid, fp_index, txdata_index, txdata, fp); */
3790 
3791 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
3792 			skb_shinfo(skb)->nr_frags +
3793 			BDS_PER_TX_PKT +
3794 			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3795 		/* Handle special storage cases separately */
3796 		if (txdata->tx_ring_size == 0) {
3797 			struct bnx2x_eth_q_stats *q_stats =
3798 				bnx2x_fp_qstats(bp, txdata->parent_fp);
3799 			q_stats->driver_filtered_tx_pkt++;
3800 			dev_kfree_skb(skb);
3801 			return NETDEV_TX_OK;
3802 		}
3803 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3804 		netif_tx_stop_queue(txq);
3805 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3806 
3807 		return NETDEV_TX_BUSY;
3808 	}
3809 
3810 	DP(NETIF_MSG_TX_QUEUED,
3811 	   "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3812 	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3813 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3814 	   skb->len);
3815 
3816 	eth = (struct ethhdr *)skb->data;
3817 
3818 	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
3819 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3820 		if (is_broadcast_ether_addr(eth->h_dest))
3821 			mac_type = BROADCAST_ADDRESS;
3822 		else
3823 			mac_type = MULTICAST_ADDRESS;
3824 	}
3825 
3826 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3827 	/* First, check if we need to linearize the skb (due to FW
3828 	   restrictions). No need to check fragmentation if page size > 8K
3829 	   (there will be no violation to FW restrictions) */
3830 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3831 		/* Statistics of linearization */
3832 		bp->lin_cnt++;
3833 		if (skb_linearize(skb) != 0) {
3834 			DP(NETIF_MSG_TX_QUEUED,
3835 			   "SKB linearization failed - silently dropping this SKB\n");
3836 			dev_kfree_skb_any(skb);
3837 			return NETDEV_TX_OK;
3838 		}
3839 	}
3840 #endif
3841 	/* Map skb linear data for DMA */
3842 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
3843 				 skb_headlen(skb), DMA_TO_DEVICE);
3844 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3845 		DP(NETIF_MSG_TX_QUEUED,
3846 		   "SKB mapping failed - silently dropping this SKB\n");
3847 		dev_kfree_skb_any(skb);
3848 		return NETDEV_TX_OK;
3849 	}
3850 	/*
3851 	Please read carefully. First we use one BD which we mark as start,
3852 	then we have a parsing info BD (used for TSO or xsum),
3853 	and only then we have the rest of the TSO BDs.
3854 	(don't forget to mark the last one as last,
3855 	and to unmap only AFTER you write to the BD ...)
3856 	And above all, all pdb sizes are in words - NOT DWORDS!
3857 	*/
3858 
3859 	/* get current pkt produced now - advance it just before sending packet
3860 	 * since mapping of pages may fail and cause packet to be dropped
3861 	 */
3862 	pkt_prod = txdata->tx_pkt_prod;
3863 	bd_prod = TX_BD(txdata->tx_bd_prod);
3864 
3865 	/* get a tx_buf and first BD
3866 	 * tx_start_bd may be changed during SPLIT,
3867 	 * but first_bd will always stay first
3868 	 */
3869 	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3870 	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3871 	first_bd = tx_start_bd;
3872 
3873 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3874 
3875 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3876 		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3877 			bp->eth_stats.ptp_skip_tx_ts++;
3878 			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3879 		} else if (bp->ptp_tx_skb) {
3880 			bp->eth_stats.ptp_skip_tx_ts++;
3881 			netdev_err_once(bp->dev,
3882 					"Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3883 		} else {
3884 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3885 			/* schedule check for Tx timestamp */
3886 			bp->ptp_tx_skb = skb_get(skb);
3887 			bp->ptp_tx_start = jiffies;
3888 			schedule_work(&bp->ptp_task);
3889 		}
3890 	}
3891 
3892 	/* header nbd: indirectly zero other flags! */
3893 	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3894 
3895 	/* remember the first BD of the packet */
3896 	tx_buf->first_bd = txdata->tx_bd_prod;
3897 	tx_buf->skb = skb;
3898 	tx_buf->flags = 0;
3899 
3900 	DP(NETIF_MSG_TX_QUEUED,
3901 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3902 	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3903 
3904 	if (skb_vlan_tag_present(skb)) {
3905 		tx_start_bd->vlan_or_ethertype =
3906 		    cpu_to_le16(skb_vlan_tag_get(skb));
3907 		tx_start_bd->bd_flags.as_bitfield |=
3908 		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3909 	} else {
3910 		/* when transmitting in a vf, start bd must hold the ethertype
3911 		 * for fw to enforce it
3912 		 */
3913 		u16 vlan_tci = 0;
3914 #ifndef BNX2X_STOP_ON_ERROR
3915 		if (IS_VF(bp)) {
3916 #endif
3917 			/* Still need to consider inband vlan for enforced */
3918 			if (__vlan_get_tag(skb, &vlan_tci)) {
3919 				tx_start_bd->vlan_or_ethertype =
3920 					cpu_to_le16(ntohs(eth->h_proto));
3921 			} else {
3922 				tx_start_bd->bd_flags.as_bitfield |=
3923 					(X_ETH_INBAND_VLAN <<
3924 					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3925 				tx_start_bd->vlan_or_ethertype =
3926 					cpu_to_le16(vlan_tci);
3927 			}
3928 #ifndef BNX2X_STOP_ON_ERROR
3929 		} else {
3930 			/* used by FW for packet accounting */
3931 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3932 		}
3933 #endif
3934 	}
3935 
3936 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3937 
3938 	/* turn on parsing and get a BD */
3939 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3940 
3941 	if (xmit_type & XMIT_CSUM)
3942 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3943 
3944 	if (!CHIP_IS_E1x(bp)) {
3945 		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3946 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3947 
3948 		if (xmit_type & XMIT_CSUM_ENC) {
3949 			u16 global_data = 0;
3950 
3951 			/* Set PBD in enc checksum offload case */
3952 			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3953 						      &pbd_e2_parsing_data,
3954 						      xmit_type);
3955 
3956 			/* turn on 2nd parsing and get a BD */
3957 			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3958 
3959 			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3960 
3961 			memset(pbd2, 0, sizeof(*pbd2));
3962 
3963 			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3964 				(skb_inner_network_header(skb) -
3965 				 skb->data) >> 1;
3966 
3967 			if (xmit_type & XMIT_GSO_ENC)
3968 				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3969 							  &global_data,
3970 							  xmit_type);
3971 
3972 			pbd2->global_data = cpu_to_le16(global_data);
3973 
3974 			/* add addition parse BD indication to start BD */
3975 			SET_FLAG(tx_start_bd->general_data,
3976 				 ETH_TX_START_BD_PARSE_NBDS, 1);
3977 			/* set encapsulation flag in start BD */
3978 			SET_FLAG(tx_start_bd->general_data,
3979 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3980 
3981 			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3982 
3983 			nbd++;
3984 		} else if (xmit_type & XMIT_CSUM) {
3985 			/* Set PBD in checksum offload case w/o encapsulation */
3986 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3987 						     &pbd_e2_parsing_data,
3988 						     xmit_type);
3989 		}
3990 
3991 		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3992 		/* Add the macs to the parsing BD if this is a vf or if
3993 		 * Tx Switching is enabled.
3994 		 */
3995 		if (IS_VF(bp)) {
3996 			/* override GRE parameters in BD */
3997 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3998 					      &pbd_e2->data.mac_addr.src_mid,
3999 					      &pbd_e2->data.mac_addr.src_lo,
4000 					      eth->h_source);
4001 
4002 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
4003 					      &pbd_e2->data.mac_addr.dst_mid,
4004 					      &pbd_e2->data.mac_addr.dst_lo,
4005 					      eth->h_dest);
4006 		} else {
4007 			if (bp->flags & TX_SWITCHING)
4008 				bnx2x_set_fw_mac_addr(
4009 						&pbd_e2->data.mac_addr.dst_hi,
4010 						&pbd_e2->data.mac_addr.dst_mid,
4011 						&pbd_e2->data.mac_addr.dst_lo,
4012 						eth->h_dest);
4013 #ifdef BNX2X_STOP_ON_ERROR
4014 			/* Enforce security is always set in Stop on Error -
4015 			 * source mac should be present in the parsing BD
4016 			 */
4017 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4018 					      &pbd_e2->data.mac_addr.src_mid,
4019 					      &pbd_e2->data.mac_addr.src_lo,
4020 					      eth->h_source);
4021 #endif
4022 		}
4023 
4024 		SET_FLAG(pbd_e2_parsing_data,
4025 			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4026 	} else {
4027 		u16 global_data = 0;
4028 		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4029 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4030 		/* Set PBD in checksum offload case */
4031 		if (xmit_type & XMIT_CSUM)
4032 			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4033 
4034 		SET_FLAG(global_data,
4035 			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4036 		pbd_e1x->global_data |= cpu_to_le16(global_data);
4037 	}
4038 
4039 	/* Setup the data pointer of the first BD of the packet */
4040 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4041 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4042 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4043 	pkt_size = tx_start_bd->nbytes;
4044 
4045 	DP(NETIF_MSG_TX_QUEUED,
4046 	   "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4047 	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4048 	   le16_to_cpu(tx_start_bd->nbytes),
4049 	   tx_start_bd->bd_flags.as_bitfield,
4050 	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4051 
4052 	if (xmit_type & XMIT_GSO) {
4053 
4054 		DP(NETIF_MSG_TX_QUEUED,
4055 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4056 		   skb->len, hlen, skb_headlen(skb),
4057 		   skb_shinfo(skb)->gso_size);
4058 
4059 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4060 
4061 		if (unlikely(skb_headlen(skb) > hlen)) {
4062 			nbd++;
4063 			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4064 						 &tx_start_bd, hlen,
4065 						 bd_prod);
4066 		}
4067 		if (!CHIP_IS_E1x(bp))
4068 			pbd_e2_parsing_data |=
4069 				(skb_shinfo(skb)->gso_size <<
4070 				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4071 				 ETH_TX_PARSE_BD_E2_LSO_MSS;
4072 		else
4073 			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4074 	}
4075 
4076 	/* Set the PBD's parsing_data field if not zero
4077 	 * (for the chips newer than 57711).
4078 	 */
4079 	if (pbd_e2_parsing_data)
4080 		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4081 
4082 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4083 
4084 	/* Handle fragmented skb */
4085 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4086 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4087 
4088 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4089 					   skb_frag_size(frag), DMA_TO_DEVICE);
4090 		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4091 			unsigned int pkts_compl = 0, bytes_compl = 0;
4092 
4093 			DP(NETIF_MSG_TX_QUEUED,
4094 			   "Unable to map page - dropping packet...\n");
4095 
4096 			/* we need unmap all buffers already mapped
4097 			 * for this SKB;
4098 			 * first_bd->nbd need to be properly updated
4099 			 * before call to bnx2x_free_tx_pkt
4100 			 */
4101 			first_bd->nbd = cpu_to_le16(nbd);
4102 			bnx2x_free_tx_pkt(bp, txdata,
4103 					  TX_BD(txdata->tx_pkt_prod),
4104 					  &pkts_compl, &bytes_compl);
4105 			return NETDEV_TX_OK;
4106 		}
4107 
4108 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4109 		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4110 		if (total_pkt_bd == NULL)
4111 			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4112 
4113 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4114 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4115 		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4116 		le16_add_cpu(&pkt_size, skb_frag_size(frag));
4117 		nbd++;
4118 
4119 		DP(NETIF_MSG_TX_QUEUED,
4120 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4121 		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4122 		   le16_to_cpu(tx_data_bd->nbytes));
4123 	}
4124 
4125 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4126 
4127 	/* update with actual num BDs */
4128 	first_bd->nbd = cpu_to_le16(nbd);
4129 
4130 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4131 
4132 	/* now send a tx doorbell, counting the next BD
4133 	 * if the packet contains or ends with it
4134 	 */
4135 	if (TX_BD_POFF(bd_prod) < nbd)
4136 		nbd++;
4137 
4138 	/* total_pkt_bytes should be set on the first data BD if
4139 	 * it's not an LSO packet and there is more than one
4140 	 * data BD. In this case pkt_size is limited by an MTU value.
4141 	 * However we prefer to set it for an LSO packet (while we don't
4142 	 * have to) in order to save some CPU cycles in a none-LSO
4143 	 * case, when we much more care about them.
4144 	 */
4145 	if (total_pkt_bd != NULL)
4146 		total_pkt_bd->total_pkt_bytes = pkt_size;
4147 
4148 	if (pbd_e1x)
4149 		DP(NETIF_MSG_TX_QUEUED,
4150 		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4151 		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4152 		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4153 		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4154 		    le16_to_cpu(pbd_e1x->total_hlen_w));
4155 	if (pbd_e2)
4156 		DP(NETIF_MSG_TX_QUEUED,
4157 		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4158 		   pbd_e2,
4159 		   pbd_e2->data.mac_addr.dst_hi,
4160 		   pbd_e2->data.mac_addr.dst_mid,
4161 		   pbd_e2->data.mac_addr.dst_lo,
4162 		   pbd_e2->data.mac_addr.src_hi,
4163 		   pbd_e2->data.mac_addr.src_mid,
4164 		   pbd_e2->data.mac_addr.src_lo,
4165 		   pbd_e2->parsing_data);
4166 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4167 
4168 	netdev_tx_sent_queue(txq, skb->len);
4169 
4170 	skb_tx_timestamp(skb);
4171 
4172 	txdata->tx_pkt_prod++;
4173 	/*
4174 	 * Make sure that the BD data is updated before updating the producer
4175 	 * since FW might read the BD right after the producer is updated.
4176 	 * This is only applicable for weak-ordered memory model archs such
4177 	 * as IA-64. The following barrier is also mandatory since FW will
4178 	 * assumes packets must have BDs.
4179 	 */
4180 	wmb();
4181 
4182 	txdata->tx_db.data.prod += nbd;
4183 	/* make sure descriptor update is observed by HW */
4184 	wmb();
4185 
4186 	DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4187 
4188 	txdata->tx_bd_prod += nbd;
4189 
4190 	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4191 		netif_tx_stop_queue(txq);
4192 
4193 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
4194 		 * ordering of set_bit() in netif_tx_stop_queue() and read of
4195 		 * fp->bd_tx_cons */
4196 		smp_mb();
4197 
4198 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4199 		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4200 			netif_tx_wake_queue(txq);
4201 	}
4202 	txdata->tx_pkt++;
4203 
4204 	return NETDEV_TX_OK;
4205 }
4206 
4207 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4208 {
4209 	int mfw_vn = BP_FW_MB_IDX(bp);
4210 	u32 tmp;
4211 
4212 	/* If the shmem shouldn't affect configuration, reflect */
4213 	if (!IS_MF_BD(bp)) {
4214 		int i;
4215 
4216 		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4217 			c2s_map[i] = i;
4218 		*c2s_default = 0;
4219 
4220 		return;
4221 	}
4222 
4223 	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4224 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4225 	c2s_map[0] = tmp & 0xff;
4226 	c2s_map[1] = (tmp >> 8) & 0xff;
4227 	c2s_map[2] = (tmp >> 16) & 0xff;
4228 	c2s_map[3] = (tmp >> 24) & 0xff;
4229 
4230 	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4231 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4232 	c2s_map[4] = tmp & 0xff;
4233 	c2s_map[5] = (tmp >> 8) & 0xff;
4234 	c2s_map[6] = (tmp >> 16) & 0xff;
4235 	c2s_map[7] = (tmp >> 24) & 0xff;
4236 
4237 	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4238 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4239 	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4240 }
4241 
4242 /**
4243  * bnx2x_setup_tc - routine to configure net_device for multi tc
4244  *
4245  * @dev: net device to configure
4246  * @num_tc: number of traffic classes to enable
4247  *
4248  * callback connected to the ndo_setup_tc function pointer
4249  */
4250 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4251 {
4252 	struct bnx2x *bp = netdev_priv(dev);
4253 	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4254 	int cos, prio, count, offset;
4255 
4256 	/* setup tc must be called under rtnl lock */
4257 	ASSERT_RTNL();
4258 
4259 	/* no traffic classes requested. Aborting */
4260 	if (!num_tc) {
4261 		netdev_reset_tc(dev);
4262 		return 0;
4263 	}
4264 
4265 	/* requested to support too many traffic classes */
4266 	if (num_tc > bp->max_cos) {
4267 		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4268 			  num_tc, bp->max_cos);
4269 		return -EINVAL;
4270 	}
4271 
4272 	/* declare amount of supported traffic classes */
4273 	if (netdev_set_num_tc(dev, num_tc)) {
4274 		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4275 		return -EINVAL;
4276 	}
4277 
4278 	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4279 
4280 	/* configure priority to traffic class mapping */
4281 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4282 		int outer_prio = c2s_map[prio];
4283 
4284 		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4285 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4286 		   "mapping priority %d to tc %d\n",
4287 		   outer_prio, bp->prio_to_cos[outer_prio]);
4288 	}
4289 
4290 	/* Use this configuration to differentiate tc0 from other COSes
4291 	   This can be used for ets or pfc, and save the effort of setting
4292 	   up a multio class queue disc or negotiating DCBX with a switch
4293 	netdev_set_prio_tc_map(dev, 0, 0);
4294 	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4295 	for (prio = 1; prio < 16; prio++) {
4296 		netdev_set_prio_tc_map(dev, prio, 1);
4297 		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4298 	} */
4299 
4300 	/* configure traffic class to transmission queue mapping */
4301 	for (cos = 0; cos < bp->max_cos; cos++) {
4302 		count = BNX2X_NUM_ETH_QUEUES(bp);
4303 		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4304 		netdev_set_tc_queue(dev, cos, count, offset);
4305 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4306 		   "mapping tc %d to offset %d count %d\n",
4307 		   cos, offset, count);
4308 	}
4309 
4310 	return 0;
4311 }
4312 
4313 int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4314 		     void *type_data)
4315 {
4316 	struct tc_mqprio_qopt *mqprio = type_data;
4317 
4318 	if (type != TC_SETUP_QDISC_MQPRIO)
4319 		return -EOPNOTSUPP;
4320 
4321 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4322 
4323 	return bnx2x_setup_tc(dev, mqprio->num_tc);
4324 }
4325 
4326 /* called with rtnl_lock */
4327 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4328 {
4329 	struct sockaddr *addr = p;
4330 	struct bnx2x *bp = netdev_priv(dev);
4331 	int rc = 0;
4332 
4333 	if (!is_valid_ether_addr(addr->sa_data)) {
4334 		BNX2X_ERR("Requested MAC address is not valid\n");
4335 		return -EINVAL;
4336 	}
4337 
4338 	if (IS_MF_STORAGE_ONLY(bp)) {
4339 		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4340 		return -EINVAL;
4341 	}
4342 
4343 	if (netif_running(dev))  {
4344 		rc = bnx2x_set_eth_mac(bp, false);
4345 		if (rc)
4346 			return rc;
4347 	}
4348 
4349 	eth_hw_addr_set(dev, addr->sa_data);
4350 
4351 	if (netif_running(dev))
4352 		rc = bnx2x_set_eth_mac(bp, true);
4353 
4354 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4355 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4356 
4357 	return rc;
4358 }
4359 
4360 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4361 {
4362 	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4363 	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4364 	u8 cos;
4365 
4366 	/* Common */
4367 
4368 	if (IS_FCOE_IDX(fp_index)) {
4369 		memset(sb, 0, sizeof(union host_hc_status_block));
4370 		fp->status_blk_mapping = 0;
4371 	} else {
4372 		/* status blocks */
4373 		if (!CHIP_IS_E1x(bp))
4374 			BNX2X_PCI_FREE(sb->e2_sb,
4375 				       bnx2x_fp(bp, fp_index,
4376 						status_blk_mapping),
4377 				       sizeof(struct host_hc_status_block_e2));
4378 		else
4379 			BNX2X_PCI_FREE(sb->e1x_sb,
4380 				       bnx2x_fp(bp, fp_index,
4381 						status_blk_mapping),
4382 				       sizeof(struct host_hc_status_block_e1x));
4383 	}
4384 
4385 	/* Rx */
4386 	if (!skip_rx_queue(bp, fp_index)) {
4387 		bnx2x_free_rx_bds(fp);
4388 
4389 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4390 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4391 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4392 			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
4393 			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
4394 
4395 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4396 			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
4397 			       sizeof(struct eth_fast_path_rx_cqe) *
4398 			       NUM_RCQ_BD);
4399 
4400 		/* SGE ring */
4401 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4402 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4403 			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
4404 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4405 	}
4406 
4407 	/* Tx */
4408 	if (!skip_tx_queue(bp, fp_index)) {
4409 		/* fastpath tx rings: tx_buf tx_desc */
4410 		for_each_cos_in_tx_queue(fp, cos) {
4411 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4412 
4413 			DP(NETIF_MSG_IFDOWN,
4414 			   "freeing tx memory of fp %d cos %d cid %d\n",
4415 			   fp_index, cos, txdata->cid);
4416 
4417 			BNX2X_FREE(txdata->tx_buf_ring);
4418 			BNX2X_PCI_FREE(txdata->tx_desc_ring,
4419 				txdata->tx_desc_mapping,
4420 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4421 		}
4422 	}
4423 	/* end of fastpath */
4424 }
4425 
4426 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4427 {
4428 	int i;
4429 	for_each_cnic_queue(bp, i)
4430 		bnx2x_free_fp_mem_at(bp, i);
4431 }
4432 
4433 void bnx2x_free_fp_mem(struct bnx2x *bp)
4434 {
4435 	int i;
4436 	for_each_eth_queue(bp, i)
4437 		bnx2x_free_fp_mem_at(bp, i);
4438 }
4439 
4440 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4441 {
4442 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4443 	if (!CHIP_IS_E1x(bp)) {
4444 		bnx2x_fp(bp, index, sb_index_values) =
4445 			(__le16 *)status_blk.e2_sb->sb.index_values;
4446 		bnx2x_fp(bp, index, sb_running_index) =
4447 			(__le16 *)status_blk.e2_sb->sb.running_index;
4448 	} else {
4449 		bnx2x_fp(bp, index, sb_index_values) =
4450 			(__le16 *)status_blk.e1x_sb->sb.index_values;
4451 		bnx2x_fp(bp, index, sb_running_index) =
4452 			(__le16 *)status_blk.e1x_sb->sb.running_index;
4453 	}
4454 }
4455 
4456 /* Returns the number of actually allocated BDs */
4457 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4458 			      int rx_ring_size)
4459 {
4460 	struct bnx2x *bp = fp->bp;
4461 	u16 ring_prod, cqe_ring_prod;
4462 	int i, failure_cnt = 0;
4463 
4464 	fp->rx_comp_cons = 0;
4465 	cqe_ring_prod = ring_prod = 0;
4466 
4467 	/* This routine is called only during fo init so
4468 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4469 	 */
4470 	for (i = 0; i < rx_ring_size; i++) {
4471 		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4472 			failure_cnt++;
4473 			continue;
4474 		}
4475 		ring_prod = NEXT_RX_IDX(ring_prod);
4476 		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4477 		WARN_ON(ring_prod <= (i - failure_cnt));
4478 	}
4479 
4480 	if (failure_cnt)
4481 		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4482 			  i - failure_cnt, fp->index);
4483 
4484 	fp->rx_bd_prod = ring_prod;
4485 	/* Limit the CQE producer by the CQE ring size */
4486 	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4487 			       cqe_ring_prod);
4488 
4489 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4490 
4491 	return i - failure_cnt;
4492 }
4493 
4494 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4495 {
4496 	int i;
4497 
4498 	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4499 		struct eth_rx_cqe_next_page *nextpg;
4500 
4501 		nextpg = (struct eth_rx_cqe_next_page *)
4502 			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4503 		nextpg->addr_hi =
4504 			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4505 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4506 		nextpg->addr_lo =
4507 			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4508 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4509 	}
4510 }
4511 
4512 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4513 {
4514 	union host_hc_status_block *sb;
4515 	struct bnx2x_fastpath *fp = &bp->fp[index];
4516 	int ring_size = 0;
4517 	u8 cos;
4518 	int rx_ring_size = 0;
4519 
4520 	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4521 		rx_ring_size = MIN_RX_SIZE_NONTPA;
4522 		bp->rx_ring_size = rx_ring_size;
4523 	} else if (!bp->rx_ring_size) {
4524 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4525 
4526 		if (CHIP_IS_E3(bp)) {
4527 			u32 cfg = SHMEM_RD(bp,
4528 					   dev_info.port_hw_config[BP_PORT(bp)].
4529 					   default_cfg);
4530 
4531 			/* Decrease ring size for 1G functions */
4532 			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4533 			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
4534 				rx_ring_size /= 10;
4535 		}
4536 
4537 		/* allocate at least number of buffers required by FW */
4538 		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4539 				     MIN_RX_SIZE_TPA, rx_ring_size);
4540 
4541 		bp->rx_ring_size = rx_ring_size;
4542 	} else /* if rx_ring_size specified - use it */
4543 		rx_ring_size = bp->rx_ring_size;
4544 
4545 	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4546 
4547 	/* Common */
4548 	sb = &bnx2x_fp(bp, index, status_blk);
4549 
4550 	if (!IS_FCOE_IDX(index)) {
4551 		/* status blocks */
4552 		if (!CHIP_IS_E1x(bp)) {
4553 			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4554 						    sizeof(struct host_hc_status_block_e2));
4555 			if (!sb->e2_sb)
4556 				goto alloc_mem_err;
4557 		} else {
4558 			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4559 						     sizeof(struct host_hc_status_block_e1x));
4560 			if (!sb->e1x_sb)
4561 				goto alloc_mem_err;
4562 		}
4563 	}
4564 
4565 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4566 	 * set shortcuts for it.
4567 	 */
4568 	if (!IS_FCOE_IDX(index))
4569 		set_sb_shortcuts(bp, index);
4570 
4571 	/* Tx */
4572 	if (!skip_tx_queue(bp, index)) {
4573 		/* fastpath tx rings: tx_buf tx_desc */
4574 		for_each_cos_in_tx_queue(fp, cos) {
4575 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4576 
4577 			DP(NETIF_MSG_IFUP,
4578 			   "allocating tx memory of fp %d cos %d\n",
4579 			   index, cos);
4580 
4581 			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4582 						      sizeof(struct sw_tx_bd),
4583 						      GFP_KERNEL);
4584 			if (!txdata->tx_buf_ring)
4585 				goto alloc_mem_err;
4586 			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4587 							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4588 			if (!txdata->tx_desc_ring)
4589 				goto alloc_mem_err;
4590 		}
4591 	}
4592 
4593 	/* Rx */
4594 	if (!skip_rx_queue(bp, index)) {
4595 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4596 		bnx2x_fp(bp, index, rx_buf_ring) =
4597 			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4598 		if (!bnx2x_fp(bp, index, rx_buf_ring))
4599 			goto alloc_mem_err;
4600 		bnx2x_fp(bp, index, rx_desc_ring) =
4601 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4602 					sizeof(struct eth_rx_bd) * NUM_RX_BD);
4603 		if (!bnx2x_fp(bp, index, rx_desc_ring))
4604 			goto alloc_mem_err;
4605 
4606 		/* Seed all CQEs by 1s */
4607 		bnx2x_fp(bp, index, rx_comp_ring) =
4608 			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4609 					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4610 		if (!bnx2x_fp(bp, index, rx_comp_ring))
4611 			goto alloc_mem_err;
4612 
4613 		/* SGE ring */
4614 		bnx2x_fp(bp, index, rx_page_ring) =
4615 			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4616 				GFP_KERNEL);
4617 		if (!bnx2x_fp(bp, index, rx_page_ring))
4618 			goto alloc_mem_err;
4619 		bnx2x_fp(bp, index, rx_sge_ring) =
4620 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4621 					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4622 		if (!bnx2x_fp(bp, index, rx_sge_ring))
4623 			goto alloc_mem_err;
4624 		/* RX BD ring */
4625 		bnx2x_set_next_page_rx_bd(fp);
4626 
4627 		/* CQ ring */
4628 		bnx2x_set_next_page_rx_cq(fp);
4629 
4630 		/* BDs */
4631 		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4632 		if (ring_size < rx_ring_size)
4633 			goto alloc_mem_err;
4634 	}
4635 
4636 	return 0;
4637 
4638 /* handles low memory cases */
4639 alloc_mem_err:
4640 	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4641 						index, ring_size);
4642 	/* FW will drop all packets if queue is not big enough,
4643 	 * In these cases we disable the queue
4644 	 * Min size is different for OOO, TPA and non-TPA queues
4645 	 */
4646 	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4647 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4648 			/* release memory allocated for this queue */
4649 			bnx2x_free_fp_mem_at(bp, index);
4650 			return -ENOMEM;
4651 	}
4652 	return 0;
4653 }
4654 
4655 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4656 {
4657 	if (!NO_FCOE(bp))
4658 		/* FCoE */
4659 		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4660 			/* we will fail load process instead of mark
4661 			 * NO_FCOE_FLAG
4662 			 */
4663 			return -ENOMEM;
4664 
4665 	return 0;
4666 }
4667 
4668 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4669 {
4670 	int i;
4671 
4672 	/* 1. Allocate FP for leading - fatal if error
4673 	 * 2. Allocate RSS - fix number of queues if error
4674 	 */
4675 
4676 	/* leading */
4677 	if (bnx2x_alloc_fp_mem_at(bp, 0))
4678 		return -ENOMEM;
4679 
4680 	/* RSS */
4681 	for_each_nondefault_eth_queue(bp, i)
4682 		if (bnx2x_alloc_fp_mem_at(bp, i))
4683 			break;
4684 
4685 	/* handle memory failures */
4686 	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4687 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4688 
4689 		WARN_ON(delta < 0);
4690 		bnx2x_shrink_eth_fp(bp, delta);
4691 		if (CNIC_SUPPORT(bp))
4692 			/* move non eth FPs next to last eth FP
4693 			 * must be done in that order
4694 			 * FCOE_IDX < FWD_IDX < OOO_IDX
4695 			 */
4696 
4697 			/* move FCoE fp even NO_FCOE_FLAG is on */
4698 			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4699 		bp->num_ethernet_queues -= delta;
4700 		bp->num_queues = bp->num_ethernet_queues +
4701 				 bp->num_cnic_queues;
4702 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4703 			  bp->num_queues + delta, bp->num_queues);
4704 	}
4705 
4706 	return 0;
4707 }
4708 
4709 void bnx2x_free_mem_bp(struct bnx2x *bp)
4710 {
4711 	int i;
4712 
4713 	for (i = 0; i < bp->fp_array_size; i++)
4714 		kfree(bp->fp[i].tpa_info);
4715 	kfree(bp->fp);
4716 	kfree(bp->sp_objs);
4717 	kfree(bp->fp_stats);
4718 	kfree(bp->bnx2x_txq);
4719 	kfree(bp->msix_table);
4720 	kfree(bp->ilt);
4721 }
4722 
4723 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4724 {
4725 	struct bnx2x_fastpath *fp;
4726 	struct msix_entry *tbl;
4727 	struct bnx2x_ilt *ilt;
4728 	int msix_table_size = 0;
4729 	int fp_array_size, txq_array_size;
4730 	int i;
4731 
4732 	/*
4733 	 * The biggest MSI-X table we might need is as a maximum number of fast
4734 	 * path IGU SBs plus default SB (for PF only).
4735 	 */
4736 	msix_table_size = bp->igu_sb_cnt;
4737 	if (IS_PF(bp))
4738 		msix_table_size++;
4739 	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4740 
4741 	/* fp array: RSS plus CNIC related L2 queues */
4742 	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4743 	bp->fp_array_size = fp_array_size;
4744 	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4745 
4746 	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4747 	if (!fp)
4748 		goto alloc_err;
4749 	for (i = 0; i < bp->fp_array_size; i++) {
4750 		fp[i].tpa_info =
4751 			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4752 				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4753 		if (!(fp[i].tpa_info))
4754 			goto alloc_err;
4755 	}
4756 
4757 	bp->fp = fp;
4758 
4759 	/* allocate sp objs */
4760 	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4761 			      GFP_KERNEL);
4762 	if (!bp->sp_objs)
4763 		goto alloc_err;
4764 
4765 	/* allocate fp_stats */
4766 	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4767 			       GFP_KERNEL);
4768 	if (!bp->fp_stats)
4769 		goto alloc_err;
4770 
4771 	/* Allocate memory for the transmission queues array */
4772 	txq_array_size =
4773 		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4774 	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4775 
4776 	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4777 				GFP_KERNEL);
4778 	if (!bp->bnx2x_txq)
4779 		goto alloc_err;
4780 
4781 	/* msix table */
4782 	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4783 	if (!tbl)
4784 		goto alloc_err;
4785 	bp->msix_table = tbl;
4786 
4787 	/* ilt */
4788 	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4789 	if (!ilt)
4790 		goto alloc_err;
4791 	bp->ilt = ilt;
4792 
4793 	return 0;
4794 alloc_err:
4795 	bnx2x_free_mem_bp(bp);
4796 	return -ENOMEM;
4797 }
4798 
4799 int bnx2x_reload_if_running(struct net_device *dev)
4800 {
4801 	struct bnx2x *bp = netdev_priv(dev);
4802 
4803 	if (unlikely(!netif_running(dev)))
4804 		return 0;
4805 
4806 	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4807 	return bnx2x_nic_load(bp, LOAD_NORMAL);
4808 }
4809 
4810 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4811 {
4812 	u32 sel_phy_idx = 0;
4813 	if (bp->link_params.num_phys <= 1)
4814 		return INT_PHY;
4815 
4816 	if (bp->link_vars.link_up) {
4817 		sel_phy_idx = EXT_PHY1;
4818 		/* In case link is SERDES, check if the EXT_PHY2 is the one */
4819 		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4820 		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4821 			sel_phy_idx = EXT_PHY2;
4822 	} else {
4823 
4824 		switch (bnx2x_phy_selection(&bp->link_params)) {
4825 		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4826 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4827 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4828 		       sel_phy_idx = EXT_PHY1;
4829 		       break;
4830 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4831 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4832 		       sel_phy_idx = EXT_PHY2;
4833 		       break;
4834 		}
4835 	}
4836 
4837 	return sel_phy_idx;
4838 }
4839 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4840 {
4841 	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4842 	/*
4843 	 * The selected activated PHY is always after swapping (in case PHY
4844 	 * swapping is enabled). So when swapping is enabled, we need to reverse
4845 	 * the configuration
4846 	 */
4847 
4848 	if (bp->link_params.multi_phy_config &
4849 	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4850 		if (sel_phy_idx == EXT_PHY1)
4851 			sel_phy_idx = EXT_PHY2;
4852 		else if (sel_phy_idx == EXT_PHY2)
4853 			sel_phy_idx = EXT_PHY1;
4854 	}
4855 	return LINK_CONFIG_IDX(sel_phy_idx);
4856 }
4857 
4858 #ifdef NETDEV_FCOE_WWNN
4859 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4860 {
4861 	struct bnx2x *bp = netdev_priv(dev);
4862 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4863 
4864 	switch (type) {
4865 	case NETDEV_FCOE_WWNN:
4866 		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4867 				cp->fcoe_wwn_node_name_lo);
4868 		break;
4869 	case NETDEV_FCOE_WWPN:
4870 		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4871 				cp->fcoe_wwn_port_name_lo);
4872 		break;
4873 	default:
4874 		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4875 		return -EINVAL;
4876 	}
4877 
4878 	return 0;
4879 }
4880 #endif
4881 
4882 /* called with rtnl_lock */
4883 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4884 {
4885 	struct bnx2x *bp = netdev_priv(dev);
4886 
4887 	if (pci_num_vf(bp->pdev)) {
4888 		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4889 		return -EPERM;
4890 	}
4891 
4892 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4893 		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4894 		return -EAGAIN;
4895 	}
4896 
4897 	/* This does not race with packet allocation
4898 	 * because the actual alloc size is
4899 	 * only updated as part of load
4900 	 */
4901 	dev->mtu = new_mtu;
4902 
4903 	if (!bnx2x_mtu_allows_gro(new_mtu))
4904 		dev->features &= ~NETIF_F_GRO_HW;
4905 
4906 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4907 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4908 
4909 	return bnx2x_reload_if_running(dev);
4910 }
4911 
4912 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4913 				     netdev_features_t features)
4914 {
4915 	struct bnx2x *bp = netdev_priv(dev);
4916 
4917 	if (pci_num_vf(bp->pdev)) {
4918 		netdev_features_t changed = dev->features ^ features;
4919 
4920 		/* Revert the requested changes in features if they
4921 		 * would require internal reload of PF in bnx2x_set_features().
4922 		 */
4923 		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4924 			features &= ~NETIF_F_RXCSUM;
4925 			features |= dev->features & NETIF_F_RXCSUM;
4926 		}
4927 
4928 		if (changed & NETIF_F_LOOPBACK) {
4929 			features &= ~NETIF_F_LOOPBACK;
4930 			features |= dev->features & NETIF_F_LOOPBACK;
4931 		}
4932 	}
4933 
4934 	/* TPA requires Rx CSUM offloading */
4935 	if (!(features & NETIF_F_RXCSUM))
4936 		features &= ~NETIF_F_LRO;
4937 
4938 	if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4939 		features &= ~NETIF_F_GRO_HW;
4940 	if (features & NETIF_F_GRO_HW)
4941 		features &= ~NETIF_F_LRO;
4942 
4943 	return features;
4944 }
4945 
4946 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4947 {
4948 	struct bnx2x *bp = netdev_priv(dev);
4949 	netdev_features_t changes = features ^ dev->features;
4950 	bool bnx2x_reload = false;
4951 	int rc;
4952 
4953 	/* VFs or non SRIOV PFs should be able to change loopback feature */
4954 	if (!pci_num_vf(bp->pdev)) {
4955 		if (features & NETIF_F_LOOPBACK) {
4956 			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4957 				bp->link_params.loopback_mode = LOOPBACK_BMAC;
4958 				bnx2x_reload = true;
4959 			}
4960 		} else {
4961 			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4962 				bp->link_params.loopback_mode = LOOPBACK_NONE;
4963 				bnx2x_reload = true;
4964 			}
4965 		}
4966 	}
4967 
4968 	/* Don't care about GRO changes */
4969 	changes &= ~NETIF_F_GRO;
4970 
4971 	if (changes)
4972 		bnx2x_reload = true;
4973 
4974 	if (bnx2x_reload) {
4975 		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4976 			dev->features = features;
4977 			rc = bnx2x_reload_if_running(dev);
4978 			return rc ? rc : 1;
4979 		}
4980 		/* else: bnx2x_nic_load() will be called at end of recovery */
4981 	}
4982 
4983 	return 0;
4984 }
4985 
4986 void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
4987 {
4988 	struct bnx2x *bp = netdev_priv(dev);
4989 
4990 	/* We want the information of the dump logged,
4991 	 * but calling bnx2x_panic() would kill all chances of recovery.
4992 	 */
4993 	if (!bp->panic)
4994 #ifndef BNX2X_STOP_ON_ERROR
4995 		bnx2x_panic_dump(bp, false);
4996 #else
4997 		bnx2x_panic();
4998 #endif
4999 
5000 	/* This allows the netif to be shutdown gracefully before resetting */
5001 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
5002 }
5003 
5004 static int __maybe_unused bnx2x_suspend(struct device *dev_d)
5005 {
5006 	struct pci_dev *pdev = to_pci_dev(dev_d);
5007 	struct net_device *dev = pci_get_drvdata(pdev);
5008 	struct bnx2x *bp;
5009 
5010 	if (!dev) {
5011 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5012 		return -ENODEV;
5013 	}
5014 	bp = netdev_priv(dev);
5015 
5016 	rtnl_lock();
5017 
5018 	if (!netif_running(dev)) {
5019 		rtnl_unlock();
5020 		return 0;
5021 	}
5022 
5023 	netif_device_detach(dev);
5024 
5025 	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5026 
5027 	rtnl_unlock();
5028 
5029 	return 0;
5030 }
5031 
5032 static int __maybe_unused bnx2x_resume(struct device *dev_d)
5033 {
5034 	struct pci_dev *pdev = to_pci_dev(dev_d);
5035 	struct net_device *dev = pci_get_drvdata(pdev);
5036 	struct bnx2x *bp;
5037 	int rc;
5038 
5039 	if (!dev) {
5040 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5041 		return -ENODEV;
5042 	}
5043 	bp = netdev_priv(dev);
5044 
5045 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5046 		BNX2X_ERR("Handling parity error recovery. Try again later\n");
5047 		return -EAGAIN;
5048 	}
5049 
5050 	rtnl_lock();
5051 
5052 	if (!netif_running(dev)) {
5053 		rtnl_unlock();
5054 		return 0;
5055 	}
5056 
5057 	netif_device_attach(dev);
5058 
5059 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
5060 
5061 	rtnl_unlock();
5062 
5063 	return rc;
5064 }
5065 
5066 SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);
5067 
5068 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5069 			      u32 cid)
5070 {
5071 	if (!cxt) {
5072 		BNX2X_ERR("bad context pointer %p\n", cxt);
5073 		return;
5074 	}
5075 
5076 	/* ustorm cxt validation */
5077 	cxt->ustorm_ag_context.cdu_usage =
5078 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5079 			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5080 	/* xcontext validation */
5081 	cxt->xstorm_ag_context.cdu_reserved =
5082 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5083 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5084 }
5085 
5086 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5087 				    u8 fw_sb_id, u8 sb_index,
5088 				    u8 ticks)
5089 {
5090 	u32 addr = BAR_CSTRORM_INTMEM +
5091 		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5092 	REG_WR8(bp, addr, ticks);
5093 	DP(NETIF_MSG_IFUP,
5094 	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
5095 	   port, fw_sb_id, sb_index, ticks);
5096 }
5097 
5098 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5099 				    u16 fw_sb_id, u8 sb_index,
5100 				    u8 disable)
5101 {
5102 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5103 	u32 addr = BAR_CSTRORM_INTMEM +
5104 		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5105 	u8 flags = REG_RD8(bp, addr);
5106 	/* clear and set */
5107 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
5108 	flags |= enable_flag;
5109 	REG_WR8(bp, addr, flags);
5110 	DP(NETIF_MSG_IFUP,
5111 	   "port %x fw_sb_id %d sb_index %d disable %d\n",
5112 	   port, fw_sb_id, sb_index, disable);
5113 }
5114 
5115 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5116 				    u8 sb_index, u8 disable, u16 usec)
5117 {
5118 	int port = BP_PORT(bp);
5119 	u8 ticks = usec / BNX2X_BTR;
5120 
5121 	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5122 
5123 	disable = disable ? 1 : (usec ? 0 : 1);
5124 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5125 }
5126 
5127 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5128 			    u32 verbose)
5129 {
5130 	smp_mb__before_atomic();
5131 	set_bit(flag, &bp->sp_rtnl_state);
5132 	smp_mb__after_atomic();
5133 	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5134 	   flag);
5135 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
5136 }
5137