1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/tcp.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
31 #include "bnx2x_sp.h"
32 
33 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
34 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
35 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
36 static int bnx2x_poll(struct napi_struct *napi, int budget);
37 
38 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
39 {
40 	int i;
41 
42 	/* Add NAPI objects */
43 	for_each_rx_queue_cnic(bp, i) {
44 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
45 			       bnx2x_poll, NAPI_POLL_WEIGHT);
46 		napi_hash_add(&bnx2x_fp(bp, i, napi));
47 	}
48 }
49 
50 static void bnx2x_add_all_napi(struct bnx2x *bp)
51 {
52 	int i;
53 
54 	/* Add NAPI objects */
55 	for_each_eth_queue(bp, i) {
56 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
57 			       bnx2x_poll, NAPI_POLL_WEIGHT);
58 		napi_hash_add(&bnx2x_fp(bp, i, napi));
59 	}
60 }
61 
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
63 {
64 	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
65 
66 	/* Reduce memory usage in kdump environment by using only one queue */
67 	if (reset_devices)
68 		nq = 1;
69 
70 	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
71 	return nq;
72 }
73 
74 /**
75  * bnx2x_move_fp - move content of the fastpath structure.
76  *
77  * @bp:		driver handle
78  * @from:	source FP index
79  * @to:		destination FP index
80  *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem-copying the entire
 * source onto the target. Txdata pointers and related
 * content are updated as well.
86  */
87 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
88 {
89 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
90 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
91 	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92 	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93 	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95 	int old_max_eth_txqs, new_max_eth_txqs;
96 	int old_txdata_index = 0, new_txdata_index = 0;
97 	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
98 
99 	/* Copy the NAPI object as it has been already initialized */
100 	from_fp->napi = to_fp->napi;
101 
102 	/* Move bnx2x_fastpath contents */
103 	memcpy(to_fp, from_fp, sizeof(*to_fp));
104 	to_fp->index = to;
105 
106 	/* Retain the tpa_info of the original `to' version as we don't want
107 	 * 2 FPs to contain the same tpa_info pointer.
108 	 */
109 	to_fp->tpa_info = old_tpa_info;
110 
111 	/* move sp_objs contents as well, as their indices match fp ones */
112 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
113 
114 	/* move fp_stats contents as well, as their indices match fp ones */
115 	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
116 
117 	/* Update txdata pointers in fp and move txdata content accordingly:
118 	 * Each fp consumes 'max_cos' txdata structures, so the index should be
119 	 * decremented by max_cos x delta.
120 	 */
121 
122 	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123 	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
124 				(bp)->max_cos;
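	/* Only the FCoE fastpath's txdata actually needs relocation: it sits
	 * FCOE_TXQ_IDX_OFFSET past the last ETH txdata entry, and the number
	 * of ETH txdata entries has just changed. For any other queue both
	 * indices stay 0 and the copy below is a no-op.
	 */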
125 	if (from == FCOE_IDX(bp)) {
126 		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127 		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 	}
129 
130 	memcpy(&bp->bnx2x_txq[new_txdata_index],
131 	       &bp->bnx2x_txq[old_txdata_index],
132 	       sizeof(struct bnx2x_fp_txdata));
133 	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
134 }
135 
136 /**
137  * bnx2x_fill_fw_str - Fill buffer with FW version string.
138  *
139  * @bp:        driver handle
140  * @buf:       character buffer to fill with the fw name
141  * @buf_len:   length of the above buffer
142  *
143  */
144 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
145 {
146 	if (IS_PF(bp)) {
147 		u8 phy_fw_ver[PHY_FW_VER_LEN];
148 
149 		phy_fw_ver[0] = '\0';
150 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
151 					     phy_fw_ver, PHY_FW_VER_LEN);
152 		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
154 			 "bc %d.%d.%d%s%s",
155 			 (bp->common.bc_ver & 0xff0000) >> 16,
156 			 (bp->common.bc_ver & 0xff00) >> 8,
157 			 (bp->common.bc_ver & 0xff),
158 			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
159 	} else {
160 		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
161 	}
162 }
163 
164 /**
165  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
166  *
167  * @bp:	driver handle
168  * @delta:	number of eth queues which were not allocated
169  */
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
171 {
172 	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
173 
174 	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
176 	 */
177 	for (cos = 1; cos < bp->max_cos; cos++) {
178 		for (i = 0; i < old_eth_num - delta; i++) {
179 			struct bnx2x_fastpath *fp = &bp->fp[i];
180 			int new_idx = cos * (old_eth_num - delta) + i;
181 
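			/* Re-home this queue's txdata for CoS 'cos' to the
			 * slot it occupies when only (old_eth_num - delta)
			 * ETH queues exist, then repoint the fastpath at it.
			 */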
182 			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183 			       sizeof(struct bnx2x_fp_txdata));
184 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
185 		}
186 	}
187 }
188 
189 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
190 
191 /* free skb in the packet ring at pos idx
192  * return idx of last bd freed
193  */
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195 			     u16 idx, unsigned int *pkts_compl,
196 			     unsigned int *bytes_compl)
197 {
198 	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199 	struct eth_tx_start_bd *tx_start_bd;
200 	struct eth_tx_bd *tx_data_bd;
201 	struct sk_buff *skb = tx_buf->skb;
202 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
203 	int nbd;
204 	u16 split_bd_len = 0;
205 
206 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
207 	prefetch(&skb->end);
208 
209 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
210 	   txdata->txq_index, idx, tx_buf, skb);
211 
212 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
213 
214 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215 #ifdef BNX2X_STOP_ON_ERROR
216 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217 		BNX2X_ERR("BAD nbd!\n");
218 		bnx2x_panic();
219 	}
220 #endif
221 	new_cons = nbd + tx_buf->first_bd;
222 
223 	/* Get the next bd */
224 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
225 
226 	/* Skip a parse bd... */
227 	--nbd;
228 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229 
230 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
231 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
232 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
233 		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
234 		--nbd;
235 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
236 	}
237 
238 	/* unmap first bd */
239 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
240 			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
241 			 DMA_TO_DEVICE);
242 
243 	/* now free frags */
244 	while (nbd > 0) {
245 
246 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
247 		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
248 			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
249 		if (--nbd)
250 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
251 	}
252 
253 	/* release skb */
254 	WARN_ON(!skb);
255 	if (likely(skb)) {
256 		(*pkts_compl)++;
257 		(*bytes_compl) += skb->len;
258 	}
259 
260 	dev_kfree_skb_any(skb);
261 	tx_buf->first_bd = 0;
262 	tx_buf->skb = NULL;
263 
264 	return new_cons;
265 }
266 
267 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
268 {
269 	struct netdev_queue *txq;
270 	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
271 	unsigned int pkts_compl = 0, bytes_compl = 0;
272 
273 #ifdef BNX2X_STOP_ON_ERROR
274 	if (unlikely(bp->panic))
275 		return -1;
276 #endif
277 
278 	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
279 	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
280 	sw_cons = txdata->tx_pkt_cons;
281 
282 	while (sw_cons != hw_cons) {
283 		u16 pkt_cons;
284 
285 		pkt_cons = TX_BD(sw_cons);
286 
287 		DP(NETIF_MSG_TX_DONE,
288 		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
289 		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
290 
291 		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
292 					    &pkts_compl, &bytes_compl);
293 
294 		sw_cons++;
295 	}
296 
297 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
298 
299 	txdata->tx_pkt_cons = sw_cons;
300 	txdata->tx_bd_cons = bd_cons;
301 
302 	/* Need to make the tx_bd_cons update visible to start_xmit()
303 	 * before checking for netif_tx_queue_stopped().  Without the
304 	 * memory barrier, there is a small possibility that
305 	 * start_xmit() will miss it and cause the queue to be stopped
306 	 * forever.
307 	 * On the other hand we need an rmb() here to ensure the proper
308 	 * ordering of bit testing in the following
309 	 * netif_tx_queue_stopped(txq) call.
310 	 */
311 	smp_mb();
312 
313 	if (unlikely(netif_tx_queue_stopped(txq))) {
314 		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after checking the condition before
317 		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
318 		 *
319 		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
320 		 * sends some packets consuming the whole queue again->
321 		 * stops the queue
322 		 */
323 
324 		__netif_tx_lock(txq, smp_processor_id());
325 
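		/* Re-check under the tx lock: only wake the queue if it is
		 * still stopped, the device is fully up and there is room for
		 * at least one maximally-fragmented packet.
		 */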
326 		if ((netif_tx_queue_stopped(txq)) &&
327 		    (bp->state == BNX2X_STATE_OPEN) &&
328 		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
329 			netif_tx_wake_queue(txq);
330 
331 		__netif_tx_unlock(txq);
332 	}
333 	return 0;
334 }
335 
336 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
337 					     u16 idx)
338 {
339 	u16 last_max = fp->last_max_sge;
340 
341 	if (SUB_S16(idx, last_max) > 0)
342 		fp->last_max_sge = idx;
343 }
344 
345 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
346 					 u16 sge_len,
347 					 struct eth_end_agg_rx_cqe *cqe)
348 {
349 	struct bnx2x *bp = fp->bp;
350 	u16 last_max, last_elem, first_elem;
351 	u16 delta = 0;
352 	u16 i;
353 
354 	if (!sge_len)
355 		return;
356 
357 	/* First mark all used pages */
358 	for (i = 0; i < sge_len; i++)
359 		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
360 			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
361 
362 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
363 	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
364 
365 	/* Here we assume that the last SGE index is the biggest */
366 	prefetch((void *)(fp->sge_mask));
367 	bnx2x_update_last_max_sge(fp,
368 		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
369 
370 	last_max = RX_SGE(fp->last_max_sge);
371 	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
372 	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
373 
374 	/* If ring is not full */
375 	if (last_elem + 1 != first_elem)
376 		last_elem++;
377 
378 	/* Now update the prod */
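	/* Walk from the current producer towards the last used SGE: re-arm
	 * every fully-consumed 64-bit mask element and advance the producer
	 * past it; stop at the first element that still has unused SGEs.
	 */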
379 	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
380 		if (likely(fp->sge_mask[i]))
381 			break;
382 
383 		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
384 		delta += BIT_VEC64_ELEM_SZ;
385 	}
386 
387 	if (delta > 0) {
388 		fp->rx_sge_prod += delta;
389 		/* clear page-end entries */
390 		bnx2x_clear_sge_mask_next_elems(fp);
391 	}
392 
393 	DP(NETIF_MSG_RX_STATUS,
394 	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
395 	   fp->last_max_sge, fp->rx_sge_prod);
396 }
397 
398 /* Get Toeplitz hash value in the skb using the value from the
399  * CQE (calculated by HW).
400  */
401 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
402 			    const struct eth_fast_path_rx_cqe *cqe,
403 			    enum pkt_hash_types *rxhash_type)
404 {
405 	/* Get Toeplitz hash from CQE */
406 	if ((bp->dev->features & NETIF_F_RXHASH) &&
407 	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
408 		enum eth_rss_hash_type htype;
409 
410 		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
411 		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
412 				(htype == TCP_IPV6_HASH_TYPE)) ?
413 			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
414 
415 		return le32_to_cpu(cqe->rss_hash_result);
416 	}
417 	*rxhash_type = PKT_HASH_TYPE_NONE;
418 	return 0;
419 }
420 
421 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
422 			    u16 cons, u16 prod,
423 			    struct eth_fast_path_rx_cqe *cqe)
424 {
425 	struct bnx2x *bp = fp->bp;
426 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
427 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
428 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
429 	dma_addr_t mapping;
430 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
431 	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
432 
433 	/* print error if current state != stop */
434 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
435 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
436 
437 	/* Try to map an empty data buffer from the aggregation info  */
438 	mapping = dma_map_single(&bp->pdev->dev,
439 				 first_buf->data + NET_SKB_PAD,
440 				 fp->rx_buf_size, DMA_FROM_DEVICE);
441 	/*
442 	 *  ...if it fails - move the skb from the consumer to the producer
443 	 *  and set the current aggregation state as ERROR to drop it
444 	 *  when TPA_STOP arrives.
445 	 */
446 
447 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
448 		/* Move the BD from the consumer to the producer */
449 		bnx2x_reuse_rx_data(fp, cons, prod);
450 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
451 		return;
452 	}
453 
454 	/* move empty data from pool to prod */
455 	prod_rx_buf->data = first_buf->data;
456 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
457 	/* point prod_bd to new data */
458 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
459 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
460 
461 	/* move partial skb from cons to pool (don't unmap yet) */
462 	*first_buf = *cons_rx_buf;
463 
464 	/* mark bin state as START */
465 	tpa_info->parsing_flags =
466 		le16_to_cpu(cqe->pars_flags.flags);
467 	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
468 	tpa_info->tpa_state = BNX2X_TPA_START;
469 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
470 	tpa_info->placement_offset = cqe->placement_offset;
471 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
472 	if (fp->mode == TPA_MODE_GRO) {
473 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
474 		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
475 		tpa_info->gro_size = gro_size;
476 	}
477 
478 #ifdef BNX2X_STOP_ON_ERROR
479 	fp->tpa_queue_used |= (1 << queue);
480 #ifdef _ASM_GENERIC_INT_L64_H
481 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
482 #else
483 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
484 #endif
485 	   fp->tpa_queue_used);
486 #endif
487 }
488 
489 /* Timestamp option length allowed for TPA aggregation:
490  *
491  *		nop nop kind length echo val
492  */
493 #define TPA_TSTAMP_OPT_LEN	12
494 /**
495  * bnx2x_set_gro_params - compute GRO values
496  *
497  * @skb:		packet skb
498  * @parsing_flags:	parsing flags from the START CQE
499  * @len_on_bd:		total length of the first packet for the
500  *			aggregation.
501  * @pkt_len:		length of all segments
502  *
503  * Approximate value of the MSS for this aggregation calculated using
504  * the first packet of it.
505  * Compute number of aggregated segments, and gso_type.
506  */
507 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
508 				 u16 len_on_bd, unsigned int pkt_len,
509 				 u16 num_of_coalesced_segs)
510 {
511 	/* TPA aggregation won't have either IP options or TCP options
512 	 * other than timestamp or IPv6 extension headers.
513 	 */
514 	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
515 
516 	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
517 	    PRS_FLAG_OVERETH_IPV6) {
518 		hdrs_len += sizeof(struct ipv6hdr);
519 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
520 	} else {
521 		hdrs_len += sizeof(struct iphdr);
522 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
523 	}
524 
	/* Check if there was a TCP timestamp; if there was one, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise the FW would have closed the aggregation.
529 	 */
530 	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
531 		hdrs_len += TPA_TSTAMP_OPT_LEN;
532 
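	/* Approximate the MSS as the first coalesced frame length minus its
	 * Ethernet + IP + TCP (+ optional timestamp) headers.
	 */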
533 	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
534 
535 	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
536 	 * to skb_shinfo(skb)->gso_segs
537 	 */
538 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
539 }
540 
541 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
542 			      u16 index, gfp_t gfp_mask)
543 {
544 	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
545 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
546 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
547 	dma_addr_t mapping;
548 
549 	if (unlikely(page == NULL)) {
550 		BNX2X_ERR("Can't alloc sge\n");
551 		return -ENOMEM;
552 	}
553 
554 	mapping = dma_map_page(&bp->pdev->dev, page, 0,
555 			       SGE_PAGES, DMA_FROM_DEVICE);
556 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
557 		__free_pages(page, PAGES_PER_SGE_SHIFT);
558 		BNX2X_ERR("Can't map sge\n");
559 		return -ENOMEM;
560 	}
561 
562 	sw_buf->page = page;
563 	dma_unmap_addr_set(sw_buf, mapping, mapping);
564 
565 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
566 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
567 
568 	return 0;
569 }
570 
571 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
572 			       struct bnx2x_agg_info *tpa_info,
573 			       u16 pages,
574 			       struct sk_buff *skb,
575 			       struct eth_end_agg_rx_cqe *cqe,
576 			       u16 cqe_idx)
577 {
578 	struct sw_rx_page *rx_pg, old_rx_pg;
579 	u32 i, frag_len, frag_size;
580 	int err, j, frag_id = 0;
581 	u16 len_on_bd = tpa_info->len_on_bd;
582 	u16 full_page = 0, gro_size = 0;
583 
584 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
585 
586 	if (fp->mode == TPA_MODE_GRO) {
587 		gro_size = tpa_info->gro_size;
588 		full_page = tpa_info->full_page;
589 	}
590 
591 	/* This is needed in order to enable forwarding support */
592 	if (frag_size)
593 		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
594 				     le16_to_cpu(cqe->pkt_len),
595 				     le16_to_cpu(cqe->num_of_coalesced_segs));
596 
597 #ifdef BNX2X_STOP_ON_ERROR
598 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
599 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
600 			  pages, cqe_idx);
601 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
602 		bnx2x_panic();
603 		return -EINVAL;
604 	}
605 #endif
606 
607 	/* Run through the SGL and compose the fragmented skb */
608 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
609 		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
610 
611 		/* FW gives the indices of the SGE as if the ring is an array
612 		   (meaning that "next" element will consume 2 indices) */
613 		if (fp->mode == TPA_MODE_GRO)
614 			frag_len = min_t(u32, frag_size, (u32)full_page);
615 		else /* LRO */
616 			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
617 
618 		rx_pg = &fp->rx_page_ring[sge_idx];
619 		old_rx_pg = *rx_pg;
620 
621 		/* If we fail to allocate a substitute page, we simply stop
622 		   where we are and drop the whole packet */
623 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
624 		if (unlikely(err)) {
625 			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
626 			return err;
627 		}
628 
629 		/* Unmap the page as we're going to pass it to the stack */
630 		dma_unmap_page(&bp->pdev->dev,
631 			       dma_unmap_addr(&old_rx_pg, mapping),
632 			       SGE_PAGES, DMA_FROM_DEVICE);
633 		/* Add one frag and update the appropriate fields in the skb */
634 		if (fp->mode == TPA_MODE_LRO)
635 			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
636 		else { /* GRO */
637 			int rem;
638 			int offset = 0;
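			/* Split the SGE page into gro_size chunks; every chunk
			 * after the first references the same page, so take an
			 * extra page reference for it.
			 */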
639 			for (rem = frag_len; rem > 0; rem -= gro_size) {
640 				int len = rem > gro_size ? gro_size : rem;
641 				skb_fill_page_desc(skb, frag_id++,
642 						   old_rx_pg.page, offset, len);
643 				if (offset)
644 					get_page(old_rx_pg.page);
645 				offset += len;
646 			}
647 		}
648 
649 		skb->data_len += frag_len;
650 		skb->truesize += SGE_PAGES;
651 		skb->len += frag_len;
652 
653 		frag_size -= frag_len;
654 	}
655 
656 	return 0;
657 }
658 
659 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
660 {
661 	if (fp->rx_frag_size)
662 		put_page(virt_to_head_page(data));
663 	else
664 		kfree(data);
665 }
666 
667 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
668 {
669 	if (fp->rx_frag_size) {
670 		/* GFP_KERNEL allocations are used only during initialization */
671 		if (unlikely(gfp_mask & __GFP_WAIT))
672 			return (void *)__get_free_page(gfp_mask);
673 
674 		return netdev_alloc_frag(fp->rx_frag_size);
675 	}
676 
677 	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
678 }
679 
680 #ifdef CONFIG_INET
681 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
682 {
683 	const struct iphdr *iph = ip_hdr(skb);
684 	struct tcphdr *th;
685 
686 	skb_set_transport_header(skb, sizeof(struct iphdr));
687 	th = tcp_hdr(skb);
688 
689 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
690 				  iph->saddr, iph->daddr, 0);
691 }
692 
693 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
694 {
695 	struct ipv6hdr *iph = ipv6_hdr(skb);
696 	struct tcphdr *th;
697 
698 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
699 	th = tcp_hdr(skb);
700 
701 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
702 				  &iph->saddr, &iph->daddr, 0);
703 }
704 
705 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
706 			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
707 {
708 	skb_set_network_header(skb, 0);
709 	gro_func(bp, skb);
710 	tcp_gro_complete(skb);
711 }
712 #endif
713 
714 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
715 			       struct sk_buff *skb)
716 {
717 #ifdef CONFIG_INET
718 	if (skb_shinfo(skb)->gso_size) {
719 		switch (be16_to_cpu(skb->protocol)) {
720 		case ETH_P_IP:
721 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
722 			break;
723 		case ETH_P_IPV6:
724 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
725 			break;
726 		default:
727 			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
728 				  be16_to_cpu(skb->protocol));
729 		}
730 	}
731 #endif
732 	skb_record_rx_queue(skb, fp->rx_queue);
733 	napi_gro_receive(&fp->napi, skb);
734 }
735 
736 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
737 			   struct bnx2x_agg_info *tpa_info,
738 			   u16 pages,
739 			   struct eth_end_agg_rx_cqe *cqe,
740 			   u16 cqe_idx)
741 {
742 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
743 	u8 pad = tpa_info->placement_offset;
744 	u16 len = tpa_info->len_on_bd;
745 	struct sk_buff *skb = NULL;
746 	u8 *new_data, *data = rx_buf->data;
747 	u8 old_tpa_state = tpa_info->tpa_state;
748 
749 	tpa_info->tpa_state = BNX2X_TPA_STOP;
750 
	/* If there was an error during the handling of the TPA_START -
752 	 * drop this aggregation.
753 	 */
754 	if (old_tpa_state == BNX2X_TPA_ERROR)
755 		goto drop;
756 
757 	/* Try to allocate the new data */
758 	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
759 	/* Unmap skb in the pool anyway, as we are going to change
760 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
761 	   fails. */
762 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
763 			 fp->rx_buf_size, DMA_FROM_DEVICE);
764 	if (likely(new_data))
765 		skb = build_skb(data, fp->rx_frag_size);
766 
767 	if (likely(skb)) {
768 #ifdef BNX2X_STOP_ON_ERROR
769 		if (pad + len > fp->rx_buf_size) {
770 			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
771 				  pad, len, fp->rx_buf_size);
772 			bnx2x_panic();
773 			return;
774 		}
775 #endif
776 
777 		skb_reserve(skb, pad + NET_SKB_PAD);
778 		skb_put(skb, len);
779 		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
780 
781 		skb->protocol = eth_type_trans(skb, bp->dev);
782 		skb->ip_summed = CHECKSUM_UNNECESSARY;
783 
784 		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
785 					 skb, cqe, cqe_idx)) {
786 			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
787 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
788 			bnx2x_gro_receive(bp, fp, skb);
789 		} else {
790 			DP(NETIF_MSG_RX_STATUS,
791 			   "Failed to allocate new pages - dropping packet!\n");
792 			dev_kfree_skb_any(skb);
793 		}
794 
795 		/* put new data in bin */
796 		rx_buf->data = new_data;
797 
798 		return;
799 	}
800 	bnx2x_frag_free(fp, new_data);
801 drop:
802 	/* drop the packet and keep the buffer in the bin */
803 	DP(NETIF_MSG_RX_STATUS,
804 	   "Failed to allocate or map a new skb - dropping packet!\n");
805 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
806 }
807 
808 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
809 			       u16 index, gfp_t gfp_mask)
810 {
811 	u8 *data;
812 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
813 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
814 	dma_addr_t mapping;
815 
816 	data = bnx2x_frag_alloc(fp, gfp_mask);
817 	if (unlikely(data == NULL))
818 		return -ENOMEM;
819 
820 	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
821 				 fp->rx_buf_size,
822 				 DMA_FROM_DEVICE);
823 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
824 		bnx2x_frag_free(fp, data);
825 		BNX2X_ERR("Can't map rx data\n");
826 		return -ENOMEM;
827 	}
828 
829 	rx_buf->data = data;
830 	dma_unmap_addr_set(rx_buf, mapping, mapping);
831 
832 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
833 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
834 
835 	return 0;
836 }
837 
838 static
839 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
840 				 struct bnx2x_fastpath *fp,
841 				 struct bnx2x_eth_q_stats *qstats)
842 {
843 	/* Do nothing if no L4 csum validation was done.
844 	 * We do not check whether IP csum was validated. For IPv4 we assume
845 	 * that if the card got as far as validating the L4 csum, it also
846 	 * validated the IP csum. IPv6 has no IP csum.
847 	 */
848 	if (cqe->fast_path_cqe.status_flags &
849 	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
850 		return;
851 
852 	/* If L4 validation was done, check if an error was found. */
853 
854 	if (cqe->fast_path_cqe.type_error_flags &
855 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
856 	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
857 		qstats->hw_csum_err++;
858 	else
859 		skb->ip_summed = CHECKSUM_UNNECESSARY;
860 }
861 
862 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
863 {
864 	struct bnx2x *bp = fp->bp;
865 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
866 	u16 sw_comp_cons, sw_comp_prod;
867 	int rx_pkt = 0;
868 	union eth_rx_cqe *cqe;
869 	struct eth_fast_path_rx_cqe *cqe_fp;
870 
871 #ifdef BNX2X_STOP_ON_ERROR
872 	if (unlikely(bp->panic))
873 		return 0;
874 #endif
875 	if (budget <= 0)
876 		return rx_pkt;
877 
878 	bd_cons = fp->rx_bd_cons;
879 	bd_prod = fp->rx_bd_prod;
880 	bd_prod_fw = bd_prod;
881 	sw_comp_cons = fp->rx_comp_cons;
882 	sw_comp_prod = fp->rx_comp_prod;
883 
884 	comp_ring_cons = RCQ_BD(sw_comp_cons);
885 	cqe = &fp->rx_comp_ring[comp_ring_cons];
886 	cqe_fp = &cqe->fast_path_cqe;
887 
888 	DP(NETIF_MSG_RX_STATUS,
889 	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
890 
891 	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
892 		struct sw_rx_bd *rx_buf = NULL;
893 		struct sk_buff *skb;
894 		u8 cqe_fp_flags;
895 		enum eth_rx_cqe_type cqe_fp_type;
896 		u16 len, pad, queue;
897 		u8 *data;
898 		u32 rxhash;
899 		enum pkt_hash_types rxhash_type;
900 
901 #ifdef BNX2X_STOP_ON_ERROR
902 		if (unlikely(bp->panic))
903 			return 0;
904 #endif
905 
906 		bd_prod = RX_BD(bd_prod);
907 		bd_cons = RX_BD(bd_cons);
908 
909 		cqe_fp_flags = cqe_fp->type_error_flags;
910 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
911 
912 		DP(NETIF_MSG_RX_STATUS,
913 		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
914 		   CQE_TYPE(cqe_fp_flags),
915 		   cqe_fp_flags, cqe_fp->status_flags,
916 		   le32_to_cpu(cqe_fp->rss_hash_result),
917 		   le16_to_cpu(cqe_fp->vlan_tag),
918 		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
919 
920 		/* is this a slowpath msg? */
921 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
922 			bnx2x_sp_event(fp, cqe);
923 			goto next_cqe;
924 		}
925 
926 		rx_buf = &fp->rx_buf_ring[bd_cons];
927 		data = rx_buf->data;
928 
929 		if (!CQE_TYPE_FAST(cqe_fp_type)) {
930 			struct bnx2x_agg_info *tpa_info;
931 			u16 frag_size, pages;
932 #ifdef BNX2X_STOP_ON_ERROR
933 			/* sanity check */
934 			if (fp->disable_tpa &&
935 			    (CQE_TYPE_START(cqe_fp_type) ||
936 			     CQE_TYPE_STOP(cqe_fp_type)))
937 				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
938 					  CQE_TYPE(cqe_fp_type));
939 #endif
940 
941 			if (CQE_TYPE_START(cqe_fp_type)) {
942 				u16 queue = cqe_fp->queue_index;
943 				DP(NETIF_MSG_RX_STATUS,
944 				   "calling tpa_start on queue %d\n",
945 				   queue);
946 
947 				bnx2x_tpa_start(fp, queue,
948 						bd_cons, bd_prod,
949 						cqe_fp);
950 
951 				goto next_rx;
952 			}
953 			queue = cqe->end_agg_cqe.queue_index;
954 			tpa_info = &fp->tpa_info[queue];
955 			DP(NETIF_MSG_RX_STATUS,
956 			   "calling tpa_stop on queue %d\n",
957 			   queue);
958 
959 			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
960 				    tpa_info->len_on_bd;
961 
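			/* Number of SGE entries holding the aggregated
			 * payload: GRO packs full_page bytes per entry, LRO
			 * uses SGE_PAGES-sized chunks.
			 */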
962 			if (fp->mode == TPA_MODE_GRO)
963 				pages = (frag_size + tpa_info->full_page - 1) /
964 					 tpa_info->full_page;
965 			else
966 				pages = SGE_PAGE_ALIGN(frag_size) >>
967 					SGE_PAGE_SHIFT;
968 
969 			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
970 				       &cqe->end_agg_cqe, comp_ring_cons);
971 #ifdef BNX2X_STOP_ON_ERROR
972 			if (bp->panic)
973 				return 0;
974 #endif
975 
976 			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
977 			goto next_cqe;
978 		}
979 		/* non TPA */
980 		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
981 		pad = cqe_fp->placement_offset;
982 		dma_sync_single_for_cpu(&bp->pdev->dev,
983 					dma_unmap_addr(rx_buf, mapping),
984 					pad + RX_COPY_THRESH,
985 					DMA_FROM_DEVICE);
986 		pad += NET_SKB_PAD;
987 		prefetch(data + pad); /* speedup eth_type_trans() */
988 		/* is this an error packet? */
989 		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
990 			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
991 			   "ERROR  flags %x  rx packet %u\n",
992 			   cqe_fp_flags, sw_comp_cons);
993 			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
994 			goto reuse_rx;
995 		}
996 
		/* Since we don't have a jumbo ring,
998 		 * copy small packets if mtu > 1500
999 		 */
1000 		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1001 		    (len <= RX_COPY_THRESH)) {
1002 			skb = netdev_alloc_skb_ip_align(bp->dev, len);
1003 			if (skb == NULL) {
1004 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1005 				   "ERROR  packet dropped because of alloc failure\n");
1006 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1007 				goto reuse_rx;
1008 			}
1009 			memcpy(skb->data, data + pad, len);
1010 			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1011 		} else {
1012 			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1013 						       GFP_ATOMIC) == 0)) {
1014 				dma_unmap_single(&bp->pdev->dev,
1015 						 dma_unmap_addr(rx_buf, mapping),
1016 						 fp->rx_buf_size,
1017 						 DMA_FROM_DEVICE);
1018 				skb = build_skb(data, fp->rx_frag_size);
1019 				if (unlikely(!skb)) {
1020 					bnx2x_frag_free(fp, data);
1021 					bnx2x_fp_qstats(bp, fp)->
1022 							rx_skb_alloc_failed++;
1023 					goto next_rx;
1024 				}
1025 				skb_reserve(skb, pad);
1026 			} else {
1027 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1028 				   "ERROR  packet dropped because of alloc failure\n");
1029 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1030 reuse_rx:
1031 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1032 				goto next_rx;
1033 			}
1034 		}
1035 
1036 		skb_put(skb, len);
1037 		skb->protocol = eth_type_trans(skb, bp->dev);
1038 
		/* Set Toeplitz hash for a non-LRO skb */
1040 		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1041 		skb_set_hash(skb, rxhash, rxhash_type);
1042 
1043 		skb_checksum_none_assert(skb);
1044 
1045 		if (bp->dev->features & NETIF_F_RXCSUM)
1046 			bnx2x_csum_validate(skb, cqe, fp,
1047 					    bnx2x_fp_qstats(bp, fp));
1048 
1049 		skb_record_rx_queue(skb, fp->rx_queue);
1050 
1051 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1052 		    PARSING_FLAGS_VLAN)
1053 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1054 					       le16_to_cpu(cqe_fp->vlan_tag));
1055 
1056 		skb_mark_napi_id(skb, &fp->napi);
1057 
1058 		if (bnx2x_fp_ll_polling(fp))
1059 			netif_receive_skb(skb);
1060 		else
1061 			napi_gro_receive(&fp->napi, skb);
1062 next_rx:
1063 		rx_buf->data = NULL;
1064 
1065 		bd_cons = NEXT_RX_IDX(bd_cons);
1066 		bd_prod = NEXT_RX_IDX(bd_prod);
1067 		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1068 		rx_pkt++;
1069 next_cqe:
1070 		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1071 		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1072 
1073 		/* mark CQE as free */
1074 		BNX2X_SEED_CQE(cqe_fp);
1075 
1076 		if (rx_pkt == budget)
1077 			break;
1078 
1079 		comp_ring_cons = RCQ_BD(sw_comp_cons);
1080 		cqe = &fp->rx_comp_ring[comp_ring_cons];
1081 		cqe_fp = &cqe->fast_path_cqe;
1082 	} /* while */
1083 
1084 	fp->rx_bd_cons = bd_cons;
1085 	fp->rx_bd_prod = bd_prod_fw;
1086 	fp->rx_comp_cons = sw_comp_cons;
1087 	fp->rx_comp_prod = sw_comp_prod;
1088 
1089 	/* Update producers */
1090 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1091 			     fp->rx_sge_prod);
1092 
1093 	fp->rx_pkt += rx_pkt;
1094 	fp->rx_calls++;
1095 
1096 	return rx_pkt;
1097 }
1098 
1099 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1100 {
1101 	struct bnx2x_fastpath *fp = fp_cookie;
1102 	struct bnx2x *bp = fp->bp;
1103 	u8 cos;
1104 
1105 	DP(NETIF_MSG_INTR,
1106 	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1107 	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
1108 
1109 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1110 
1111 #ifdef BNX2X_STOP_ON_ERROR
1112 	if (unlikely(bp->panic))
1113 		return IRQ_HANDLED;
1114 #endif
1115 
1116 	/* Handle Rx and Tx according to MSI-X vector */
1117 	for_each_cos_in_tx_queue(fp, cos)
1118 		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1119 
1120 	prefetch(&fp->sb_running_index[SM_RX_ID]);
1121 	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1122 
1123 	return IRQ_HANDLED;
1124 }
1125 
1126 /* HW Lock for shared dual port PHYs */
1127 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1128 {
1129 	mutex_lock(&bp->port.phy_mutex);
1130 
1131 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1132 }
1133 
1134 void bnx2x_release_phy_lock(struct bnx2x *bp)
1135 {
1136 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1137 
1138 	mutex_unlock(&bp->port.phy_mutex);
1139 }
1140 
1141 /* calculates MF speed according to current linespeed and MF configuration */
1142 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1143 {
1144 	u16 line_speed = bp->link_vars.line_speed;
1145 	if (IS_MF(bp)) {
1146 		u16 maxCfg = bnx2x_extract_max_cfg(bp,
1147 						   bp->mf_config[BP_VN(bp)]);
1148 
1149 		/* Calculate the current MAX line speed limit for the MF
1150 		 * devices
1151 		 */
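		/* In SI mode maxCfg is a percentage of the line speed; in SD
		 * mode it is the VN max rate in units of 100 Mbps.
		 */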
1152 		if (IS_MF_SI(bp))
1153 			line_speed = (line_speed * maxCfg) / 100;
1154 		else { /* SD mode */
1155 			u16 vn_max_rate = maxCfg * 100;
1156 
1157 			if (vn_max_rate < line_speed)
1158 				line_speed = vn_max_rate;
1159 		}
1160 	}
1161 
1162 	return line_speed;
1163 }
1164 
1165 /**
1166  * bnx2x_fill_report_data - fill link report data to report
1167  *
1168  * @bp:		driver handle
1169  * @data:	link state to update
1170  *
 * It uses non-atomic bit operations because it is called under the mutex.
1172  */
1173 static void bnx2x_fill_report_data(struct bnx2x *bp,
1174 				   struct bnx2x_link_report_data *data)
1175 {
1176 	u16 line_speed = bnx2x_get_mf_speed(bp);
1177 
1178 	memset(data, 0, sizeof(*data));
1179 
1180 	/* Fill the report data: effective line speed */
1181 	data->line_speed = line_speed;
1182 
1183 	/* Link is down */
1184 	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1185 		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1186 			  &data->link_report_flags);
1187 
1188 	/* Full DUPLEX */
1189 	if (bp->link_vars.duplex == DUPLEX_FULL)
1190 		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1191 
1192 	/* Rx Flow Control is ON */
1193 	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1194 		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1195 
1196 	/* Tx Flow Control is ON */
1197 	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1198 		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1199 }
1200 
1201 /**
1202  * bnx2x_link_report - report link status to OS.
1203  *
1204  * @bp:		driver handle
1205  *
1206  * Calls the __bnx2x_link_report() under the same locking scheme
1207  * as a link/PHY state managing code to ensure a consistent link
1208  * reporting.
1209  */
1210 
1211 void bnx2x_link_report(struct bnx2x *bp)
1212 {
1213 	bnx2x_acquire_phy_lock(bp);
1214 	__bnx2x_link_report(bp);
1215 	bnx2x_release_phy_lock(bp);
1216 }
1217 
1218 /**
1219  * __bnx2x_link_report - report link status to OS.
1220  *
1221  * @bp:		driver handle
1222  *
 * Non-atomic implementation.
1224  * Should be called under the phy_lock.
1225  */
1226 void __bnx2x_link_report(struct bnx2x *bp)
1227 {
1228 	struct bnx2x_link_report_data cur_data;
1229 
1230 	/* reread mf_cfg */
1231 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
1232 		bnx2x_read_mf_cfg(bp);
1233 
1234 	/* Read the current link report info */
1235 	bnx2x_fill_report_data(bp, &cur_data);
1236 
1237 	/* Don't report link down or exactly the same link status twice */
1238 	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1239 	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1240 		      &bp->last_reported_link.link_report_flags) &&
1241 	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1242 		      &cur_data.link_report_flags)))
1243 		return;
1244 
1245 	bp->link_cnt++;
1246 
	/* We are going to report new link parameters now -
1248 	 * remember the current data for the next time.
1249 	 */
1250 	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1251 
1252 	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1253 		     &cur_data.link_report_flags)) {
1254 		netif_carrier_off(bp->dev);
1255 		netdev_err(bp->dev, "NIC Link is Down\n");
1256 		return;
1257 	} else {
1258 		const char *duplex;
1259 		const char *flow;
1260 
1261 		netif_carrier_on(bp->dev);
1262 
1263 		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1264 				       &cur_data.link_report_flags))
1265 			duplex = "full";
1266 		else
1267 			duplex = "half";
1268 
		/* Handle the FC flags at the end so that they are the only
		 * flags that can still be set. This way we can easily check
		 * whether any flow control is enabled at all.
1272 		 */
1273 		if (cur_data.link_report_flags) {
1274 			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1275 				     &cur_data.link_report_flags)) {
1276 				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1277 				     &cur_data.link_report_flags))
1278 					flow = "ON - receive & transmit";
1279 				else
1280 					flow = "ON - receive";
1281 			} else {
1282 				flow = "ON - transmit";
1283 			}
1284 		} else {
1285 			flow = "none";
1286 		}
1287 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1288 			    cur_data.line_speed, duplex, flow);
1289 	}
1290 }
1291 
1292 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1293 {
1294 	int i;
1295 
1296 	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1297 		struct eth_rx_sge *sge;
1298 
1299 		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1300 		sge->addr_hi =
1301 			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1302 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1303 
1304 		sge->addr_lo =
1305 			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1306 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1307 	}
1308 }
1309 
1310 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1311 				struct bnx2x_fastpath *fp, int last)
1312 {
1313 	int i;
1314 
1315 	for (i = 0; i < last; i++) {
1316 		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1317 		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1318 		u8 *data = first_buf->data;
1319 
1320 		if (data == NULL) {
1321 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1322 			continue;
1323 		}
1324 		if (tpa_info->tpa_state == BNX2X_TPA_START)
1325 			dma_unmap_single(&bp->pdev->dev,
1326 					 dma_unmap_addr(first_buf, mapping),
1327 					 fp->rx_buf_size, DMA_FROM_DEVICE);
1328 		bnx2x_frag_free(fp, data);
1329 		first_buf->data = NULL;
1330 	}
1331 }
1332 
1333 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1334 {
1335 	int j;
1336 
1337 	for_each_rx_queue_cnic(bp, j) {
1338 		struct bnx2x_fastpath *fp = &bp->fp[j];
1339 
1340 		fp->rx_bd_cons = 0;
1341 
1342 		/* Activate BD ring */
1343 		/* Warning!
		 * this will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized
1346 		 */
1347 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1348 				     fp->rx_sge_prod);
1349 	}
1350 }
1351 
1352 void bnx2x_init_rx_rings(struct bnx2x *bp)
1353 {
1354 	int func = BP_FUNC(bp);
1355 	u16 ring_prod;
1356 	int i, j;
1357 
1358 	/* Allocate TPA resources */
1359 	for_each_eth_queue(bp, j) {
1360 		struct bnx2x_fastpath *fp = &bp->fp[j];
1361 
1362 		DP(NETIF_MSG_IFUP,
1363 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1364 
1365 		if (!fp->disable_tpa) {
1366 			/* Fill the per-aggregation pool */
1367 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
1368 				struct bnx2x_agg_info *tpa_info =
1369 					&fp->tpa_info[i];
1370 				struct sw_rx_bd *first_buf =
1371 					&tpa_info->first_buf;
1372 
1373 				first_buf->data =
1374 					bnx2x_frag_alloc(fp, GFP_KERNEL);
1375 				if (!first_buf->data) {
1376 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1377 						  j);
1378 					bnx2x_free_tpa_pool(bp, fp, i);
1379 					fp->disable_tpa = 1;
1380 					break;
1381 				}
1382 				dma_unmap_addr_set(first_buf, mapping, 0);
1383 				tpa_info->tpa_state = BNX2X_TPA_STOP;
1384 			}
1385 
1386 			/* "next page" elements initialization */
1387 			bnx2x_set_next_page_sgl(fp);
1388 
1389 			/* set SGEs bit mask */
1390 			bnx2x_init_sge_ring_bit_mask(fp);
1391 
1392 			/* Allocate SGEs and initialize the ring elements */
1393 			for (i = 0, ring_prod = 0;
1394 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1395 
1396 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1397 						       GFP_KERNEL) < 0) {
1398 					BNX2X_ERR("was only able to allocate %d rx sges\n",
1399 						  i);
1400 					BNX2X_ERR("disabling TPA for queue[%d]\n",
1401 						  j);
1402 					/* Cleanup already allocated elements */
1403 					bnx2x_free_rx_sge_range(bp, fp,
1404 								ring_prod);
1405 					bnx2x_free_tpa_pool(bp, fp,
1406 							    MAX_AGG_QS(bp));
1407 					fp->disable_tpa = 1;
1408 					ring_prod = 0;
1409 					break;
1410 				}
1411 				ring_prod = NEXT_SGE_IDX(ring_prod);
1412 			}
1413 
1414 			fp->rx_sge_prod = ring_prod;
1415 		}
1416 	}
1417 
1418 	for_each_eth_queue(bp, j) {
1419 		struct bnx2x_fastpath *fp = &bp->fp[j];
1420 
1421 		fp->rx_bd_cons = 0;
1422 
1423 		/* Activate BD ring */
1424 		/* Warning!
		 * this will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized
1427 		 */
1428 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1429 				     fp->rx_sge_prod);
1430 
1431 		if (j != 0)
1432 			continue;
1433 
1434 		if (CHIP_IS_E1(bp)) {
1435 			REG_WR(bp, BAR_USTRORM_INTMEM +
1436 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1437 			       U64_LO(fp->rx_comp_mapping));
1438 			REG_WR(bp, BAR_USTRORM_INTMEM +
1439 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1440 			       U64_HI(fp->rx_comp_mapping));
1441 		}
1442 	}
1443 }
1444 
1445 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1446 {
1447 	u8 cos;
1448 	struct bnx2x *bp = fp->bp;
1449 
1450 	for_each_cos_in_tx_queue(fp, cos) {
1451 		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1452 		unsigned pkts_compl = 0, bytes_compl = 0;
1453 
1454 		u16 sw_prod = txdata->tx_pkt_prod;
1455 		u16 sw_cons = txdata->tx_pkt_cons;
1456 
1457 		while (sw_cons != sw_prod) {
1458 			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1459 					  &pkts_compl, &bytes_compl);
1460 			sw_cons++;
1461 		}
1462 
1463 		netdev_tx_reset_queue(
1464 			netdev_get_tx_queue(bp->dev,
1465 					    txdata->txq_index));
1466 	}
1467 }
1468 
1469 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1470 {
1471 	int i;
1472 
1473 	for_each_tx_queue_cnic(bp, i) {
1474 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1475 	}
1476 }
1477 
1478 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1479 {
1480 	int i;
1481 
1482 	for_each_eth_queue(bp, i) {
1483 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1484 	}
1485 }
1486 
1487 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1488 {
1489 	struct bnx2x *bp = fp->bp;
1490 	int i;
1491 
1492 	/* ring wasn't allocated */
1493 	if (fp->rx_buf_ring == NULL)
1494 		return;
1495 
1496 	for (i = 0; i < NUM_RX_BD; i++) {
1497 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1498 		u8 *data = rx_buf->data;
1499 
1500 		if (data == NULL)
1501 			continue;
1502 		dma_unmap_single(&bp->pdev->dev,
1503 				 dma_unmap_addr(rx_buf, mapping),
1504 				 fp->rx_buf_size, DMA_FROM_DEVICE);
1505 
1506 		rx_buf->data = NULL;
1507 		bnx2x_frag_free(fp, data);
1508 	}
1509 }
1510 
1511 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1512 {
1513 	int j;
1514 
1515 	for_each_rx_queue_cnic(bp, j) {
1516 		bnx2x_free_rx_bds(&bp->fp[j]);
1517 	}
1518 }
1519 
1520 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1521 {
1522 	int j;
1523 
1524 	for_each_eth_queue(bp, j) {
1525 		struct bnx2x_fastpath *fp = &bp->fp[j];
1526 
1527 		bnx2x_free_rx_bds(fp);
1528 
1529 		if (!fp->disable_tpa)
1530 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1531 	}
1532 }
1533 
1534 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1535 {
1536 	bnx2x_free_tx_skbs_cnic(bp);
1537 	bnx2x_free_rx_skbs_cnic(bp);
1538 }
1539 
1540 void bnx2x_free_skbs(struct bnx2x *bp)
1541 {
1542 	bnx2x_free_tx_skbs(bp);
1543 	bnx2x_free_rx_skbs(bp);
1544 }
1545 
1546 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1547 {
1548 	/* load old values */
1549 	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1550 
1551 	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1552 		/* leave all but MAX value */
1553 		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1554 
1555 		/* set new MAX value */
1556 		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1557 				& FUNC_MF_CFG_MAX_BW_MASK;
1558 
1559 		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1560 	}
1561 }
1562 
1563 /**
1564  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1565  *
1566  * @bp:		driver handle
1567  * @nvecs:	number of vectors to be released
1568  */
1569 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1570 {
1571 	int i, offset = 0;
1572 
1573 	if (nvecs == offset)
1574 		return;
1575 
1576 	/* VFs don't have a default SB */
1577 	if (IS_PF(bp)) {
1578 		free_irq(bp->msix_table[offset].vector, bp->dev);
1579 		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1580 		   bp->msix_table[offset].vector);
1581 		offset++;
1582 	}
1583 
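	/* The CNIC vector is never requested in bnx2x_req_msix_irqs(), so
	 * there is nothing to free here - just skip past its slot.
	 */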
1584 	if (CNIC_SUPPORT(bp)) {
1585 		if (nvecs == offset)
1586 			return;
1587 		offset++;
1588 	}
1589 
1590 	for_each_eth_queue(bp, i) {
1591 		if (nvecs == offset)
1592 			return;
1593 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1594 		   i, bp->msix_table[offset].vector);
1595 
1596 		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1597 	}
1598 }
1599 
1600 void bnx2x_free_irq(struct bnx2x *bp)
1601 {
1602 	if (bp->flags & USING_MSIX_FLAG &&
1603 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1604 		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1605 
1606 		/* vfs don't have a default status block */
1607 		if (IS_PF(bp))
1608 			nvecs++;
1609 
1610 		bnx2x_free_msix_irqs(bp, nvecs);
1611 	} else {
1612 		free_irq(bp->dev->irq, bp->dev);
1613 	}
1614 }
1615 
1616 int bnx2x_enable_msix(struct bnx2x *bp)
1617 {
1618 	int msix_vec = 0, i, rc;
1619 
1620 	/* VFs don't have a default status block */
1621 	if (IS_PF(bp)) {
1622 		bp->msix_table[msix_vec].entry = msix_vec;
1623 		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1624 			       bp->msix_table[0].entry);
1625 		msix_vec++;
1626 	}
1627 
1628 	/* Cnic requires an msix vector for itself */
1629 	if (CNIC_SUPPORT(bp)) {
1630 		bp->msix_table[msix_vec].entry = msix_vec;
1631 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1632 			       msix_vec, bp->msix_table[msix_vec].entry);
1633 		msix_vec++;
1634 	}
1635 
1636 	/* We need separate vectors for ETH queues only (not FCoE) */
1637 	for_each_eth_queue(bp, i) {
1638 		bp->msix_table[msix_vec].entry = msix_vec;
1639 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1640 			       msix_vec, msix_vec, i);
1641 		msix_vec++;
1642 	}
1643 
1644 	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1645 	   msix_vec);
1646 
1647 	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1648 				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1649 	/*
1650 	 * reconfigure number of tx/rx queues according to available
1651 	 * MSI-X vectors
1652 	 */
1653 	if (rc == -ENOSPC) {
1654 		/* Get by with single vector */
1655 		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1656 		if (rc < 0) {
1657 			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1658 				       rc);
1659 			goto no_msix;
1660 		}
1661 
1662 		BNX2X_DEV_INFO("Using single MSI-X vector\n");
1663 		bp->flags |= USING_SINGLE_MSIX_FLAG;
1664 
1665 		BNX2X_DEV_INFO("set number of queues to 1\n");
1666 		bp->num_ethernet_queues = 1;
1667 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1668 	} else if (rc < 0) {
1669 		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1670 		goto no_msix;
1671 	} else if (rc < msix_vec) {
		/* how many fewer vectors do we have? */
1673 		int diff = msix_vec - rc;
1674 
1675 		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1676 
1677 		/*
1678 		 * decrease number of queues by number of unallocated entries
1679 		 */
1680 		bp->num_ethernet_queues -= diff;
1681 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1682 
1683 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
1684 			       bp->num_queues);
1685 	}
1686 
1687 	bp->flags |= USING_MSIX_FLAG;
1688 
1689 	return 0;
1690 
1691 no_msix:
	/* fall back to INTx if not enough memory */
1693 	if (rc == -ENOMEM)
1694 		bp->flags |= DISABLE_MSI_FLAG;
1695 
1696 	return rc;
1697 }
1698 
1699 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1700 {
1701 	int i, rc, offset = 0;
1702 
1703 	/* no default status block for vf */
1704 	if (IS_PF(bp)) {
1705 		rc = request_irq(bp->msix_table[offset++].vector,
1706 				 bnx2x_msix_sp_int, 0,
1707 				 bp->dev->name, bp->dev);
1708 		if (rc) {
1709 			BNX2X_ERR("request sp irq failed\n");
1710 			return -EBUSY;
1711 		}
1712 	}
1713 
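	/* The CNIC vector slot is reserved but not requested here; just skip
	 * past it so the ETH queue vectors line up.
	 */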
1714 	if (CNIC_SUPPORT(bp))
1715 		offset++;
1716 
1717 	for_each_eth_queue(bp, i) {
1718 		struct bnx2x_fastpath *fp = &bp->fp[i];
1719 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1720 			 bp->dev->name, i);
1721 
1722 		rc = request_irq(bp->msix_table[offset].vector,
1723 				 bnx2x_msix_fp_int, 0, fp->name, fp);
1724 		if (rc) {
1725 			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1726 			      bp->msix_table[offset].vector, rc);
1727 			bnx2x_free_msix_irqs(bp, offset);
1728 			return -EBUSY;
1729 		}
1730 
1731 		offset++;
1732 	}
1733 
1734 	i = BNX2X_NUM_ETH_QUEUES(bp);
1735 	if (IS_PF(bp)) {
1736 		offset = 1 + CNIC_SUPPORT(bp);
1737 		netdev_info(bp->dev,
1738 			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1739 			    bp->msix_table[0].vector,
1740 			    0, bp->msix_table[offset].vector,
1741 			    i - 1, bp->msix_table[offset + i - 1].vector);
1742 	} else {
1743 		offset = CNIC_SUPPORT(bp);
1744 		netdev_info(bp->dev,
1745 			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1746 			    0, bp->msix_table[offset].vector,
1747 			    i - 1, bp->msix_table[offset + i - 1].vector);
1748 	}
1749 	return 0;
1750 }
1751 
1752 int bnx2x_enable_msi(struct bnx2x *bp)
1753 {
1754 	int rc;
1755 
1756 	rc = pci_enable_msi(bp->pdev);
1757 	if (rc) {
1758 		BNX2X_DEV_INFO("MSI is not attainable\n");
1759 		return -1;
1760 	}
1761 	bp->flags |= USING_MSI_FLAG;
1762 
1763 	return 0;
1764 }
1765 
1766 static int bnx2x_req_irq(struct bnx2x *bp)
1767 {
1768 	unsigned long flags;
1769 	unsigned int irq;
1770 
1771 	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1772 		flags = 0;
1773 	else
1774 		flags = IRQF_SHARED;
1775 
1776 	if (bp->flags & USING_MSIX_FLAG)
1777 		irq = bp->msix_table[0].vector;
1778 	else
1779 		irq = bp->pdev->irq;
1780 
1781 	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1782 }
1783 
1784 static int bnx2x_setup_irqs(struct bnx2x *bp)
1785 {
1786 	int rc = 0;
1787 	if (bp->flags & USING_MSIX_FLAG &&
1788 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1789 		rc = bnx2x_req_msix_irqs(bp);
1790 		if (rc)
1791 			return rc;
1792 	} else {
1793 		rc = bnx2x_req_irq(bp);
1794 		if (rc) {
1795 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1796 			return rc;
1797 		}
1798 		if (bp->flags & USING_MSI_FLAG) {
1799 			bp->dev->irq = bp->pdev->irq;
1800 			netdev_info(bp->dev, "using MSI IRQ %d\n",
1801 				    bp->dev->irq);
1802 		}
1803 		if (bp->flags & USING_MSIX_FLAG) {
1804 			bp->dev->irq = bp->msix_table[0].vector;
1805 			netdev_info(bp->dev, "using MSIX IRQ %d\n",
1806 				    bp->dev->irq);
1807 		}
1808 	}
1809 
1810 	return 0;
1811 }
1812 
1813 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1814 {
1815 	int i;
1816 
1817 	for_each_rx_queue_cnic(bp, i) {
1818 		bnx2x_fp_init_lock(&bp->fp[i]);
1819 		napi_enable(&bnx2x_fp(bp, i, napi));
1820 	}
1821 }
1822 
1823 static void bnx2x_napi_enable(struct bnx2x *bp)
1824 {
1825 	int i;
1826 
1827 	for_each_eth_queue(bp, i) {
1828 		bnx2x_fp_init_lock(&bp->fp[i]);
1829 		napi_enable(&bnx2x_fp(bp, i, napi));
1830 	}
1831 }
1832 
1833 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1834 {
1835 	int i;
1836 
1837 	for_each_rx_queue_cnic(bp, i) {
1838 		napi_disable(&bnx2x_fp(bp, i, napi));
1839 		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1840 			usleep_range(1000, 2000);
1841 	}
1842 }
1843 
1844 static void bnx2x_napi_disable(struct bnx2x *bp)
1845 {
1846 	int i;
1847 
1848 	for_each_eth_queue(bp, i) {
1849 		napi_disable(&bnx2x_fp(bp, i, napi));
1850 		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1851 			usleep_range(1000, 2000);
1852 	}
1853 }
1854 
1855 void bnx2x_netif_start(struct bnx2x *bp)
1856 {
1857 	if (netif_running(bp->dev)) {
1858 		bnx2x_napi_enable(bp);
1859 		if (CNIC_LOADED(bp))
1860 			bnx2x_napi_enable_cnic(bp);
1861 		bnx2x_int_enable(bp);
1862 		if (bp->state == BNX2X_STATE_OPEN)
1863 			netif_tx_wake_all_queues(bp->dev);
1864 	}
1865 }
1866 
1867 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1868 {
1869 	bnx2x_int_disable_sync(bp, disable_hw);
1870 	bnx2x_napi_disable(bp);
1871 	if (CNIC_LOADED(bp))
1872 		bnx2x_napi_disable_cnic(bp);
1873 }
1874 
1875 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1876 		       void *accel_priv, select_queue_fallback_t fallback)
1877 {
1878 	struct bnx2x *bp = netdev_priv(dev);
1879 
1880 	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1881 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
1882 		u16 ether_type = ntohs(hdr->h_proto);
1883 
1884 		/* Skip VLAN tag if present */
1885 		if (ether_type == ETH_P_8021Q) {
1886 			struct vlan_ethhdr *vhdr =
1887 				(struct vlan_ethhdr *)skb->data;
1888 
1889 			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1890 		}
1891 
1892 		/* If ethertype is FCoE or FIP - use FCoE ring */
1893 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1894 			return bnx2x_fcoe_tx(bp, txq_index);
1895 	}
1896 
1897 	/* select a non-FCoE queue */
1898 	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1899 }
1900 
1901 void bnx2x_set_num_queues(struct bnx2x *bp)
1902 {
1903 	/* RSS queues */
1904 	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1905 
1906 	/* override in STORAGE SD modes */
1907 	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1908 		bp->num_ethernet_queues = 1;
1909 
1910 	/* Add special queues */
1911 	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1912 	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1913 
1914 	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1915 }
1916 
1917 /**
1918  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1919  *
1920  * @bp:		Driver handle
1921  *
1922  * We currently support at most 16 Tx queues for each CoS, thus we will
1923  * allocate a multiple of 16 for ETH L2 rings according to the value of
1924  * bp->max_cos.
1925  *
1926  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1927  * index after all ETH L2 indices.
1928  *
1929  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1930  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1931  * 16..31,...) with indices that are not coupled with any real Tx queue.
1932  *
1933  * The proper configuration of skb->queue_mapping is handled by
1934  * bnx2x_select_queue() and __skb_tx_hash().
1935  *
1936  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1937  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1938  */
1939 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1940 {
1941 	int rc, tx, rx;
1942 
1943 	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1944 	rx = BNX2X_NUM_ETH_QUEUES(bp);
1945 
1946 /* account for fcoe queue */
1947 	if (include_cnic && !NO_FCOE(bp)) {
1948 		rx++;
1949 		tx++;
1950 	}
1951 
1952 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
1953 	if (rc) {
1954 		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1955 		return rc;
1956 	}
1957 	rc = netif_set_real_num_rx_queues(bp->dev, rx);
1958 	if (rc) {
1959 		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1960 		return rc;
1961 	}
1962 
1963 	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1964 			  tx, rx);
1965 
1966 	return rc;
1967 }
1968 
1969 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1970 {
1971 	int i;
1972 
1973 	for_each_queue(bp, i) {
1974 		struct bnx2x_fastpath *fp = &bp->fp[i];
1975 		u32 mtu;
1976 
1977 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
1978 		if (IS_FCOE_IDX(i))
1979 			/*
1980 			 * Although there are no IP frames expected to arrive to
1981 			 * this ring we still want to add an
1982 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1983 			 * overrun attack.
1984 			 */
1985 			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1986 		else
1987 			mtu = bp->dev->mtu;
1988 		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1989 				  IP_HEADER_ALIGNMENT_PADDING +
1990 				  ETH_OVREHEAD +
1991 				  mtu +
1992 				  BNX2X_FW_RX_ALIGN_END;
1993 		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
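		/* If the buffer (plus NET_SKB_PAD) fits into a single page,
		 * Rx skbs can be built from page fragments; rx_frag_size == 0
		 * makes the allocation path fall back to kmalloc() instead
		 * (see bnx2x_frag_alloc()).
		 */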
1994 		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1995 			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1996 		else
1997 			fp->rx_frag_size = 0;
1998 	}
1999 }
2000 
2001 static int bnx2x_init_rss(struct bnx2x *bp)
2002 {
2003 	int i;
2004 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2005 
2006 	/* Prepare the initial contents for the indirection table if RSS is
2007 	 * enabled
2008 	 */
2009 	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2010 		bp->rss_conf_obj.ind_table[i] =
2011 			bp->fp->cl_id +
2012 			ethtool_rxfh_indir_default(i, num_eth_queues);
2013 
2014 	/*
2015 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2016 	 * per-port, so if explicit configuration is needed, do it only
2017 	 * for a PMF.
2018 	 *
2019 	 * For 57712 and newer on the other hand it's a per-function
2020 	 * configuration.
2021 	 */
2022 	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2023 }
2024 
2025 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2026 	      bool config_hash, bool enable)
2027 {
2028 	struct bnx2x_config_rss_params params = {NULL};
2029 
2030 	/* Although RSS is meaningless when there is a single HW queue we
2031 	 * still need it enabled in order to have HW Rx hash generated.
2032 	 *
2033 	 * if (!is_eth_multi(bp))
2034 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2035 	 */
2036 
2037 	params.rss_obj = rss_obj;
2038 
2039 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2040 
2041 	if (enable) {
2042 		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2043 
2044 		/* RSS configuration */
2045 		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2046 		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2047 		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2048 		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2049 		if (rss_obj->udp_rss_v4)
2050 			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2051 		if (rss_obj->udp_rss_v6)
2052 			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2053 	} else {
2054 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2055 	}
2056 
2057 	/* Hash bits */
2058 	params.rss_result_mask = MULTI_MASK;
2059 
2060 	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2061 
2062 	if (config_hash) {
2063 		/* RSS keys */
2064 		prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2065 		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2066 	}
2067 
2068 	if (IS_PF(bp))
2069 		return bnx2x_config_rss(bp, &params);
2070 	else
2071 		return bnx2x_vfpf_config_rss(bp, &params);
2072 }
2073 
2074 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2075 {
2076 	struct bnx2x_func_state_params func_params = {NULL};
2077 
2078 	/* Prepare parameters for function state transitions */
2079 	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2080 
2081 	func_params.f_obj = &bp->func_obj;
2082 	func_params.cmd = BNX2X_F_CMD_HW_INIT;
2083 
2084 	func_params.params.hw_init.load_phase = load_code;
2085 
2086 	return bnx2x_func_state_change(bp, &func_params);
2087 }
2088 
2089 /*
2090  * Cleans the objects that have internal lists, without sending
2091  * ramrods. Should be run when interrupts are disabled.
2092  */
2093 void bnx2x_squeeze_objects(struct bnx2x *bp)
2094 {
2095 	int rc;
2096 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2097 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2098 	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2099 
2100 	/***************** Cleanup MACs' object first *************************/
2101 
2102 	/* Wait for completion of the requested commands */
2103 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2104 	/* Perform a dry cleanup */
2105 	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2106 
2107 	/* Clean ETH primary MAC */
2108 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2109 	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2110 				 &ramrod_flags);
2111 	if (rc != 0)
2112 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2113 
2114 	/* Cleanup UC list */
2115 	vlan_mac_flags = 0;
2116 	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2117 	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2118 				 &ramrod_flags);
2119 	if (rc != 0)
2120 		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2121 
2122 	/***************** Now clean mcast object *****************************/
2123 	rparam.mcast_obj = &bp->mcast_obj;
2124 	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2125 
2126 	/* Add a DEL command... - Since we're doing a driver cleanup only,
2127 	 * we take a lock surrounding both the initial send and the CONTs,
2128 	 * as we don't want a true completion to disrupt us in the middle.
2129 	 */
2130 	netif_addr_lock_bh(bp->dev);
2131 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2132 	if (rc < 0)
2133 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2134 			  rc);
2135 
2136 	/* ...and wait until all pending commands are cleared */
2137 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2138 	while (rc != 0) {
2139 		if (rc < 0) {
2140 			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2141 				  rc);
2142 			netif_addr_unlock_bh(bp->dev);
2143 			return;
2144 		}
2145 
2146 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2147 	}
2148 	netif_addr_unlock_bh(bp->dev);
2149 }
2150 
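/* The LOAD_ERROR_EXIT* macros: on a load error, perform the minimal
 * bookkeeping and jump to the given cleanup label; when BNX2X_STOP_ON_ERROR
 * is defined they set bp->panic and bail out with -EBUSY instead.
 */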
2151 #ifndef BNX2X_STOP_ON_ERROR
2152 #define LOAD_ERROR_EXIT(bp, label) \
2153 	do { \
2154 		(bp)->state = BNX2X_STATE_ERROR; \
2155 		goto label; \
2156 	} while (0)
2157 
2158 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2159 	do { \
2160 		bp->cnic_loaded = false; \
2161 		goto label; \
2162 	} while (0)
2163 #else /*BNX2X_STOP_ON_ERROR*/
2164 #define LOAD_ERROR_EXIT(bp, label) \
2165 	do { \
2166 		(bp)->state = BNX2X_STATE_ERROR; \
2167 		(bp)->panic = 1; \
2168 		return -EBUSY; \
2169 	} while (0)
2170 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2171 	do { \
2172 		bp->cnic_loaded = false; \
2173 		(bp)->panic = 1; \
2174 		return -EBUSY; \
2175 	} while (0)
2176 #endif /*BNX2X_STOP_ON_ERROR*/
2177 
2178 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2179 {
2180 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2181 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2182 	return;
2183 }
2184 
2185 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2186 {
2187 	int num_groups, vf_headroom = 0;
2188 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2189 
2190 	/* number of queues for statistics is number of eth queues + FCoE */
2191 	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2192 
2193 	/* Total number of FW statistics requests =
2194 	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2195 	 * and fcoe l2 queue) stats + num of queues (which includes another 1
2196 	 * for fcoe l2 queue if applicable)
2197 	 */
2198 	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2199 
2200 	/* vf stats appear in the request list, but their data is allocated by
2201 	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2202 	 * it is used to determine where to place the vf stats queries in the
2203 	 * request struct
2204 	 */
2205 	if (IS_SRIOV(bp))
2206 		vf_headroom = bnx2x_vf_headroom(bp);
2207 
2208 	/* Request is built from stats_query_header and an array of
2209 	 * stats_query_cmd_group each of which contains
2210 	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2211 	 * configured in the stats_query_header.
2212 	 */
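	/* i.e. num_groups =
	 *	DIV_ROUND_UP(bp->fw_stats_num + vf_headroom, STATS_QUERY_CMD_COUNT)
	 */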
2213 	num_groups =
2214 		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2215 		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2216 		 1 : 0));
2217 
2218 	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2219 	   bp->fw_stats_num, vf_headroom, num_groups);
2220 	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2221 		num_groups * sizeof(struct stats_query_cmd_group);
2222 
2223 	/* Data for statistics requests + stats_counter
2224 	 * stats_counter holds per-STORM counters that are incremented
2225 	 * when STORM has finished with the current request.
2226 	 * Memory for FCoE offloaded statistics is counted anyway,
2227 	 * even if it will not be sent.
2228 	 * VF stats are not accounted for here, as their data is stored
2229 	 * in memory allocated by the VF, not here.
2230 	 */
2231 	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2232 		sizeof(struct per_pf_stats) +
2233 		sizeof(struct fcoe_statistics_params) +
2234 		sizeof(struct per_queue_stats) * num_queue_stats +
2235 		sizeof(struct stats_counter);
2236 
2237 	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2238 				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2239 	if (!bp->fw_stats)
2240 		goto alloc_mem_err;
2241 
2242 	/* Set shortcuts */
2243 	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2244 	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2245 	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2246 		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2247 	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2248 		bp->fw_stats_req_sz;
2249 
2250 	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2251 	   U64_HI(bp->fw_stats_req_mapping),
2252 	   U64_LO(bp->fw_stats_req_mapping));
2253 	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2254 	   U64_HI(bp->fw_stats_data_mapping),
2255 	   U64_LO(bp->fw_stats_data_mapping));
2256 	return 0;
2257 
2258 alloc_mem_err:
2259 	bnx2x_free_fw_stats_mem(bp);
2260 	BNX2X_ERR("Can't allocate FW stats memory\n");
2261 	return -ENOMEM;
2262 }
2263 
2264 /* send load request to mcp and analyze response */
2265 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2266 {
2267 	u32 param;
2268 
2269 	/* init fw_seq */
2270 	bp->fw_seq =
2271 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2272 		 DRV_MSG_SEQ_NUMBER_MASK);
2273 	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2274 
2275 	/* Get current FW pulse sequence */
2276 	bp->fw_drv_pulse_wr_seq =
2277 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2278 		 DRV_PULSE_SEQ_MASK);
2279 	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2280 
2281 	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2282 
2283 	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2284 		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2285 
2286 	/* load request */
2287 	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2288 
2289 	/* if mcp fails to respond we must abort */
2290 	if (!(*load_code)) {
2291 		BNX2X_ERR("MCP response failure, aborting\n");
2292 		return -EBUSY;
2293 	}
2294 
2295 	/* If mcp refused (e.g. other port is in diagnostic mode) we
2296 	 * must abort
2297 	 */
2298 	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2299 		BNX2X_ERR("MCP refused load request, aborting\n");
2300 		return -EBUSY;
2301 	}
2302 	return 0;
2303 }
2304 
2305 /* check whether another PF has already loaded FW to the chip. In
2306  * virtualized environments a PF from another VM may have already
2307  * initialized the device, including loading the FW.
2308  */
2309 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2310 {
2311 	/* is another pf loaded on this engine? */
2312 	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2313 	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2314 		/* build my FW version dword */
2315 		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2316 			(BCM_5710_FW_MINOR_VERSION << 8) +
2317 			(BCM_5710_FW_REVISION_VERSION << 16) +
2318 			(BCM_5710_FW_ENGINEERING_VERSION << 24);
2319 
2320 		/* read loaded FW from chip */
2321 		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2322 
2323 		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2324 		   loaded_fw, my_fw);
2325 
2326 		/* abort nic load if version mismatch */
2327 		if (my_fw != loaded_fw) {
2328 			if (print_err)
2329 				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2330 					  loaded_fw, my_fw);
2331 			else
2332 				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2333 					       loaded_fw, my_fw);
2334 			return -EBUSY;
2335 		}
2336 	}
2337 	return 0;
2338 }
2339 
2340 /* returns the "mcp load_code" according to global load_count array */
2341 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2342 {
2343 	int path = BP_PATH(bp);
2344 
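	/* bnx2x_load_count[path][0] counts all loads on this path and
	 * [1 + port] counts loads on the given port: the first load on the
	 * path returns COMMON, the first on the port returns PORT and any
	 * other load returns FUNCTION.
	 */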
2345 	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2346 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2347 	   bnx2x_load_count[path][2]);
2348 	bnx2x_load_count[path][0]++;
2349 	bnx2x_load_count[path][1 + port]++;
2350 	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2351 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2352 	   bnx2x_load_count[path][2]);
2353 	if (bnx2x_load_count[path][0] == 1)
2354 		return FW_MSG_CODE_DRV_LOAD_COMMON;
2355 	else if (bnx2x_load_count[path][1 + port] == 1)
2356 		return FW_MSG_CODE_DRV_LOAD_PORT;
2357 	else
2358 		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2359 }
2360 
2361 /* mark PMF if applicable */
2362 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2363 {
2364 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2365 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2366 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2367 		bp->port.pmf = 1;
2368 		/* We need the barrier to ensure the ordering between the
2369 		 * writing to bp->port.pmf here and reading it from the
2370 		 * bnx2x_periodic_task().
2371 		 */
2372 		smp_mb();
2373 	} else {
2374 		bp->port.pmf = 0;
2375 	}
2376 
2377 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2378 }
2379 
2380 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2381 {
2382 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2383 	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2384 	    (bp->common.shmem2_base)) {
2385 		if (SHMEM2_HAS(bp, dcc_support))
2386 			SHMEM2_WR(bp, dcc_support,
2387 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2388 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2389 		if (SHMEM2_HAS(bp, afex_driver_support))
2390 			SHMEM2_WR(bp, afex_driver_support,
2391 				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2392 	}
2393 
2394 	/* Set AFEX default VLAN tag to an invalid value */
2395 	bp->afex_def_vlan_tag = -1;
2396 }
2397 
2398 /**
2399  * bnx2x_bz_fp - zero content of the fastpath structure.
2400  *
2401  * @bp:		driver handle
2402  * @index:	fastpath index to be zeroed
2403  *
2404  * Makes sure the contents of bp->fp[index].napi are kept
2405  * intact.
2406  */
2407 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2408 {
2409 	struct bnx2x_fastpath *fp = &bp->fp[index];
2410 	int cos;
2411 	struct napi_struct orig_napi = fp->napi;
2412 	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2413 
2414 	/* bzero bnx2x_fastpath contents */
2415 	if (fp->tpa_info)
2416 		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2417 		       sizeof(struct bnx2x_agg_info));
2418 	memset(fp, 0, sizeof(*fp));
2419 
2420 	/* Restore the NAPI object as it has been already initialized */
2421 	fp->napi = orig_napi;
2422 	fp->tpa_info = orig_tpa_info;
2423 	fp->bp = bp;
2424 	fp->index = index;
2425 	if (IS_ETH_FP(fp))
2426 		fp->max_cos = bp->max_cos;
2427 	else
2428 		/* Special queues support only one CoS */
2429 		fp->max_cos = 1;
2430 
2431 	/* Init txdata pointers */
2432 	if (IS_FCOE_FP(fp))
2433 		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2434 	if (IS_ETH_FP(fp))
2435 		for_each_cos_in_tx_queue(fp, cos)
2436 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2437 				BNX2X_NUM_ETH_QUEUES(bp) + index];
2438 
2439 	/* set the tpa flag for each queue. The tpa flag determines the queue
2440 	 * minimal size so it must be set prior to queue memory allocation
2441 	 */
2442 	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2443 				  (bp->flags & GRO_ENABLE_FLAG &&
2444 				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
2445 	if (bp->flags & TPA_ENABLE_FLAG)
2446 		fp->mode = TPA_MODE_LRO;
2447 	else if (bp->flags & GRO_ENABLE_FLAG)
2448 		fp->mode = TPA_MODE_GRO;
2449 
2450 	/* We don't want TPA on an FCoE L2 ring */
2451 	if (IS_FCOE_FP(fp))
2452 		fp->disable_tpa = 1;
2453 }
2454 
2455 int bnx2x_load_cnic(struct bnx2x *bp)
2456 {
2457 	int i, rc, port = BP_PORT(bp);
2458 
2459 	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2460 
2461 	mutex_init(&bp->cnic_mutex);
2462 
2463 	if (IS_PF(bp)) {
2464 		rc = bnx2x_alloc_mem_cnic(bp);
2465 		if (rc) {
2466 			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2467 			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2468 		}
2469 	}
2470 
2471 	rc = bnx2x_alloc_fp_mem_cnic(bp);
2472 	if (rc) {
2473 		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2474 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2475 	}
2476 
2477 	/* Update the number of queues with the cnic queues */
2478 	rc = bnx2x_set_real_num_queues(bp, 1);
2479 	if (rc) {
2480 		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2481 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2482 	}
2483 
2484 	/* Add all CNIC NAPI objects */
2485 	bnx2x_add_all_napi_cnic(bp);
2486 	DP(NETIF_MSG_IFUP, "cnic napi added\n");
2487 	bnx2x_napi_enable_cnic(bp);
2488 
2489 	rc = bnx2x_init_hw_func_cnic(bp);
2490 	if (rc)
2491 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2492 
2493 	bnx2x_nic_init_cnic(bp);
2494 
2495 	if (IS_PF(bp)) {
2496 		/* Enable Timer scan */
2497 		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2498 
2499 		/* setup cnic queues */
2500 		for_each_cnic_queue(bp, i) {
2501 			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2502 			if (rc) {
2503 				BNX2X_ERR("Queue setup failed\n");
2504 				LOAD_ERROR_EXIT(bp, load_error_cnic2);
2505 			}
2506 		}
2507 	}
2508 
2509 	/* Initialize Rx filter. */
2510 	bnx2x_set_rx_mode_inner(bp);
2511 
2512 	/* re-read iscsi info */
2513 	bnx2x_get_iscsi_info(bp);
2514 	bnx2x_setup_cnic_irq_info(bp);
2515 	bnx2x_setup_cnic_info(bp);
2516 	bp->cnic_loaded = true;
2517 	if (bp->state == BNX2X_STATE_OPEN)
2518 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2519 
2520 	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2521 
2522 	return 0;
2523 
2524 #ifndef BNX2X_STOP_ON_ERROR
2525 load_error_cnic2:
2526 	/* Disable Timer scan */
2527 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2528 
2529 load_error_cnic1:
2530 	bnx2x_napi_disable_cnic(bp);
2531 	/* Update the number of queues without the cnic queues */
2532 	if (bnx2x_set_real_num_queues(bp, 0))
2533 		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2534 load_error_cnic0:
2535 	BNX2X_ERR("CNIC-related load failed\n");
2536 	bnx2x_free_fp_mem_cnic(bp);
2537 	bnx2x_free_mem_cnic(bp);
2538 	return rc;
2539 #endif /* ! BNX2X_STOP_ON_ERROR */
2540 }
2541 
2542 /* must be called with rtnl_lock */
2543 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2544 {
2545 	int port = BP_PORT(bp);
2546 	int i, rc = 0, load_code = 0;
2547 
2548 	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2549 	DP(NETIF_MSG_IFUP,
2550 	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2551 
2552 #ifdef BNX2X_STOP_ON_ERROR
2553 	if (unlikely(bp->panic)) {
2554 		BNX2X_ERR("Can't load NIC when there is panic\n");
2555 		return -EPERM;
2556 	}
2557 #endif
2558 
2559 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2560 
2561 	/* zero the structure w/o any lock, before SP handler is initialized */
2562 	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2563 	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2564 		&bp->last_reported_link.link_report_flags);
2565 
2566 	if (IS_PF(bp))
2567 		/* must be called before memory allocation and HW init */
2568 		bnx2x_ilt_set_info(bp);
2569 
2570 	/*
2571 	 * Zero fastpath structures, preserving invariants such as the napi
2572 	 * struct (allocated only once), fp index, max_cos and the bp pointer.
2573 	 * Also set fp->disable_tpa and txdata_ptr.
2574 	 */
2575 	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2576 	for_each_queue(bp, i)
2577 		bnx2x_bz_fp(bp, i);
2578 	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2579 				  bp->num_cnic_queues) *
2580 				  sizeof(struct bnx2x_fp_txdata));
2581 
2582 	bp->fcoe_init = false;
2583 
2584 	/* Set the receive queues buffer size */
2585 	bnx2x_set_rx_buf_size(bp);
2586 
2587 	if (IS_PF(bp)) {
2588 		rc = bnx2x_alloc_mem(bp);
2589 		if (rc) {
2590 			BNX2X_ERR("Unable to allocate bp memory\n");
2591 			return rc;
2592 		}
2593 	}
2594 
2595 	/* Needs to be done after alloc mem, since it's self-adjusting to the
2596 	 * amount of memory available for RSS queues
2597 	 */
2598 	rc = bnx2x_alloc_fp_mem(bp);
2599 	if (rc) {
2600 		BNX2X_ERR("Unable to allocate memory for fps\n");
2601 		LOAD_ERROR_EXIT(bp, load_error0);
2602 	}
2603 
2604 	/* Allocate memory for FW statistics */
2605 	if (bnx2x_alloc_fw_stats_mem(bp))
2606 		LOAD_ERROR_EXIT(bp, load_error0);
2607 
2608 	/* request pf to initialize status blocks */
2609 	if (IS_VF(bp)) {
2610 		rc = bnx2x_vfpf_init(bp);
2611 		if (rc)
2612 			LOAD_ERROR_EXIT(bp, load_error0);
2613 	}
2614 
2615 	/* Since bnx2x_alloc_mem() may update
2616 	 * bp->num_queues, bnx2x_set_real_num_queues() should always
2617 	 * come after it. At this stage CNIC queues are not counted.
2618 	 */
2619 	rc = bnx2x_set_real_num_queues(bp, 0);
2620 	if (rc) {
2621 		BNX2X_ERR("Unable to set real_num_queues\n");
2622 		LOAD_ERROR_EXIT(bp, load_error0);
2623 	}
2624 
2625 	/* Configure multi-CoS mappings in the kernel.
2626 	 * This configuration may be overridden by a multi-class queue
2627 	 * discipline or by a DCBX negotiation result.
2628 	 */
2629 	bnx2x_setup_tc(bp->dev, bp->max_cos);
2630 
2631 	/* Add all NAPI objects */
2632 	bnx2x_add_all_napi(bp);
2633 	DP(NETIF_MSG_IFUP, "napi added\n");
2634 	bnx2x_napi_enable(bp);
2635 
2636 	if (IS_PF(bp)) {
2637 		/* set pf load just before approaching the MCP */
2638 		bnx2x_set_pf_load(bp);
2639 
2640 		/* if mcp exists send load request and analyze response */
2641 		if (!BP_NOMCP(bp)) {
2642 			/* attempt to load pf */
2643 			rc = bnx2x_nic_load_request(bp, &load_code);
2644 			if (rc)
2645 				LOAD_ERROR_EXIT(bp, load_error1);
2646 
2647 			/* what did mcp say? */
2648 			rc = bnx2x_compare_fw_ver(bp, load_code, true);
2649 			if (rc) {
2650 				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2651 				LOAD_ERROR_EXIT(bp, load_error2);
2652 			}
2653 		} else {
2654 			load_code = bnx2x_nic_load_no_mcp(bp, port);
2655 		}
2656 
2657 		/* mark pmf if applicable */
2658 		bnx2x_nic_load_pmf(bp, load_code);
2659 
2660 		/* Init Function state controlling object */
2661 		bnx2x__init_func_obj(bp);
2662 
2663 		/* Initialize HW */
2664 		rc = bnx2x_init_hw(bp, load_code);
2665 		if (rc) {
2666 			BNX2X_ERR("HW init failed, aborting\n");
2667 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2668 			LOAD_ERROR_EXIT(bp, load_error2);
2669 		}
2670 	}
2671 
2672 	bnx2x_pre_irq_nic_init(bp);
2673 
2674 	/* Connect to IRQs */
2675 	rc = bnx2x_setup_irqs(bp);
2676 	if (rc) {
2677 		BNX2X_ERR("setup irqs failed\n");
2678 		if (IS_PF(bp))
2679 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2680 		LOAD_ERROR_EXIT(bp, load_error2);
2681 	}
2682 
2683 	/* Init per-function objects */
2684 	if (IS_PF(bp)) {
2685 		/* Setup NIC internals and enable interrupts */
2686 		bnx2x_post_irq_nic_init(bp, load_code);
2687 
2688 		bnx2x_init_bp_objs(bp);
2689 		bnx2x_iov_nic_init(bp);
2690 
2691 		/* Set AFEX default VLAN tag to an invalid value */
2692 		bp->afex_def_vlan_tag = -1;
2693 		bnx2x_nic_load_afex_dcc(bp, load_code);
2694 		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2695 		rc = bnx2x_func_start(bp);
2696 		if (rc) {
2697 			BNX2X_ERR("Function start failed!\n");
2698 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2699 
2700 			LOAD_ERROR_EXIT(bp, load_error3);
2701 		}
2702 
2703 		/* Send LOAD_DONE command to MCP */
2704 		if (!BP_NOMCP(bp)) {
2705 			load_code = bnx2x_fw_command(bp,
2706 						     DRV_MSG_CODE_LOAD_DONE, 0);
2707 			if (!load_code) {
2708 				BNX2X_ERR("MCP response failure, aborting\n");
2709 				rc = -EBUSY;
2710 				LOAD_ERROR_EXIT(bp, load_error3);
2711 			}
2712 		}
2713 
2714 		/* initialize FW coalescing state machines in RAM */
2715 		bnx2x_update_coalesce(bp);
2716 	}
2717 
2718 	/* setup the leading queue */
2719 	rc = bnx2x_setup_leading(bp);
2720 	if (rc) {
2721 		BNX2X_ERR("Setup leading failed!\n");
2722 		LOAD_ERROR_EXIT(bp, load_error3);
2723 	}
2724 
2725 	/* set up the rest of the queues */
2726 	for_each_nondefault_eth_queue(bp, i) {
2727 		if (IS_PF(bp))
2728 			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2729 		else /* VF */
2730 			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2731 		if (rc) {
2732 			BNX2X_ERR("Queue %d setup failed\n", i);
2733 			LOAD_ERROR_EXIT(bp, load_error3);
2734 		}
2735 	}
2736 
2737 	/* setup rss */
2738 	rc = bnx2x_init_rss(bp);
2739 	if (rc) {
2740 		BNX2X_ERR("PF RSS init failed\n");
2741 		LOAD_ERROR_EXIT(bp, load_error3);
2742 	}
2743 
2744 	/* Now that clients are configured we are ready to work */
2745 	bp->state = BNX2X_STATE_OPEN;
2746 
2747 	/* Configure a ucast MAC */
2748 	if (IS_PF(bp))
2749 		rc = bnx2x_set_eth_mac(bp, true);
2750 	else /* vf */
2751 		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2752 					   true);
2753 	if (rc) {
2754 		BNX2X_ERR("Setting Ethernet MAC failed\n");
2755 		LOAD_ERROR_EXIT(bp, load_error3);
2756 	}
2757 
2758 	if (IS_PF(bp) && bp->pending_max) {
2759 		bnx2x_update_max_mf_config(bp, bp->pending_max);
2760 		bp->pending_max = 0;
2761 	}
2762 
2763 	if (bp->port.pmf) {
2764 		rc = bnx2x_initial_phy_init(bp, load_mode);
2765 		if (rc)
2766 			LOAD_ERROR_EXIT(bp, load_error3);
2767 	}
2768 	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2769 
2770 	/* Start fast path */
2771 
2772 	/* Initialize Rx filter. */
2773 	bnx2x_set_rx_mode_inner(bp);
2774 
2775 	/* Start the Tx */
2776 	switch (load_mode) {
2777 	case LOAD_NORMAL:
2778 		/* Tx queue should be only re-enabled */
2779 		netif_tx_wake_all_queues(bp->dev);
2780 		break;
2781 
2782 	case LOAD_OPEN:
2783 		netif_tx_start_all_queues(bp->dev);
2784 		smp_mb__after_clear_bit();
2785 		break;
2786 
2787 	case LOAD_DIAG:
2788 	case LOAD_LOOPBACK_EXT:
2789 		bp->state = BNX2X_STATE_DIAG;
2790 		break;
2791 
2792 	default:
2793 		break;
2794 	}
2795 
2796 	if (bp->port.pmf)
2797 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2798 	else
2799 		bnx2x__link_status_update(bp);
2800 
2801 	/* start the timer */
2802 	mod_timer(&bp->timer, jiffies + bp->current_interval);
2803 
2804 	if (CNIC_ENABLED(bp))
2805 		bnx2x_load_cnic(bp);
2806 
2807 	if (IS_PF(bp))
2808 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2809 
2810 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2811 		/* mark driver is loaded in shmem2 */
2812 		u32 val;
2813 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2814 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2815 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2816 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
2817 	}
2818 
2819 	/* Wait for all pending SP commands to complete */
2820 	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2821 		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2822 		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2823 		return -EBUSY;
2824 	}
2825 
2826 	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2827 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2828 		bnx2x_dcbx_init(bp, false);
2829 
2830 	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2831 
2832 	return 0;
2833 
2834 #ifndef BNX2X_STOP_ON_ERROR
2835 load_error3:
2836 	if (IS_PF(bp)) {
2837 		bnx2x_int_disable_sync(bp, 1);
2838 
2839 		/* Clean queueable objects */
2840 		bnx2x_squeeze_objects(bp);
2841 	}
2842 
2843 	/* Free SKBs, SGEs, TPA pool and driver internals */
2844 	bnx2x_free_skbs(bp);
2845 	for_each_rx_queue(bp, i)
2846 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2847 
2848 	/* Release IRQs */
2849 	bnx2x_free_irq(bp);
2850 load_error2:
2851 	if (IS_PF(bp) && !BP_NOMCP(bp)) {
2852 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2853 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2854 	}
2855 
2856 	bp->port.pmf = 0;
2857 load_error1:
2858 	bnx2x_napi_disable(bp);
2859 	bnx2x_del_all_napi(bp);
2860 
2861 	/* clear pf_load status, as it was already set */
2862 	if (IS_PF(bp))
2863 		bnx2x_clear_pf_load(bp);
2864 load_error0:
2865 	bnx2x_free_fw_stats_mem(bp);
2866 	bnx2x_free_fp_mem(bp);
2867 	bnx2x_free_mem(bp);
2868 
2869 	return rc;
2870 #endif /* ! BNX2X_STOP_ON_ERROR */
2871 }
2872 
2873 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2874 {
2875 	u8 rc = 0, cos, i;
2876 
2877 	/* Wait until tx fastpath tasks complete */
2878 	for_each_tx_queue(bp, i) {
2879 		struct bnx2x_fastpath *fp = &bp->fp[i];
2880 
2881 		for_each_cos_in_tx_queue(fp, cos)
2882 			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2883 		if (rc)
2884 			return rc;
2885 	}
2886 	return 0;
2887 }
2888 
2889 /* must be called with rtnl_lock */
2890 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2891 {
2892 	int i;
2893 	bool global = false;
2894 
2895 	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2896 
2897 	/* mark driver is unloaded in shmem2 */
2898 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2899 		u32 val;
2900 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2901 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2902 			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2903 	}
2904 
2905 	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2906 	    (bp->state == BNX2X_STATE_CLOSED ||
2907 	     bp->state == BNX2X_STATE_ERROR)) {
2908 		/* We can get here if the driver has been unloaded
2909 		 * during parity error recovery and is either waiting for a
2910 		 * leader to complete or for other functions to unload and
2911 		 * then ifdown has been issued. In this case we want to
2912 		 * unload and let other functions complete the recovery
2913 		 * process.
2914 		 */
2915 		bp->recovery_state = BNX2X_RECOVERY_DONE;
2916 		bp->is_leader = 0;
2917 		bnx2x_release_leader_lock(bp);
2918 		smp_mb();
2919 
2920 		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2921 		BNX2X_ERR("Can't unload in closed or error state\n");
2922 		return -EINVAL;
2923 	}
2924 
2925 	/* Nothing to do during unload if the previous bnx2x_nic_load()
2926 	 * has not completed successfully - all resources are released.
2927 	 *
2928 	 * We can get here only after an unsuccessful ndo_* callback, during which
2929 	 * the dev->IFF_UP flag is still on.
2930 	 */
2931 	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2932 		return 0;
2933 
2934 	/* It's important to set bp->state to a value different from
2935 	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2936 	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2937 	 */
2938 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2939 	smp_mb();
2940 
2941 	/* indicate to VFs that the PF is going down */
2942 	bnx2x_iov_channel_down(bp);
2943 
2944 	if (CNIC_LOADED(bp))
2945 		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2946 
2947 	/* Stop Tx */
2948 	bnx2x_tx_disable(bp);
2949 	netdev_reset_tc(bp->dev);
2950 
2951 	bp->rx_mode = BNX2X_RX_MODE_NONE;
2952 
2953 	del_timer_sync(&bp->timer);
2954 
2955 	if (IS_PF(bp)) {
2956 		/* Set ALWAYS_ALIVE bit in shmem */
2957 		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2958 		bnx2x_drv_pulse(bp);
2959 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2960 		bnx2x_save_statistics(bp);
2961 	}
2962 
2963 	/* wait till consumers catch up with producers in all queues */
2964 	bnx2x_drain_tx_queues(bp);
2965 
2966 	/* If VF, indicate to the PF that this function is going down (the PF
2967 	 * will delete SP elements and clear initializations).
2968 	 */
2969 	if (IS_VF(bp))
2970 		bnx2x_vfpf_close_vf(bp);
2971 	else if (unload_mode != UNLOAD_RECOVERY)
2972 		/* if this is a normal/close unload need to clean up chip*/
2973 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2974 	else {
2975 		/* Send the UNLOAD_REQUEST to the MCP */
2976 		bnx2x_send_unload_req(bp, unload_mode);
2977 
2978 		/* Prevent transactions to the host from the functions on the
2979 		 * engine that doesn't reset global blocks in case of global
2980 		 * attention, once global blocks are reset and gates are opened
2981 		 * (the engine whose leader will perform the recovery
2982 		 * last).
2983 		 */
2984 		if (!CHIP_IS_E1x(bp))
2985 			bnx2x_pf_disable(bp);
2986 
2987 		/* Disable HW interrupts, NAPI */
2988 		bnx2x_netif_stop(bp, 1);
2989 		/* Delete all NAPI objects */
2990 		bnx2x_del_all_napi(bp);
2991 		if (CNIC_LOADED(bp))
2992 			bnx2x_del_all_napi_cnic(bp);
2993 		/* Release IRQs */
2994 		bnx2x_free_irq(bp);
2995 
2996 		/* Report UNLOAD_DONE to MCP */
2997 		bnx2x_send_unload_done(bp, false);
2998 	}
2999 
3000 	/*
3001 	 * At this stage no more interrupts will arrive so we may safely clean
3002 	 * the queueable objects here in case they failed to get cleaned so far.
3003 	 */
3004 	if (IS_PF(bp))
3005 		bnx2x_squeeze_objects(bp);
3006 
3007 	/* There should be no more pending SP commands at this stage */
3008 	bp->sp_state = 0;
3009 
3010 	bp->port.pmf = 0;
3011 
3012 	/* clear pending work in rtnl task */
3013 	bp->sp_rtnl_state = 0;
3014 	smp_mb();
3015 
3016 	/* Free SKBs, SGEs, TPA pool and driver internals */
3017 	bnx2x_free_skbs(bp);
3018 	if (CNIC_LOADED(bp))
3019 		bnx2x_free_skbs_cnic(bp);
3020 	for_each_rx_queue(bp, i)
3021 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3022 
3023 	bnx2x_free_fp_mem(bp);
3024 	if (CNIC_LOADED(bp))
3025 		bnx2x_free_fp_mem_cnic(bp);
3026 
3027 	if (IS_PF(bp)) {
3028 		if (CNIC_LOADED(bp))
3029 			bnx2x_free_mem_cnic(bp);
3030 	}
3031 	bnx2x_free_mem(bp);
3032 
3033 	bp->state = BNX2X_STATE_CLOSED;
3034 	bp->cnic_loaded = false;
3035 
3036 	/* Clear driver version indication in shmem */
3037 	if (IS_PF(bp))
3038 		bnx2x_update_mng_version(bp);
3039 
3040 	/* Check if there are pending parity attentions. If there are - set
3041 	 * RECOVERY_IN_PROGRESS.
3042 	 */
3043 	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3044 		bnx2x_set_reset_in_progress(bp);
3045 
3046 		/* Set RESET_IS_GLOBAL if needed */
3047 		if (global)
3048 			bnx2x_set_reset_global(bp);
3049 	}
3050 
3051 	/* The last driver must disable "close the gate" if there is no
3052 	 * parity attention or "process kill" pending.
3053 	 */
3054 	if (IS_PF(bp) &&
3055 	    !bnx2x_clear_pf_load(bp) &&
3056 	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
3057 		bnx2x_disable_close_the_gate(bp);
3058 
3059 	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3060 
3061 	return 0;
3062 }
3063 
3064 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3065 {
3066 	u16 pmcsr;
3067 
3068 	/* If there is no power capability, silently succeed */
3069 	if (!bp->pdev->pm_cap) {
3070 		BNX2X_DEV_INFO("No power capability. Breaking.\n");
3071 		return 0;
3072 	}
3073 
3074 	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3075 
3076 	switch (state) {
3077 	case PCI_D0:
3078 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3079 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3080 				       PCI_PM_CTRL_PME_STATUS));
3081 
3082 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3083 			/* delay required during transition out of D3hot */
3084 			msleep(20);
3085 		break;
3086 
3087 	case PCI_D3hot:
3088 		/* If there are other clients above, don't
3089 		   shut down the power */
3090 		if (atomic_read(&bp->pdev->enable_cnt) != 1)
3091 			return 0;
3092 		/* Don't shut down the power for emulation and FPGA */
3093 		if (CHIP_REV_IS_SLOW(bp))
3094 			return 0;
3095 
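		/* PCI PM control state field value 3 selects D3hot */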
3096 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3097 		pmcsr |= 3;
3098 
3099 		if (bp->wol)
3100 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3101 
3102 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3103 				      pmcsr);
3104 
3105 		/* No more memory access after this point until
3106 		 * device is brought back to D0.
3107 		 */
3108 		break;
3109 
3110 	default:
3111 		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3112 		return -EINVAL;
3113 	}
3114 	return 0;
3115 }
3116 
3117 /*
3118  * net_device service functions
3119  */
3120 static int bnx2x_poll(struct napi_struct *napi, int budget)
3121 {
3122 	int work_done = 0;
3123 	u8 cos;
3124 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3125 						 napi);
3126 	struct bnx2x *bp = fp->bp;
3127 
3128 	while (1) {
3129 #ifdef BNX2X_STOP_ON_ERROR
3130 		if (unlikely(bp->panic)) {
3131 			napi_complete(napi);
3132 			return 0;
3133 		}
3134 #endif
3135 		if (!bnx2x_fp_lock_napi(fp))
3136 			return work_done;
3137 
3138 		for_each_cos_in_tx_queue(fp, cos)
3139 			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3140 				bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3141 
3142 		if (bnx2x_has_rx_work(fp)) {
3143 			work_done += bnx2x_rx_int(fp, budget - work_done);
3144 
3145 			/* must not complete if we consumed full budget */
3146 			if (work_done >= budget) {
3147 				bnx2x_fp_unlock_napi(fp);
3148 				break;
3149 			}
3150 		}
3151 
3152 		/* Fall out from the NAPI loop if needed */
3153 		if (!bnx2x_fp_unlock_napi(fp) &&
3154 		    !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3155 
3156 			/* No need to update SB for FCoE L2 ring as long as
3157 			 * it's connected to the default SB and the SB
3158 			 * has been updated when NAPI was scheduled.
3159 			 */
3160 			if (IS_FCOE_FP(fp)) {
3161 				napi_complete(napi);
3162 				break;
3163 			}
3164 			bnx2x_update_fpsb_idx(fp);
3165 			/* bnx2x_has_rx_work() reads the status block,
3166 			 * thus we need to ensure that status block indices
3167 			 * have been actually read (bnx2x_update_fpsb_idx)
3168 			 * prior to this check (bnx2x_has_rx_work) so that
3169 			 * we won't write the "newer" value of the status block
3170 			 * to IGU (if there was a DMA right after
3171 			 * bnx2x_has_rx_work and if there is no rmb, the memory
3172 			 * reading (bnx2x_update_fpsb_idx) may be postponed
3173 			 * to right before bnx2x_ack_sb). In this case there
3174 			 * will never be another interrupt until there is
3175 			 * another update of the status block, while there
3176 			 * is still unhandled work.
3177 			 */
3178 			rmb();
3179 
3180 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3181 				napi_complete(napi);
3182 				/* Re-enable interrupts */
3183 				DP(NETIF_MSG_RX_STATUS,
3184 				   "Update index to %d\n", fp->fp_hc_idx);
3185 				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3186 					     le16_to_cpu(fp->fp_hc_idx),
3187 					     IGU_INT_ENABLE, 1);
3188 				break;
3189 			}
3190 		}
3191 	}
3192 
3193 	return work_done;
3194 }
3195 
3196 #ifdef CONFIG_NET_RX_BUSY_POLL
3197 /* must be called with local_bh_disable()d */
3198 int bnx2x_low_latency_recv(struct napi_struct *napi)
3199 {
3200 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3201 						 napi);
3202 	struct bnx2x *bp = fp->bp;
3203 	int found = 0;
3204 
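	/* Busy polling is not supported while the device is down or while
	 * TPA/GRO aggregation is enabled.
	 */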
3205 	if ((bp->state == BNX2X_STATE_CLOSED) ||
3206 	    (bp->state == BNX2X_STATE_ERROR) ||
3207 	    (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3208 		return LL_FLUSH_FAILED;
3209 
3210 	if (!bnx2x_fp_lock_poll(fp))
3211 		return LL_FLUSH_BUSY;
3212 
3213 	if (bnx2x_has_rx_work(fp))
3214 		found = bnx2x_rx_int(fp, 4);
3215 
3216 	bnx2x_fp_unlock_poll(fp);
3217 
3218 	return found;
3219 }
3220 #endif
3221 
3222 /* We split the first BD into header and data BDs
3223  * to ease the pain of our fellow microcode engineers;
3224  * we use one mapping for both BDs.
3225  */
3226 static u16 bnx2x_tx_split(struct bnx2x *bp,
3227 			  struct bnx2x_fp_txdata *txdata,
3228 			  struct sw_tx_bd *tx_buf,
3229 			  struct eth_tx_start_bd **tx_bd, u16 hlen,
3230 			  u16 bd_prod)
3231 {
3232 	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3233 	struct eth_tx_bd *d_tx_bd;
3234 	dma_addr_t mapping;
3235 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
3236 
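	/* The original BD described old_len bytes; shrink it to the first
	 * hlen bytes (the headers) and add a new data BD for the remaining
	 * old_len - hlen bytes, reusing the same DMA mapping at offset hlen.
	 */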
3237 	/* first fix first BD */
3238 	h_tx_bd->nbytes = cpu_to_le16(hlen);
3239 
3240 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x)\n",
3241 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3242 
3243 	/* now get a new data BD
3244 	 * (after the pbd) and fill it */
3245 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3246 	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3247 
3248 	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3249 			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3250 
3251 	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3252 	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3253 	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3254 
3255 	/* this marks the BD as one that has no individual mapping */
3256 	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3257 
3258 	DP(NETIF_MSG_TX_QUEUED,
3259 	   "TSO split data size is %d (%x:%x)\n",
3260 	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3261 
3262 	/* update tx_bd */
3263 	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3264 
3265 	return bd_prod;
3266 }
3267 
3268 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3269 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
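/* "Fix" a checksum for the FW: for fix > 0 subtract the partial checksum of
 * the `fix' bytes preceding t_header, for fix < 0 add the partial checksum
 * of the -fix bytes starting at t_header, then fold, complement and
 * byte-swap the result.
 */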
3270 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3271 {
3272 	__sum16 tsum = (__force __sum16) csum;
3273 
3274 	if (fix > 0)
3275 		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3276 				  csum_partial(t_header - fix, fix, 0)));
3277 
3278 	else if (fix < 0)
3279 		tsum = ~csum_fold(csum_add((__force __wsum) csum,
3280 				  csum_partial(t_header, -fix, 0)));
3281 
3282 	return bswab16(tsum);
3283 }
3284 
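/* Build a mask of XMIT_* flags describing the checksum and GSO offloads
 * required for this skb, covering both the outer headers and, on non-E1x
 * chips, the inner (encapsulated) headers.
 */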
3285 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3286 {
3287 	u32 rc;
3288 	__u8 prot = 0;
3289 	__be16 protocol;
3290 
3291 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3292 		return XMIT_PLAIN;
3293 
3294 	protocol = vlan_get_protocol(skb);
3295 	if (protocol == htons(ETH_P_IPV6)) {
3296 		rc = XMIT_CSUM_V6;
3297 		prot = ipv6_hdr(skb)->nexthdr;
3298 	} else {
3299 		rc = XMIT_CSUM_V4;
3300 		prot = ip_hdr(skb)->protocol;
3301 	}
3302 
3303 	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3304 		if (inner_ip_hdr(skb)->version == 6) {
3305 			rc |= XMIT_CSUM_ENC_V6;
3306 			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3307 				rc |= XMIT_CSUM_TCP;
3308 		} else {
3309 			rc |= XMIT_CSUM_ENC_V4;
3310 			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3311 				rc |= XMIT_CSUM_TCP;
3312 		}
3313 	}
3314 	if (prot == IPPROTO_TCP)
3315 		rc |= XMIT_CSUM_TCP;
3316 
3317 	if (skb_is_gso(skb)) {
3318 		if (skb_is_gso_v6(skb)) {
3319 			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3320 			if (rc & XMIT_CSUM_ENC)
3321 				rc |= XMIT_GSO_ENC_V6;
3322 		} else {
3323 			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3324 			if (rc & XMIT_CSUM_ENC)
3325 				rc |= XMIT_GSO_ENC_V4;
3326 		}
3327 	}
3328 
3329 	return rc;
3330 }
3331 
3332 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3333 /* Check if the packet requires linearization (packet is too fragmented).
3334    No need to check fragmentation if page size > 8K (there will be no
3335    violation of FW restrictions). */
3336 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3337 			     u32 xmit_type)
3338 {
3339 	int to_copy = 0;
3340 	int hlen = 0;
3341 	int first_bd_sz = 0;
3342 
3343 	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3344 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3345 
3346 		if (xmit_type & XMIT_GSO) {
3347 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3348 			/* Check if LSO packet needs to be copied:
3349 			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3350 			int wnd_size = MAX_FETCH_BD - 3;
3351 			/* Number of windows to check */
3352 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3353 			int wnd_idx = 0;
3354 			int frag_idx = 0;
3355 			u32 wnd_sum = 0;
3356 
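			/* Verify that every window of wnd_size consecutive
			 * BDs carries at least one full MSS of payload;
			 * otherwise the packet would violate the FW fetch
			 * restrictions and must be linearized.
			 */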
3357 			/* Headers length */
3358 			hlen = (int)(skb_transport_header(skb) - skb->data) +
3359 				tcp_hdrlen(skb);
3360 
3361 			/* Amount of data (w/o headers) on the linear part of the SKB */
3362 			first_bd_sz = skb_headlen(skb) - hlen;
3363 
3364 			wnd_sum  = first_bd_sz;
3365 
3366 			/* Calculate the first sum - it's special */
3367 			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3368 				wnd_sum +=
3369 					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3370 
3371 			/* If there was data on linear skb data - check it */
3372 			if (first_bd_sz > 0) {
3373 				if (unlikely(wnd_sum < lso_mss)) {
3374 					to_copy = 1;
3375 					goto exit_lbl;
3376 				}
3377 
3378 				wnd_sum -= first_bd_sz;
3379 			}
3380 
3381 			/* Others are easier: run through the frag list and
3382 			   check all windows */
3383 			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3384 				wnd_sum +=
3385 			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3386 
3387 				if (unlikely(wnd_sum < lso_mss)) {
3388 					to_copy = 1;
3389 					break;
3390 				}
3391 				wnd_sum -=
3392 					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3393 			}
3394 		} else {
3395 			/* in the non-LSO case a too fragmented packet should
3396 			   always be linearized */
3397 			to_copy = 1;
3398 		}
3399 	}
3400 
3401 exit_lbl:
3402 	if (unlikely(to_copy))
3403 		DP(NETIF_MSG_TX_QUEUED,
3404 		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3405 		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3406 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3407 
3408 	return to_copy;
3409 }
3410 #endif
3411 
3412 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3413 				 u32 xmit_type)
3414 {
3415 	struct ipv6hdr *ipv6;
3416 
3417 	*parsing_data |= (skb_shinfo(skb)->gso_size <<
3418 			      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3419 			      ETH_TX_PARSE_BD_E2_LSO_MSS;
3420 
3421 	if (xmit_type & XMIT_GSO_ENC_V6)
3422 		ipv6 = inner_ipv6_hdr(skb);
3423 	else if (xmit_type & XMIT_GSO_V6)
3424 		ipv6 = ipv6_hdr(skb);
3425 	else
3426 		ipv6 = NULL;
3427 
3428 	if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3429 		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3430 }
3431 
3432 /**
3433  * bnx2x_set_pbd_gso - update PBD in GSO case.
3434  *
3435  * @skb:	packet skb
3436  * @pbd:	parse BD
3437  * @xmit_type:	xmit flags
3438  */
3439 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3440 			      struct eth_tx_parse_bd_e1x *pbd,
3441 			      struct eth_tx_start_bd *tx_start_bd,
3442 			      u32 xmit_type)
3443 {
3444 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3445 	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3446 	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3447 
3448 	if (xmit_type & XMIT_GSO_V4) {
3449 		pbd->ip_id = bswab16(ip_hdr(skb)->id);
3450 		pbd->tcp_pseudo_csum =
3451 			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3452 						   ip_hdr(skb)->daddr,
3453 						   0, IPPROTO_TCP, 0));
3454 
3455 		/* GSO on 57710/57711 needs FW to calculate IP checksum */
3456 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3457 	} else {
3458 		pbd->tcp_pseudo_csum =
3459 			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3460 						 &ipv6_hdr(skb)->daddr,
3461 						 0, IPPROTO_TCP, 0));
3462 	}
3463 
3464 	pbd->global_data |=
3465 		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3466 }
3467 
3468 /**
3469  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3470  *
3471  * @bp:			driver handle
3472  * @skb:		packet skb
3473  * @parsing_data:	data to be updated
3474  * @xmit_type:		xmit flags
3475  *
3476  * 57712/578xx related, when skb has encapsulation
3477  */
3478 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3479 				 u32 *parsing_data, u32 xmit_type)
3480 {
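	/* Offsets below are programmed in 16-bit words and the TCP header
	 * length in 32-bit dwords, as the _W/_DW field suffixes indicate.
	 */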
3481 	*parsing_data |=
3482 		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3483 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3484 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3485 
3486 	if (xmit_type & XMIT_CSUM_TCP) {
3487 		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3488 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3489 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3490 
3491 		return skb_inner_transport_header(skb) +
3492 			inner_tcp_hdrlen(skb) - skb->data;
3493 	}
3494 
3495 	/* We support checksum offload for TCP and UDP only.
3496 	 * No need to pass the UDP header length - it's a constant.
3497 	 */
3498 	return skb_inner_transport_header(skb) +
3499 		sizeof(struct udphdr) - skb->data;
3500 }
3501 
3502 /**
3503  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3504  *
3505  * @bp:			driver handle
3506  * @skb:		packet skb
3507  * @parsing_data:	data to be updated
3508  * @xmit_type:		xmit flags
3509  *
3510  * 57712/578xx related
3511  */
3512 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3513 				u32 *parsing_data, u32 xmit_type)
3514 {
3515 	*parsing_data |=
3516 		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3517 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3518 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3519 
3520 	if (xmit_type & XMIT_CSUM_TCP) {
3521 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3522 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3523 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3524 
3525 		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3526 	}
3527 	/* We support checksum offload for TCP and UDP only.
3528 	 * No need to pass the UDP header length - it's a constant.
3529 	 */
3530 	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3531 }
3532 
3533 /* set FW indication according to inner or outer protocols if tunneled */
3534 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3535 			       struct eth_tx_start_bd *tx_start_bd,
3536 			       u32 xmit_type)
3537 {
3538 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3539 
3540 	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3541 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3542 
3543 	if (!(xmit_type & XMIT_CSUM_TCP))
3544 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3545 }
3546 
3547 /**
3548  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3549  *
3550  * @bp:		driver handle
3551  * @skb:	packet skb
3552  * @pbd:	parse BD to be updated
3553  * @xmit_type:	xmit flags
3554  */
3555 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3556 			     struct eth_tx_parse_bd_e1x *pbd,
3557 			     u32 xmit_type)
3558 {
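	/* Header lengths are accumulated in 16-bit words here (hence the
	 * >> 1 and / 2) to match the PBD *_w fields; the value returned to
	 * the caller is converted back to bytes.
	 */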
3559 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3560 
3561 	/* for now NS flag is not used in Linux */
3562 	pbd->global_data =
3563 		cpu_to_le16(hlen |
3564 			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3565 			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3566 
3567 	pbd->ip_hlen_w = (skb_transport_header(skb) -
3568 			skb_network_header(skb)) >> 1;
3569 
3570 	hlen += pbd->ip_hlen_w;
3571 
3572 	/* We support checksum offload for TCP and UDP only */
3573 	if (xmit_type & XMIT_CSUM_TCP)
3574 		hlen += tcp_hdrlen(skb) / 2;
3575 	else
3576 		hlen += sizeof(struct udphdr) / 2;
3577 
3578 	pbd->total_hlen_w = cpu_to_le16(hlen);
3579 	hlen = hlen*2;
3580 
3581 	if (xmit_type & XMIT_CSUM_TCP) {
3582 		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3583 
3584 	} else {
3585 		s8 fix = SKB_CS_OFF(skb); /* signed! */
3586 
3587 		DP(NETIF_MSG_TX_QUEUED,
3588 		   "hlen %d  fix %d  csum before fix %x\n",
3589 		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3590 
3591 		/* HW bug: fixup the CSUM */
3592 		pbd->tcp_pseudo_csum =
3593 			bnx2x_csum_fix(skb_transport_header(skb),
3594 				       SKB_CS(skb), fix);
3595 
3596 		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3597 		   pbd->tcp_pseudo_csum);
3598 	}
3599 
3600 	return hlen;
3601 }
3602 
3603 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3604 				      struct eth_tx_parse_bd_e2 *pbd_e2,
3605 				      struct eth_tx_parse_2nd_bd *pbd2,
3606 				      u16 *global_data,
3607 				      u32 xmit_type)
3608 {
3609 	u16 hlen_w = 0;
3610 	u8 outerip_off, outerip_len = 0;
3611 
3612 	/* from outer IP to transport */
3613 	hlen_w = (skb_inner_transport_header(skb) -
3614 		  skb_network_header(skb)) >> 1;
3615 
3616 	/* transport len */
3617 	hlen_w += inner_tcp_hdrlen(skb) >> 1;
3618 
3619 	pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3620 
3621 	/* outer IP header info */
3622 	if (xmit_type & XMIT_CSUM_V4) {
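		/* Back the total length and fragment offset out of the outer
		 * IP header checksum; the fw_ip_csum_wo_len_flags_frag field
		 * excludes exactly those fields.
		 */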
3623 		struct iphdr *iph = ip_hdr(skb);
3624 		u32 csum = (__force u32)(~iph->check) -
3625 			   (__force u32)iph->tot_len -
3626 			   (__force u32)iph->frag_off;
3627 
3628 		pbd2->fw_ip_csum_wo_len_flags_frag =
3629 			bswab16(csum_fold((__force __wsum)csum));
3630 	} else {
3631 		pbd2->fw_ip_hdr_to_payload_w =
3632 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3633 	}
3634 
3635 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3636 
3637 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3638 
3639 	if (xmit_type & XMIT_GSO_V4) {
3640 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3641 
3642 		pbd_e2->data.tunnel_data.pseudo_csum =
3643 			bswab16(~csum_tcpudp_magic(
3644 					inner_ip_hdr(skb)->saddr,
3645 					inner_ip_hdr(skb)->daddr,
3646 					0, IPPROTO_TCP, 0));
3647 
3648 		outerip_len = ip_hdr(skb)->ihl << 1;
3649 	} else {
3650 		pbd_e2->data.tunnel_data.pseudo_csum =
3651 			bswab16(~csum_ipv6_magic(
3652 					&inner_ipv6_hdr(skb)->saddr,
3653 					&inner_ipv6_hdr(skb)->daddr,
3654 					0, IPPROTO_TCP, 0));
3655 	}
3656 
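	/* offset of the outer IP header from the start of the packet,
	 * in 16-bit words
	 */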
3657 	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3658 
3659 	*global_data |=
3660 		outerip_off |
3661 		(!!(xmit_type & XMIT_CSUM_V6) <<
3662 			ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3663 		(outerip_len <<
3664 			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3665 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3666 			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3667 
3668 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3669 		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3670 		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3671 	}
3672 }
3673 
3674 /* called with netif_tx_lock
3675  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3676  * netif_wake_queue()
3677  */
3678 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3679 {
3680 	struct bnx2x *bp = netdev_priv(dev);
3681 
3682 	struct netdev_queue *txq;
3683 	struct bnx2x_fp_txdata *txdata;
3684 	struct sw_tx_bd *tx_buf;
3685 	struct eth_tx_start_bd *tx_start_bd, *first_bd;
3686 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3687 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3688 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3689 	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3690 	u32 pbd_e2_parsing_data = 0;
3691 	u16 pkt_prod, bd_prod;
3692 	int nbd, txq_index;
3693 	dma_addr_t mapping;
3694 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
3695 	int i;
3696 	u8 hlen = 0;
3697 	__le16 pkt_size = 0;
3698 	struct ethhdr *eth;
3699 	u8 mac_type = UNICAST_ADDRESS;
3700 
3701 #ifdef BNX2X_STOP_ON_ERROR
3702 	if (unlikely(bp->panic))
3703 		return NETDEV_TX_BUSY;
3704 #endif
3705 
3706 	txq_index = skb_get_queue_mapping(skb);
3707 	txq = netdev_get_tx_queue(dev, txq_index);
3708 
3709 	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3710 
3711 	txdata = &bp->bnx2x_txq[txq_index];
3712 
3713 	/* enable this debug print to view the transmission queue being used
3714 	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3715 	   txq_index, fp_index, txdata_index); */
3716 
3717 	/* enable this debug print to view the transmission details
3718 	DP(NETIF_MSG_TX_QUEUED,
3719 	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3720 	   txdata->cid, fp_index, txdata_index, txdata, fp); */
3721 
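	/* Worst case, a packet needs nr_frags + BDS_PER_TX_PKT BDs plus the
	 * next-page BDs it may cross; bail out (stop the queue, or drop for a
	 * zero-sized storage ring) if that many are not available.
	 */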
3722 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
3723 			skb_shinfo(skb)->nr_frags +
3724 			BDS_PER_TX_PKT +
3725 			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3726 		/* Handle special storage cases separately */
3727 		if (txdata->tx_ring_size == 0) {
3728 			struct bnx2x_eth_q_stats *q_stats =
3729 				bnx2x_fp_qstats(bp, txdata->parent_fp);
3730 			q_stats->driver_filtered_tx_pkt++;
3731 			dev_kfree_skb(skb);
3732 			return NETDEV_TX_OK;
3733 		}
3734 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3735 		netif_tx_stop_queue(txq);
3736 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3737 
3738 		return NETDEV_TX_BUSY;
3739 	}
3740 
3741 	DP(NETIF_MSG_TX_QUEUED,
3742 	   "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3743 	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3744 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3745 	   skb->len);
3746 
3747 	eth = (struct ethhdr *)skb->data;
3748 
3749 	/* set flag according to packet type (UNICAST_ADDRESS is default) */
3750 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3751 		if (is_broadcast_ether_addr(eth->h_dest))
3752 			mac_type = BROADCAST_ADDRESS;
3753 		else
3754 			mac_type = MULTICAST_ADDRESS;
3755 	}
3756 
3757 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3758 	/* First, check if we need to linearize the skb (due to FW
3759 	   restrictions). No need to check fragmentation if page size > 8K
3760 	   (there will be no violation of FW restrictions) */
3761 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3762 		/* Statistics of linearization */
3763 		bp->lin_cnt++;
3764 		if (skb_linearize(skb) != 0) {
3765 			DP(NETIF_MSG_TX_QUEUED,
3766 			   "SKB linearization failed - silently dropping this SKB\n");
3767 			dev_kfree_skb_any(skb);
3768 			return NETDEV_TX_OK;
3769 		}
3770 	}
3771 #endif
3772 	/* Map skb linear data for DMA */
3773 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
3774 				 skb_headlen(skb), DMA_TO_DEVICE);
3775 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3776 		DP(NETIF_MSG_TX_QUEUED,
3777 		   "SKB mapping failed - silently dropping this SKB\n");
3778 		dev_kfree_skb_any(skb);
3779 		return NETDEV_TX_OK;
3780 	}
3781 	/*
3782 	 * Please read carefully. First we use one BD which we mark as start,
3783 	 * then we have a parsing info BD (used for TSO or xsum),
3784 	 * and only then we have the rest of the TSO BDs.
3785 	 * (don't forget to mark the last one as last,
3786 	 * and to unmap only AFTER you write to the BD ...)
3787 	 * And above all, all PBD sizes are in words - NOT DWORDS!
3788 	 */
3789 
3790 	/* get current pkt produced now - advance it just before sending packet
3791 	 * since mapping of pages may fail and cause packet to be dropped
3792 	 */
3793 	pkt_prod = txdata->tx_pkt_prod;
3794 	bd_prod = TX_BD(txdata->tx_bd_prod);
3795 
3796 	/* get a tx_buf and first BD
3797 	 * tx_start_bd may be changed during SPLIT,
3798 	 * but first_bd will always stay first
3799 	 */
3800 	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3801 	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3802 	first_bd = tx_start_bd;
3803 
3804 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3805 
3806 	/* header nbd: indirectly zero other flags! */
3807 	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3808 
3809 	/* remember the first BD of the packet */
3810 	tx_buf->first_bd = txdata->tx_bd_prod;
3811 	tx_buf->skb = skb;
3812 	tx_buf->flags = 0;
3813 
3814 	DP(NETIF_MSG_TX_QUEUED,
3815 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3816 	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3817 
3818 	if (vlan_tx_tag_present(skb)) {
3819 		tx_start_bd->vlan_or_ethertype =
3820 		    cpu_to_le16(vlan_tx_tag_get(skb));
3821 		tx_start_bd->bd_flags.as_bitfield |=
3822 		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3823 	} else {
3824 		/* when transmitting in a vf, start bd must hold the ethertype
3825 		 * for fw to enforce it
3826 		 */
3827 		if (IS_VF(bp))
3828 			tx_start_bd->vlan_or_ethertype =
3829 				cpu_to_le16(ntohs(eth->h_proto));
3830 		else
3831 			/* used by FW for packet accounting */
3832 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3833 	}
3834 
3835 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3836 
3837 	/* turn on parsing and get a BD */
3838 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3839 
3840 	if (xmit_type & XMIT_CSUM)
3841 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3842 
3843 	if (!CHIP_IS_E1x(bp)) {
3844 		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3845 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3846 
3847 		if (xmit_type & XMIT_CSUM_ENC) {
3848 			u16 global_data = 0;
3849 
3850 			/* Set PBD in enc checksum offload case */
3851 			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3852 						      &pbd_e2_parsing_data,
3853 						      xmit_type);
3854 
3855 			/* turn on 2nd parsing and get a BD */
3856 			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3857 
3858 			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3859 
3860 			memset(pbd2, 0, sizeof(*pbd2));
3861 
3862 			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3863 				(skb_inner_network_header(skb) -
3864 				 skb->data) >> 1;
3865 
3866 			if (xmit_type & XMIT_GSO_ENC)
3867 				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3868 							  &global_data,
3869 							  xmit_type);
3870 
3871 			pbd2->global_data = cpu_to_le16(global_data);
3872 
3873 			/* add an additional parse BD indication to the start BD */
3874 			SET_FLAG(tx_start_bd->general_data,
3875 				 ETH_TX_START_BD_PARSE_NBDS, 1);
3876 			/* set encapsulation flag in start BD */
3877 			SET_FLAG(tx_start_bd->general_data,
3878 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3879 			nbd++;
3880 		} else if (xmit_type & XMIT_CSUM) {
3881 			/* Set PBD in checksum offload case w/o encapsulation */
3882 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3883 						     &pbd_e2_parsing_data,
3884 						     xmit_type);
3885 		}
3886 
3887 		/* Add the MACs to the parsing BD if this is a VF or if
3888 		 * Tx Switching is enabled.
3889 		 */
3890 		if (IS_VF(bp)) {
3891 			/* override GRE parameters in BD */
3892 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3893 					      &pbd_e2->data.mac_addr.src_mid,
3894 					      &pbd_e2->data.mac_addr.src_lo,
3895 					      eth->h_source);
3896 
3897 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3898 					      &pbd_e2->data.mac_addr.dst_mid,
3899 					      &pbd_e2->data.mac_addr.dst_lo,
3900 					      eth->h_dest);
3901 		} else if (bp->flags & TX_SWITCHING) {
3902 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3903 					      &pbd_e2->data.mac_addr.dst_mid,
3904 					      &pbd_e2->data.mac_addr.dst_lo,
3905 					      eth->h_dest);
3906 		}
3907 
3908 		SET_FLAG(pbd_e2_parsing_data,
3909 			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3910 	} else {
3911 		u16 global_data = 0;
3912 		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3913 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3914 		/* Set PBD in checksum offload case */
3915 		if (xmit_type & XMIT_CSUM)
3916 			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3917 
3918 		SET_FLAG(global_data,
3919 			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3920 		pbd_e1x->global_data |= cpu_to_le16(global_data);
3921 	}
3922 
3923 	/* Setup the data pointer of the first BD of the packet */
3924 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3925 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3926 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3927 	pkt_size = tx_start_bd->nbytes;
3928 
3929 	DP(NETIF_MSG_TX_QUEUED,
3930 	   "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
3931 	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3932 	   le16_to_cpu(tx_start_bd->nbytes),
3933 	   tx_start_bd->bd_flags.as_bitfield,
3934 	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3935 
3936 	if (xmit_type & XMIT_GSO) {
3937 
3938 		DP(NETIF_MSG_TX_QUEUED,
3939 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3940 		   skb->len, hlen, skb_headlen(skb),
3941 		   skb_shinfo(skb)->gso_size);
3942 
3943 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3944 
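		/* If the linear data extends beyond the headers, split the
		 * first BD so that the headers get a BD of their own
		 * (bnx2x_tx_split consumes one extra BD, hence the nbd++).
		 */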
3945 		if (unlikely(skb_headlen(skb) > hlen)) {
3946 			nbd++;
3947 			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3948 						 &tx_start_bd, hlen,
3949 						 bd_prod);
3950 		}
3951 		if (!CHIP_IS_E1x(bp))
3952 			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3953 					     xmit_type);
3954 		else
3955 			bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3956 	}
3957 
3958 	/* Set the PBD's parsing_data field if not zero
3959 	 * (for the chips newer than 57711).
3960 	 */
3961 	if (pbd_e2_parsing_data)
3962 		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3963 
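	/* tx_data_bd tracks the last BD written for this packet; it starts
	 * at the start BD and is advanced once per mapped fragment below.
	 */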
3964 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3965 
3966 	/* Handle fragmented skb */
3967 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3968 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3969 
3970 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3971 					   skb_frag_size(frag), DMA_TO_DEVICE);
3972 		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3973 			unsigned int pkts_compl = 0, bytes_compl = 0;
3974 
3975 			DP(NETIF_MSG_TX_QUEUED,
3976 			   "Unable to map page - dropping packet...\n");
3977 
3978 			/* we need to unmap all buffers already mapped
3979 			 * for this SKB;
3980 			 * first_bd->nbd needs to be properly updated
3981 			 * before the call to bnx2x_free_tx_pkt
3982 			 */
3983 			first_bd->nbd = cpu_to_le16(nbd);
3984 			bnx2x_free_tx_pkt(bp, txdata,
3985 					  TX_BD(txdata->tx_pkt_prod),
3986 					  &pkts_compl, &bytes_compl);
3987 			return NETDEV_TX_OK;
3988 		}
3989 
3990 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3991 		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3992 		if (total_pkt_bd == NULL)
3993 			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3994 
3995 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3996 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3997 		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3998 		le16_add_cpu(&pkt_size, skb_frag_size(frag));
3999 		nbd++;
4000 
4001 		DP(NETIF_MSG_TX_QUEUED,
4002 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4003 		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4004 		   le16_to_cpu(tx_data_bd->nbytes));
4005 	}
4006 
4007 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4008 
4009 	/* update with actual num BDs */
4010 	first_bd->nbd = cpu_to_le16(nbd);
4011 
4012 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4013 
4014 	/* now send a tx doorbell, counting the next BD
4015 	 * if the packet contains or ends with it
4016 	 */
4017 	if (TX_BD_POFF(bd_prod) < nbd)
4018 		nbd++;
4019 
4020 	/* total_pkt_bytes should be set on the first data BD if
4021 	 * it's not an LSO packet and there is more than one
4022 	 * data BD. In this case pkt_size is limited by an MTU value.
4023 	 * However we prefer to set it for an LSO packet (while we don't
4024 	 * have to) in order to save some CPU cycles in the non-LSO
4025 	 * case, which we care about much more.
4026 	 */
4027 	if (total_pkt_bd != NULL)
4028 		total_pkt_bd->total_pkt_bytes = pkt_size;
4029 
4030 	if (pbd_e1x)
4031 		DP(NETIF_MSG_TX_QUEUED,
4032 		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4033 		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4034 		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4035 		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4036 		    le16_to_cpu(pbd_e1x->total_hlen_w));
4037 	if (pbd_e2)
4038 		DP(NETIF_MSG_TX_QUEUED,
4039 		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4040 		   pbd_e2,
4041 		   pbd_e2->data.mac_addr.dst_hi,
4042 		   pbd_e2->data.mac_addr.dst_mid,
4043 		   pbd_e2->data.mac_addr.dst_lo,
4044 		   pbd_e2->data.mac_addr.src_hi,
4045 		   pbd_e2->data.mac_addr.src_mid,
4046 		   pbd_e2->data.mac_addr.src_lo,
4047 		   pbd_e2->parsing_data);
4048 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4049 
4050 	netdev_tx_sent_queue(txq, skb->len);
4051 
4052 	skb_tx_timestamp(skb);
4053 
4054 	txdata->tx_pkt_prod++;
4055 	/*
4056 	 * Make sure that the BD data is updated before updating the producer
4057 	 * since FW might read the BD right after the producer is updated.
4058 	 * This is only applicable for weak-ordered memory model archs such
4059 	 * as IA-64. The following barrier is also mandatory since FW
4060 	 * assumes packets must have BDs.
4061 	 */
4062 	wmb();
4063 
4064 	txdata->tx_db.data.prod += nbd;
4065 	barrier();
4066 
4067 	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4068 
4069 	mmiowb();
4070 
4071 	txdata->tx_bd_prod += nbd;
4072 
4073 	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4074 		netif_tx_stop_queue(txq);
4075 
4076 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
4077 		 * ordering of set_bit() in netif_tx_stop_queue() and read of
4078 		 * fp->bd_tx_cons */
4079 		smp_mb();
4080 
4081 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4082 		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4083 			netif_tx_wake_queue(txq);
4084 	}
4085 	txdata->tx_pkt++;
4086 
4087 	return NETDEV_TX_OK;
4088 }
4089 
4090 /**
4091  * bnx2x_setup_tc - routine to configure net_device for multi tc
4092  *
4093  * @dev: net device to configure
4094  * @num_tc: number of traffic classes to enable
4095  *
4096  * callback connected to the ndo_setup_tc function pointer
4097  */
4098 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4099 {
4100 	int cos, prio, count, offset;
4101 	struct bnx2x *bp = netdev_priv(dev);
4102 
4103 	/* setup tc must be called under rtnl lock */
4104 	ASSERT_RTNL();
4105 
4106 	/* no traffic classes requested. Aborting */
4107 	if (!num_tc) {
4108 		netdev_reset_tc(dev);
4109 		return 0;
4110 	}
4111 
4112 	/* requested to support too many traffic classes */
4113 	if (num_tc > bp->max_cos) {
4114 		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4115 			  num_tc, bp->max_cos);
4116 		return -EINVAL;
4117 	}
4118 
4119 	/* declare the number of supported traffic classes */
4120 	if (netdev_set_num_tc(dev, num_tc)) {
4121 		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4122 		return -EINVAL;
4123 	}
4124 
4125 	/* configure priority to traffic class mapping */
4126 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4127 		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4128 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4129 		   "mapping priority %d to tc %d\n",
4130 		   prio, bp->prio_to_cos[prio]);
4131 	}
4132 
4133 	/* Use this configuration to differentiate tc0 from other COSes.
4134 	   This can be used for ETS or PFC, and saves the effort of setting
4135 	   up a multi-class queue disc or negotiating DCBX with a switch
4136 	netdev_set_prio_tc_map(dev, 0, 0);
4137 	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4138 	for (prio = 1; prio < 16; prio++) {
4139 		netdev_set_prio_tc_map(dev, prio, 1);
4140 		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4141 	} */
4142 
4143 	/* configure traffic class to transmission queue mapping */
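	/* Each class gets a contiguous block of BNX2X_NUM_ETH_QUEUES(bp)
	 * Tx queues, starting at cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
	 */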
4144 	for (cos = 0; cos < bp->max_cos; cos++) {
4145 		count = BNX2X_NUM_ETH_QUEUES(bp);
4146 		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4147 		netdev_set_tc_queue(dev, cos, count, offset);
4148 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4149 		   "mapping tc %d to offset %d count %d\n",
4150 		   cos, offset, count);
4151 	}
4152 
4153 	return 0;
4154 }
4155 
4156 /* called with rtnl_lock */
4157 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4158 {
4159 	struct sockaddr *addr = p;
4160 	struct bnx2x *bp = netdev_priv(dev);
4161 	int rc = 0;
4162 
4163 	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4164 		BNX2X_ERR("Requested MAC address is not valid\n");
4165 		return -EINVAL;
4166 	}
4167 
4168 	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4169 	    !is_zero_ether_addr(addr->sa_data)) {
4170 		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4171 		return -EINVAL;
4172 	}
4173 
4174 	if (netif_running(dev))  {
4175 		rc = bnx2x_set_eth_mac(bp, false);
4176 		if (rc)
4177 			return rc;
4178 	}
4179 
4180 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4181 
4182 	if (netif_running(dev))
4183 		rc = bnx2x_set_eth_mac(bp, true);
4184 
4185 	return rc;
4186 }
4187 
4188 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4189 {
4190 	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4191 	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4192 	u8 cos;
4193 
4194 	/* Common */
4195 
4196 	if (IS_FCOE_IDX(fp_index)) {
4197 		memset(sb, 0, sizeof(union host_hc_status_block));
4198 		fp->status_blk_mapping = 0;
4199 	} else {
4200 		/* status blocks */
4201 		if (!CHIP_IS_E1x(bp))
4202 			BNX2X_PCI_FREE(sb->e2_sb,
4203 				       bnx2x_fp(bp, fp_index,
4204 						status_blk_mapping),
4205 				       sizeof(struct host_hc_status_block_e2));
4206 		else
4207 			BNX2X_PCI_FREE(sb->e1x_sb,
4208 				       bnx2x_fp(bp, fp_index,
4209 						status_blk_mapping),
4210 				       sizeof(struct host_hc_status_block_e1x));
4211 	}
4212 
4213 	/* Rx */
4214 	if (!skip_rx_queue(bp, fp_index)) {
4215 		bnx2x_free_rx_bds(fp);
4216 
4217 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4218 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4219 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4220 			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
4221 			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
4222 
4223 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4224 			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
4225 			       sizeof(struct eth_fast_path_rx_cqe) *
4226 			       NUM_RCQ_BD);
4227 
4228 		/* SGE ring */
4229 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4230 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4231 			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
4232 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4233 	}
4234 
4235 	/* Tx */
4236 	if (!skip_tx_queue(bp, fp_index)) {
4237 		/* fastpath tx rings: tx_buf tx_desc */
4238 		for_each_cos_in_tx_queue(fp, cos) {
4239 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4240 
4241 			DP(NETIF_MSG_IFDOWN,
4242 			   "freeing tx memory of fp %d cos %d cid %d\n",
4243 			   fp_index, cos, txdata->cid);
4244 
4245 			BNX2X_FREE(txdata->tx_buf_ring);
4246 			BNX2X_PCI_FREE(txdata->tx_desc_ring,
4247 				txdata->tx_desc_mapping,
4248 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4249 		}
4250 	}
4251 	/* end of fastpath */
4252 }
4253 
4254 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4255 {
4256 	int i;
4257 	for_each_cnic_queue(bp, i)
4258 		bnx2x_free_fp_mem_at(bp, i);
4259 }
4260 
4261 void bnx2x_free_fp_mem(struct bnx2x *bp)
4262 {
4263 	int i;
4264 	for_each_eth_queue(bp, i)
4265 		bnx2x_free_fp_mem_at(bp, i);
4266 }
4267 
4268 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4269 {
4270 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4271 	if (!CHIP_IS_E1x(bp)) {
4272 		bnx2x_fp(bp, index, sb_index_values) =
4273 			(__le16 *)status_blk.e2_sb->sb.index_values;
4274 		bnx2x_fp(bp, index, sb_running_index) =
4275 			(__le16 *)status_blk.e2_sb->sb.running_index;
4276 	} else {
4277 		bnx2x_fp(bp, index, sb_index_values) =
4278 			(__le16 *)status_blk.e1x_sb->sb.index_values;
4279 		bnx2x_fp(bp, index, sb_running_index) =
4280 			(__le16 *)status_blk.e1x_sb->sb.running_index;
4281 	}
4282 }
4283 
4284 /* Returns the number of actually allocated BDs */
4285 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4286 			      int rx_ring_size)
4287 {
4288 	struct bnx2x *bp = fp->bp;
4289 	u16 ring_prod, cqe_ring_prod;
4290 	int i, failure_cnt = 0;
4291 
4292 	fp->rx_comp_cons = 0;
4293 	cqe_ring_prod = ring_prod = 0;
4294 
4295 	/* This routine is called only during init, so
4296 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4297 	 */
4298 	for (i = 0; i < rx_ring_size; i++) {
4299 		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4300 			failure_cnt++;
4301 			continue;
4302 		}
4303 		ring_prod = NEXT_RX_IDX(ring_prod);
4304 		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4305 		WARN_ON(ring_prod <= (i - failure_cnt));
4306 	}
4307 
4308 	if (failure_cnt)
4309 		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4310 			  i - failure_cnt, fp->index);
4311 
4312 	fp->rx_bd_prod = ring_prod;
4313 	/* Limit the CQE producer by the CQE ring size */
4314 	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4315 			       cqe_ring_prod);
4316 	fp->rx_pkt = fp->rx_calls = 0;
4317 
4318 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4319 
4320 	return i - failure_cnt;
4321 }
4322 
4323 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4324 {
4325 	int i;
4326 
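	/* The last CQE of each RCQ page is used as a next-page pointer,
	 * chaining the pages into a ring (the last page wraps to the first).
	 */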
4327 	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4328 		struct eth_rx_cqe_next_page *nextpg;
4329 
4330 		nextpg = (struct eth_rx_cqe_next_page *)
4331 			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4332 		nextpg->addr_hi =
4333 			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4334 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4335 		nextpg->addr_lo =
4336 			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4337 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4338 	}
4339 }
4340 
4341 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4342 {
4343 	union host_hc_status_block *sb;
4344 	struct bnx2x_fastpath *fp = &bp->fp[index];
4345 	int ring_size = 0;
4346 	u8 cos;
4347 	int rx_ring_size = 0;
4348 
4349 	if (!bp->rx_ring_size &&
4350 	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4351 		rx_ring_size = MIN_RX_SIZE_NONTPA;
4352 		bp->rx_ring_size = rx_ring_size;
4353 	} else if (!bp->rx_ring_size) {
4354 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4355 
4356 		if (CHIP_IS_E3(bp)) {
4357 			u32 cfg = SHMEM_RD(bp,
4358 					   dev_info.port_hw_config[BP_PORT(bp)].
4359 					   default_cfg);
4360 
4361 			/* Decrease ring size for 1G functions */
4362 			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4363 			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
4364 				rx_ring_size /= 10;
4365 		}
4366 
4367 		/* allocate at least the number of buffers required by FW */
4368 		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4369 				     MIN_RX_SIZE_TPA, rx_ring_size);
4370 
4371 		bp->rx_ring_size = rx_ring_size;
4372 	} else /* if rx_ring_size specified - use it */
4373 		rx_ring_size = bp->rx_ring_size;
4374 
4375 	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4376 
4377 	/* Common */
4378 	sb = &bnx2x_fp(bp, index, status_blk);
4379 
4380 	if (!IS_FCOE_IDX(index)) {
4381 		/* status blocks */
4382 		if (!CHIP_IS_E1x(bp)) {
4383 			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4384 						    sizeof(struct host_hc_status_block_e2));
4385 			if (!sb->e2_sb)
4386 				goto alloc_mem_err;
4387 		} else {
4388 			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4389 						     sizeof(struct host_hc_status_block_e1x));
4390 			if (!sb->e1x_sb)
4391 				goto alloc_mem_err;
4392 		}
4393 	}
4394 
4395 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4396 	 * set shortcuts for it.
4397 	 */
4398 	if (!IS_FCOE_IDX(index))
4399 		set_sb_shortcuts(bp, index);
4400 
4401 	/* Tx */
4402 	if (!skip_tx_queue(bp, index)) {
4403 		/* fastpath tx rings: tx_buf tx_desc */
4404 		for_each_cos_in_tx_queue(fp, cos) {
4405 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4406 
4407 			DP(NETIF_MSG_IFUP,
4408 			   "allocating tx memory of fp %d cos %d\n",
4409 			   index, cos);
4410 
4411 			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4412 						      sizeof(struct sw_tx_bd),
4413 						      GFP_KERNEL);
4414 			if (!txdata->tx_buf_ring)
4415 				goto alloc_mem_err;
4416 			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4417 							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4418 			if (!txdata->tx_desc_ring)
4419 				goto alloc_mem_err;
4420 		}
4421 	}
4422 
4423 	/* Rx */
4424 	if (!skip_rx_queue(bp, index)) {
4425 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4426 		bnx2x_fp(bp, index, rx_buf_ring) =
4427 			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4428 		if (!bnx2x_fp(bp, index, rx_buf_ring))
4429 			goto alloc_mem_err;
4430 		bnx2x_fp(bp, index, rx_desc_ring) =
4431 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4432 					sizeof(struct eth_rx_bd) * NUM_RX_BD);
4433 		if (!bnx2x_fp(bp, index, rx_desc_ring))
4434 			goto alloc_mem_err;
4435 
4436 		/* Seed all CQEs by 1s */
4437 		bnx2x_fp(bp, index, rx_comp_ring) =
4438 			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4439 					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4440 		if (!bnx2x_fp(bp, index, rx_comp_ring))
4441 			goto alloc_mem_err;
4442 
4443 		/* SGE ring */
4444 		bnx2x_fp(bp, index, rx_page_ring) =
4445 			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4446 				GFP_KERNEL);
4447 		if (!bnx2x_fp(bp, index, rx_page_ring))
4448 			goto alloc_mem_err;
4449 		bnx2x_fp(bp, index, rx_sge_ring) =
4450 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4451 					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4452 		if (!bnx2x_fp(bp, index, rx_sge_ring))
4453 			goto alloc_mem_err;
4454 		/* RX BD ring */
4455 		bnx2x_set_next_page_rx_bd(fp);
4456 
4457 		/* CQ ring */
4458 		bnx2x_set_next_page_rx_cq(fp);
4459 
4460 		/* BDs */
4461 		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4462 		if (ring_size < rx_ring_size)
4463 			goto alloc_mem_err;
4464 	}
4465 
4466 	return 0;
4467 
4468 /* handles low memory cases */
4469 alloc_mem_err:
4470 	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4471 						index, ring_size);
4472 	/* FW will drop all packets if the queue is not big enough.
4473 	 * In these cases we disable the queue.
4474 	 * Min size is different for OOO, TPA and non-TPA queues.
4475 	 */
4476 	if (ring_size < (fp->disable_tpa ?
4477 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4478 			/* release memory allocated for this queue */
4479 			bnx2x_free_fp_mem_at(bp, index);
4480 			return -ENOMEM;
4481 	}
4482 	return 0;
4483 }
4484 
4485 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4486 {
4487 	if (!NO_FCOE(bp))
4488 		/* FCoE */
4489 		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4490 			/* we will fail the load process instead of
4491 			 * marking NO_FCOE_FLAG
4492 			 */
4493 			return -ENOMEM;
4494 
4495 	return 0;
4496 }
4497 
4498 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4499 {
4500 	int i;
4501 
4502 	/* 1. Allocate FP for leading - fatal if error
4503 	 * 2. Allocate RSS - fix number of queues if error
4504 	 */
4505 
4506 	/* leading */
4507 	if (bnx2x_alloc_fp_mem_at(bp, 0))
4508 		return -ENOMEM;
4509 
4510 	/* RSS */
4511 	for_each_nondefault_eth_queue(bp, i)
4512 		if (bnx2x_alloc_fp_mem_at(bp, i))
4513 			break;
4514 
4515 	/* handle memory failures */
4516 	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4517 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4518 
4519 		WARN_ON(delta < 0);
4520 		bnx2x_shrink_eth_fp(bp, delta);
4521 		if (CNIC_SUPPORT(bp))
4522 			/* move non-eth FPs next to the last eth FP;
4523 			 * this must be done in that order:
4524 			 * FCOE_IDX < FWD_IDX < OOO_IDX
4525 			 */
4526 
4527 			/* move the FCoE fp even if NO_FCOE_FLAG is on */
4528 			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4529 		bp->num_ethernet_queues -= delta;
4530 		bp->num_queues = bp->num_ethernet_queues +
4531 				 bp->num_cnic_queues;
4532 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4533 			  bp->num_queues + delta, bp->num_queues);
4534 	}
4535 
4536 	return 0;
4537 }
4538 
4539 void bnx2x_free_mem_bp(struct bnx2x *bp)
4540 {
4541 	int i;
4542 
4543 	for (i = 0; i < bp->fp_array_size; i++)
4544 		kfree(bp->fp[i].tpa_info);
4545 	kfree(bp->fp);
4546 	kfree(bp->sp_objs);
4547 	kfree(bp->fp_stats);
4548 	kfree(bp->bnx2x_txq);
4549 	kfree(bp->msix_table);
4550 	kfree(bp->ilt);
4551 }
4552 
4553 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4554 {
4555 	struct bnx2x_fastpath *fp;
4556 	struct msix_entry *tbl;
4557 	struct bnx2x_ilt *ilt;
4558 	int msix_table_size = 0;
4559 	int fp_array_size, txq_array_size;
4560 	int i;
4561 
4562 	/*
4563 	 * The biggest MSI-X table we might need is the maximum number of fast
4564 	 * path IGU SBs plus the default SB (for PF only).
4565 	 */
4566 	msix_table_size = bp->igu_sb_cnt;
4567 	if (IS_PF(bp))
4568 		msix_table_size++;
4569 	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4570 
4571 	/* fp array: RSS plus CNIC related L2 queues */
4572 	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4573 	bp->fp_array_size = fp_array_size;
4574 	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4575 
4576 	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4577 	if (!fp)
4578 		goto alloc_err;
4579 	for (i = 0; i < bp->fp_array_size; i++) {
4580 		fp[i].tpa_info =
4581 			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4582 				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4583 		if (!(fp[i].tpa_info))
4584 			goto alloc_err;
4585 	}
4586 
4587 	bp->fp = fp;
4588 
4589 	/* allocate sp objs */
4590 	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4591 			      GFP_KERNEL);
4592 	if (!bp->sp_objs)
4593 		goto alloc_err;
4594 
4595 	/* allocate fp_stats */
4596 	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4597 			       GFP_KERNEL);
4598 	if (!bp->fp_stats)
4599 		goto alloc_err;
4600 
4601 	/* Allocate memory for the transmission queues array */
4602 	txq_array_size =
4603 		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4604 	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4605 
4606 	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4607 				GFP_KERNEL);
4608 	if (!bp->bnx2x_txq)
4609 		goto alloc_err;
4610 
4611 	/* msix table */
4612 	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4613 	if (!tbl)
4614 		goto alloc_err;
4615 	bp->msix_table = tbl;
4616 
4617 	/* ilt */
4618 	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4619 	if (!ilt)
4620 		goto alloc_err;
4621 	bp->ilt = ilt;
4622 
4623 	return 0;
4624 alloc_err:
4625 	bnx2x_free_mem_bp(bp);
4626 	return -ENOMEM;
4627 }
4628 
4629 int bnx2x_reload_if_running(struct net_device *dev)
4630 {
4631 	struct bnx2x *bp = netdev_priv(dev);
4632 
4633 	if (unlikely(!netif_running(dev)))
4634 		return 0;
4635 
4636 	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4637 	return bnx2x_nic_load(bp, LOAD_NORMAL);
4638 }
4639 
4640 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4641 {
4642 	u32 sel_phy_idx = 0;
4643 	if (bp->link_params.num_phys <= 1)
4644 		return INT_PHY;
4645 
4646 	if (bp->link_vars.link_up) {
4647 		sel_phy_idx = EXT_PHY1;
4648 		/* In case link is SERDES, check if the EXT_PHY2 is the one */
4649 		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4650 		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4651 			sel_phy_idx = EXT_PHY2;
4652 	} else {
4653 
4654 		switch (bnx2x_phy_selection(&bp->link_params)) {
4655 		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4656 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4657 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4658 		       sel_phy_idx = EXT_PHY1;
4659 		       break;
4660 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4661 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4662 		       sel_phy_idx = EXT_PHY2;
4663 		       break;
4664 		}
4665 	}
4666 
4667 	return sel_phy_idx;
4668 }
4669 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4670 {
4671 	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4672 	/*
4673 	 * The selected activated PHY index is taken after swapping (in case
4674 	 * PHY swapping is enabled), so when swapping is enabled we need to
4675 	 * reverse the configuration.
4676 	 */
4677 
4678 	if (bp->link_params.multi_phy_config &
4679 	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4680 		if (sel_phy_idx == EXT_PHY1)
4681 			sel_phy_idx = EXT_PHY2;
4682 		else if (sel_phy_idx == EXT_PHY2)
4683 			sel_phy_idx = EXT_PHY1;
4684 	}
4685 	return LINK_CONFIG_IDX(sel_phy_idx);
4686 }
4687 
4688 #ifdef NETDEV_FCOE_WWNN
4689 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4690 {
4691 	struct bnx2x *bp = netdev_priv(dev);
4692 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4693 
4694 	switch (type) {
4695 	case NETDEV_FCOE_WWNN:
4696 		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4697 				cp->fcoe_wwn_node_name_lo);
4698 		break;
4699 	case NETDEV_FCOE_WWPN:
4700 		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4701 				cp->fcoe_wwn_port_name_lo);
4702 		break;
4703 	default:
4704 		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4705 		return -EINVAL;
4706 	}
4707 
4708 	return 0;
4709 }
4710 #endif
4711 
4712 /* called with rtnl_lock */
4713 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4714 {
4715 	struct bnx2x *bp = netdev_priv(dev);
4716 
4717 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4718 		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4719 		return -EAGAIN;
4720 	}
4721 
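	/* Reject MTUs above ETH_MAX_JUMBO_PACKET_SIZE, or so small that the
	 * resulting frame (MTU + ETH_HLEN) would be below ETH_MIN_PACKET_SIZE.
	 */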
4722 	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4723 	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4724 		BNX2X_ERR("Can't support requested MTU size\n");
4725 		return -EINVAL;
4726 	}
4727 
4728 	/* This does not race with packet allocation
4729 	 * because the actual alloc size is
4730 	 * only updated as part of load
4731 	 */
4732 	dev->mtu = new_mtu;
4733 
4734 	return bnx2x_reload_if_running(dev);
4735 }
4736 
4737 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4738 				     netdev_features_t features)
4739 {
4740 	struct bnx2x *bp = netdev_priv(dev);
4741 
4742 	/* TPA requires Rx CSUM offloading */
4743 	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4744 		features &= ~NETIF_F_LRO;
4745 		features &= ~NETIF_F_GRO;
4746 	}
4747 
4748 	return features;
4749 }
4750 
4751 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4752 {
4753 	struct bnx2x *bp = netdev_priv(dev);
4754 	u32 flags = bp->flags;
4755 	u32 changes;
4756 	bool bnx2x_reload = false;
4757 
4758 	if (features & NETIF_F_LRO)
4759 		flags |= TPA_ENABLE_FLAG;
4760 	else
4761 		flags &= ~TPA_ENABLE_FLAG;
4762 
4763 	if (features & NETIF_F_GRO)
4764 		flags |= GRO_ENABLE_FLAG;
4765 	else
4766 		flags &= ~GRO_ENABLE_FLAG;
4767 
4768 	if (features & NETIF_F_LOOPBACK) {
4769 		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4770 			bp->link_params.loopback_mode = LOOPBACK_BMAC;
4771 			bnx2x_reload = true;
4772 		}
4773 	} else {
4774 		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4775 			bp->link_params.loopback_mode = LOOPBACK_NONE;
4776 			bnx2x_reload = true;
4777 		}
4778 	}
4779 
4780 	changes = flags ^ bp->flags;
4781 
4782 	/* if GRO is changed while LRO is enabled, don't force a reload */
4783 	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4784 		changes &= ~GRO_ENABLE_FLAG;
4785 
4786 	if (changes)
4787 		bnx2x_reload = true;
4788 
4789 	bp->flags = flags;
4790 
4791 	if (bnx2x_reload) {
4792 		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4793 			return bnx2x_reload_if_running(dev);
4794 		/* else: bnx2x_nic_load() will be called at end of recovery */
4795 	}
4796 
4797 	return 0;
4798 }
4799 
4800 void bnx2x_tx_timeout(struct net_device *dev)
4801 {
4802 	struct bnx2x *bp = netdev_priv(dev);
4803 
4804 #ifdef BNX2X_STOP_ON_ERROR
4805 	if (!bp->panic)
4806 		bnx2x_panic();
4807 #endif
4808 
4809 	/* This allows the netif to be shut down gracefully before resetting */
4810 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4811 }
4812 
4813 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4814 {
4815 	struct net_device *dev = pci_get_drvdata(pdev);
4816 	struct bnx2x *bp;
4817 
4818 	if (!dev) {
4819 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4820 		return -ENODEV;
4821 	}
4822 	bp = netdev_priv(dev);
4823 
4824 	rtnl_lock();
4825 
4826 	pci_save_state(pdev);
4827 
4828 	if (!netif_running(dev)) {
4829 		rtnl_unlock();
4830 		return 0;
4831 	}
4832 
4833 	netif_device_detach(dev);
4834 
4835 	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4836 
4837 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4838 
4839 	rtnl_unlock();
4840 
4841 	return 0;
4842 }
4843 
4844 int bnx2x_resume(struct pci_dev *pdev)
4845 {
4846 	struct net_device *dev = pci_get_drvdata(pdev);
4847 	struct bnx2x *bp;
4848 	int rc;
4849 
4850 	if (!dev) {
4851 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4852 		return -ENODEV;
4853 	}
4854 	bp = netdev_priv(dev);
4855 
4856 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4857 		BNX2X_ERR("Handling parity error recovery. Try again later\n");
4858 		return -EAGAIN;
4859 	}
4860 
4861 	rtnl_lock();
4862 
4863 	pci_restore_state(pdev);
4864 
4865 	if (!netif_running(dev)) {
4866 		rtnl_unlock();
4867 		return 0;
4868 	}
4869 
4870 	bnx2x_set_power_state(bp, PCI_D0);
4871 	netif_device_attach(dev);
4872 
4873 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
4874 
4875 	rtnl_unlock();
4876 
4877 	return rc;
4878 }
4879 
4880 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4881 			      u32 cid)
4882 {
4883 	if (!cxt) {
4884 		BNX2X_ERR("bad context pointer %p\n", cxt);
4885 		return;
4886 	}
4887 
4888 	/* ustorm cxt validation */
4889 	cxt->ustorm_ag_context.cdu_usage =
4890 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4891 			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4892 	/* xcontext validation */
4893 	cxt->xstorm_ag_context.cdu_reserved =
4894 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4895 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4896 }
4897 
4898 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4899 				    u8 fw_sb_id, u8 sb_index,
4900 				    u8 ticks)
4901 {
4902 	u32 addr = BAR_CSTRORM_INTMEM +
4903 		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4904 	REG_WR8(bp, addr, ticks);
4905 	DP(NETIF_MSG_IFUP,
4906 	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
4907 	   port, fw_sb_id, sb_index, ticks);
4908 }
4909 
4910 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4911 				    u16 fw_sb_id, u8 sb_index,
4912 				    u8 disable)
4913 {
4914 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4915 	u32 addr = BAR_CSTRORM_INTMEM +
4916 		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4917 	u8 flags = REG_RD8(bp, addr);
4918 	/* clear and set */
4919 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4920 	flags |= enable_flag;
4921 	REG_WR8(bp, addr, flags);
4922 	DP(NETIF_MSG_IFUP,
4923 	   "port %x fw_sb_id %d sb_index %d disable %d\n",
4924 	   port, fw_sb_id, sb_index, disable);
4925 }
4926 
4927 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4928 				    u8 sb_index, u8 disable, u16 usec)
4929 {
4930 	int port = BP_PORT(bp);
4931 	u8 ticks = usec / BNX2X_BTR;
4932 
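	/* The timeout is programmed in units of BNX2X_BTR microseconds;
	 * a zero usec value disables host coalescing for this index.
	 */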
4933 	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4934 
4935 	disable = disable ? 1 : (usec ? 0 : 1);
4936 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4937 }
4938 
4939 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4940 			    u32 verbose)
4941 {
4942 	smp_mb__before_clear_bit();
4943 	set_bit(flag, &bp->sp_rtnl_state);
4944 	smp_mb__after_clear_bit();
4945 	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4946 	   flag);
4947 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
4948 }
4949 EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
4950