xref: /openbmc/linux/drivers/net/ethernet/brocade/bna/bnad.c (revision 72a9730b3f556e18912f3e1b494a7aee7ae3dd91)
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
29 
30 #include "bnad.h"
31 #include "bna.h"
32 #include "cna.h"
33 
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
35 
36 /*
37  * Module params
38  */
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42 
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46 
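/*
 * Both parameters are read-only at runtime (0444) and are intended to be
 * set at load time, e.g. (assuming the driver module is loaded as "bna"):
 *	modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 */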
47 /*
48  * Global variables
49  */
50 u32 bnad_rxqs_per_cq = 2;
51 u32 bna_id;
52 struct mutex bnad_list_mutex;
53 LIST_HEAD(bnad_list);
54 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
55 
56 /*
57  * Local MACROS
58  */
59 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
60 
61 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
62 
63 #define BNAD_GET_MBOX_IRQ(_bnad)				\
64 	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
65 	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
66 	 ((_bnad)->pcidev->irq))
67 
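/*
 * Fills in the memory requirement for a Tx/Rx unmap queue: one KVA block
 * per queue, sized as the bnad_unmap_q header plus (depth - 1) additional
 * bnad_skb_unmap entries (the structure size already covers the first one).
 */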
68 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
69 do {								\
70 	(_res_info)->res_type = BNA_RES_T_MEM;			\
71 	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
72 	(_res_info)->res_u.mem_info.num = (_num);		\
73 	(_res_info)->res_u.mem_info.len =			\
74 	sizeof(struct bnad_unmap_q) +				\
75 	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
76 } while (0)
77 
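/*
 * Delay used in the Tx/Rx cleanup callbacks to let any in-flight
 * datapath processing drain before buffers are freed.
 */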
78 #define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
79 
80 static void
81 bnad_add_to_list(struct bnad *bnad)
82 {
83 	mutex_lock(&bnad_list_mutex);
84 	list_add_tail(&bnad->list_entry, &bnad_list);
85 	bnad->id = bna_id++;
86 	mutex_unlock(&bnad_list_mutex);
87 }
88 
89 static void
90 bnad_remove_from_list(struct bnad *bnad)
91 {
92 	mutex_lock(&bnad_list_mutex);
93 	list_del(&bnad->list_entry);
94 	mutex_unlock(&bnad_list_mutex);
95 }
96 
97 /*
98  * Reinitialize completions in CQ, once Rx is taken down
99  */
100 static void
101 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
102 {
103 	struct bna_cq_entry *cmpl, *next_cmpl;
104 	unsigned int wi_range, wis = 0, ccb_prod = 0;
105 	int i;
106 
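	/*
	 * Walk the CQ page by page: BNA_CQ_QPGE_PTR_GET returns a pointer
	 * into the current page and the number of entries left on it
	 * (wi_range); when that runs out, advance the index and fetch
	 * the next page.
	 */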
107 	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
108 			    wi_range);
109 
110 	for (i = 0; i < ccb->q_depth; i++) {
111 		wis++;
112 		if (likely(--wi_range))
113 			next_cmpl = cmpl + 1;
114 		else {
115 			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
116 			wis = 0;
117 			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
118 						next_cmpl, wi_range);
119 		}
120 		cmpl->valid = 0;
121 		cmpl = next_cmpl;
122 	}
123 }
124 
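/*
 * Unmaps the skb header and each page fragment that was DMA-mapped for
 * transmit, clearing the saved DMA addresses and advancing the circular
 * unmap-queue index; returns the updated index.
 */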
125 static u32
126 bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
127 	u32 index, u32 depth, struct sk_buff *skb, u32 frag)
128 {
129 	int j;
130 	array[index].skb = NULL;
131 
132 	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
133 			skb_headlen(skb), DMA_TO_DEVICE);
134 	dma_unmap_addr_set(&array[index], dma_addr, 0);
135 	BNA_QE_INDX_ADD(index, 1, depth);
136 
137 	for (j = 0; j < frag; j++) {
138 		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
139 			  skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
140 		dma_unmap_addr_set(&array[index], dma_addr, 0);
141 		BNA_QE_INDX_ADD(index, 1, depth);
142 	}
143 
144 	return index;
145 }
146 
147 /*
148  * Frees all pending Tx buffers.
149  * At this point no activity is expected on the Q,
150  * so DMA unmap & freeing is fine.
151  */
152 static void
153 bnad_free_all_txbufs(struct bnad *bnad,
154 		 struct bna_tcb *tcb)
155 {
156 	u32		unmap_cons;
157 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
158 	struct bnad_skb_unmap *unmap_array;
159 	struct sk_buff		*skb = NULL;
160 	int			q;
161 
162 	unmap_array = unmap_q->unmap_array;
163 
164 	for (q = 0; q < unmap_q->q_depth; q++) {
165 		skb = unmap_array[q].skb;
166 		if (!skb)
167 			continue;
168 
169 		unmap_cons = q;
170 		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
171 				unmap_cons, unmap_q->q_depth, skb,
172 				skb_shinfo(skb)->nr_frags);
173 
174 		dev_kfree_skb_any(skb);
175 	}
176 }
177 
178 /* Data Path Handlers */
179 
180 /*
181  * bnad_free_txbufs : Frees the Tx bufs on Tx completion
182  * Can be called in a) Interrupt context
183  *		    b) Sending context
184  *		    c) Tasklet context
185  */
186 static u32
187 bnad_free_txbufs(struct bnad *bnad,
188 		 struct bna_tcb *tcb)
189 {
190 	u32		unmap_cons, sent_packets = 0, sent_bytes = 0;
191 	u16		wis, updated_hw_cons;
192 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
193 	struct bnad_skb_unmap *unmap_array;
194 	struct sk_buff		*skb;
195 
196 	/*
197 	 * Just return if TX is stopped. This check is useful
198 	 * when bnad_free_txbufs() runs from a tasklet that was
199 	 * scheduled before bnad_cb_tx_cleanup() cleared the
200 	 * BNAD_TXQ_TX_STARTED bit, but actually executes only
201 	 * after the cleanup has completed.
202 	 */
203 	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
204 		return 0;
205 
206 	updated_hw_cons = *(tcb->hw_consumer_index);
207 
208 	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
209 				  updated_hw_cons, tcb->q_depth);
210 
211 	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
212 
213 	unmap_array = unmap_q->unmap_array;
214 	unmap_cons = unmap_q->consumer_index;
215 
216 	prefetch(&unmap_array[unmap_cons + 1]);
217 	while (wis) {
218 		skb = unmap_array[unmap_cons].skb;
219 
220 		sent_packets++;
221 		sent_bytes += skb->len;
222 		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
223 
224 		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
225 				unmap_cons, unmap_q->q_depth, skb,
226 				skb_shinfo(skb)->nr_frags);
227 
228 		dev_kfree_skb_any(skb);
229 	}
230 
231 	/* Update consumer pointers. */
232 	tcb->consumer_index = updated_hw_cons;
233 	unmap_q->consumer_index = unmap_cons;
234 
235 	tcb->txq->tx_packets += sent_packets;
236 	tcb->txq->tx_bytes += sent_bytes;
237 
238 	return sent_packets;
239 }
240 
241 /* Tx Free Tasklet function */
242 /* Frees for all the tcb's in all the Tx's */
243 /*
244  * Scheduled from the sending context, so that
245  * the Tx lock is not held for too long
246  * while packets are being transmitted.
247  */
248 static void
249 bnad_tx_free_tasklet(unsigned long bnad_ptr)
250 {
251 	struct bnad *bnad = (struct bnad *)bnad_ptr;
252 	struct bna_tcb *tcb;
253 	u32		acked = 0;
254 	int			i, j;
255 
256 	for (i = 0; i < bnad->num_tx; i++) {
257 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
258 			tcb = bnad->tx_info[i].tcb[j];
259 			if (!tcb)
260 				continue;
261 			if (((u16) (*tcb->hw_consumer_index) !=
262 				tcb->consumer_index) &&
263 				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
264 						  &tcb->flags))) {
265 				acked = bnad_free_txbufs(bnad, tcb);
266 				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
267 					&tcb->flags)))
268 					bna_ib_ack(tcb->i_dbell, acked);
269 				smp_mb__before_clear_bit();
270 				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
271 			}
272 			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
273 						&tcb->flags)))
274 				continue;
275 			if (netif_queue_stopped(bnad->netdev)) {
276 				if (acked && netif_carrier_ok(bnad->netdev) &&
277 					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
278 						BNAD_NETIF_WAKE_THRESHOLD) {
279 					netif_wake_queue(bnad->netdev);
280 					/* TODO */
281 					/* Counters for individual TxQs? */
282 					BNAD_UPDATE_CTR(bnad,
283 						netif_queue_wakeup);
284 				}
285 			}
286 		}
287 	}
288 }
289 
290 static u32
291 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
292 {
293 	struct net_device *netdev = bnad->netdev;
294 	u32 sent = 0;
295 
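	/*
	 * BNAD_TXQ_FREE_SENT serializes Tx completion processing with the
	 * free tasklet; if another context already owns the queue, let it
	 * do the reaping.
	 */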
296 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
297 		return 0;
298 
299 	sent = bnad_free_txbufs(bnad, tcb);
300 	if (sent) {
301 		if (netif_queue_stopped(netdev) &&
302 		    netif_carrier_ok(netdev) &&
303 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
304 				    BNAD_NETIF_WAKE_THRESHOLD) {
305 			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
306 				netif_wake_queue(netdev);
307 				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
308 			}
309 		}
310 	}
311 
312 	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
313 		bna_ib_ack(tcb->i_dbell, sent);
314 
315 	smp_mb__before_clear_bit();
316 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
317 
318 	return sent;
319 }
320 
321 /* MSIX Tx Completion Handler */
322 static irqreturn_t
323 bnad_msix_tx(int irq, void *data)
324 {
325 	struct bna_tcb *tcb = (struct bna_tcb *)data;
326 	struct bnad *bnad = tcb->bnad;
327 
328 	bnad_tx(bnad, tcb);
329 
330 	return IRQ_HANDLED;
331 }
332 
333 static void
334 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
335 {
336 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
337 
338 	rcb->producer_index = 0;
339 	rcb->consumer_index = 0;
340 
341 	unmap_q->producer_index = 0;
342 	unmap_q->consumer_index = 0;
343 }
344 
345 static void
346 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
347 {
348 	struct bnad_unmap_q *unmap_q;
349 	struct bnad_skb_unmap *unmap_array;
350 	struct sk_buff *skb;
351 	int unmap_cons;
352 
353 	unmap_q = rcb->unmap_q;
354 	unmap_array = unmap_q->unmap_array;
355 	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
356 		skb = unmap_array[unmap_cons].skb;
357 		if (!skb)
358 			continue;
359 		unmap_array[unmap_cons].skb = NULL;
360 		dma_unmap_single(&bnad->pcidev->dev,
361 				 dma_unmap_addr(&unmap_array[unmap_cons],
362 						dma_addr),
363 				 rcb->rxq->buffer_size,
364 				 DMA_FROM_DEVICE);
365 		dev_kfree_skb(skb);
366 	}
367 	bnad_reset_rcb(bnad, rcb);
368 }
369 
370 static void
371 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
372 {
373 	u16 to_alloc, alloced, unmap_prod, wi_range;
374 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
375 	struct bnad_skb_unmap *unmap_array;
376 	struct bna_rxq_entry *rxent;
377 	struct sk_buff *skb;
378 	dma_addr_t dma_addr;
379 
380 	alloced = 0;
381 	to_alloc =
382 		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
383 
384 	unmap_array = unmap_q->unmap_array;
385 	unmap_prod = unmap_q->producer_index;
386 
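	/*
	 * Allocate, DMA-map and post one buffer per free unmap-queue slot;
	 * the doorbell is rung at the end, and only if posting is allowed.
	 */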
387 	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
388 
389 	while (to_alloc--) {
390 		if (!wi_range)
391 			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
392 					     wi_range);
393 		skb = netdev_alloc_skb_ip_align(bnad->netdev,
394 						rcb->rxq->buffer_size);
395 		if (unlikely(!skb)) {
396 			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
397 			rcb->rxq->rxbuf_alloc_failed++;
398 			goto finishing;
399 		}
400 		unmap_array[unmap_prod].skb = skb;
401 		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
402 					  rcb->rxq->buffer_size,
403 					  DMA_FROM_DEVICE);
404 		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
405 				   dma_addr);
406 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
407 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
408 
409 		rxent++;
410 		wi_range--;
411 		alloced++;
412 	}
413 
414 finishing:
415 	if (likely(alloced)) {
416 		unmap_q->producer_index = unmap_prod;
417 		rcb->producer_index = unmap_prod;
418 		smp_mb();
419 		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
420 			bna_rxq_prod_indx_doorbell(rcb);
421 	}
422 }
423 
424 static inline void
425 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
426 {
427 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
428 
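	/*
	 * Refill only once the free-slot count crosses the threshold;
	 * BNAD_RXQ_REFILL keeps two contexts from refilling the same RxQ.
	 */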
429 	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
430 		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
431 			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
432 			bnad_alloc_n_post_rxbufs(bnad, rcb);
433 		smp_mb__before_clear_bit();
434 		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
435 	}
436 }
437 
438 static u32
439 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
440 {
441 	struct bna_cq_entry *cmpl, *next_cmpl;
442 	struct bna_rcb *rcb = NULL;
443 	unsigned int wi_range, packets = 0, wis = 0;
444 	struct bnad_unmap_q *unmap_q;
445 	struct bnad_skb_unmap *unmap_array;
446 	struct sk_buff *skb;
447 	u32 flags, unmap_cons;
448 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
449 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
450 
451 	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
452 
453 	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
454 		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
455 		return 0;
456 	}
457 
458 	prefetch(bnad->netdev);
459 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
460 			    wi_range);
461 	BUG_ON(!(wi_range <= ccb->q_depth));
462 	while (cmpl->valid && packets < budget) {
463 		packets++;
464 		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
465 
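		/*
		 * The completion carries the RxQ id; the small-buffer RxQ
		 * of the pair maps to rcb[1], the default one to rcb[0].
		 */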
466 		if (bna_is_small_rxq(cmpl->rxq_id))
467 			rcb = ccb->rcb[1];
468 		else
469 			rcb = ccb->rcb[0];
470 
471 		unmap_q = rcb->unmap_q;
472 		unmap_array = unmap_q->unmap_array;
473 		unmap_cons = unmap_q->consumer_index;
474 
475 		skb = unmap_array[unmap_cons].skb;
476 		BUG_ON(!(skb));
477 		unmap_array[unmap_cons].skb = NULL;
478 		dma_unmap_single(&bnad->pcidev->dev,
479 				 dma_unmap_addr(&unmap_array[unmap_cons],
480 						dma_addr),
481 				 rcb->rxq->buffer_size,
482 				 DMA_FROM_DEVICE);
483 		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
484 
485 		/* Should be more efficient ? Performance ? */
486 		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
487 
488 		wis++;
489 		if (likely(--wi_range))
490 			next_cmpl = cmpl + 1;
491 		else {
492 			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
493 			wis = 0;
494 			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
495 						next_cmpl, wi_range);
496 			BUG_ON(!(wi_range <= ccb->q_depth));
497 		}
498 		prefetch(next_cmpl);
499 
500 		flags = ntohl(cmpl->flags);
501 		if (unlikely
502 		    (flags &
503 		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
504 		      BNA_CQ_EF_TOO_LONG))) {
505 			dev_kfree_skb_any(skb);
506 			rcb->rxq->rx_packets_with_error++;
507 			goto next;
508 		}
509 
510 		skb_put(skb, ntohs(cmpl->length));
511 		if (likely
512 		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
513 		     (((flags & BNA_CQ_EF_IPV4) &&
514 		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
515 		      (flags & BNA_CQ_EF_IPV6)) &&
516 		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
517 		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
518 			skb->ip_summed = CHECKSUM_UNNECESSARY;
519 		else
520 			skb_checksum_none_assert(skb);
521 
522 		rcb->rxq->rx_packets++;
523 		rcb->rxq->rx_bytes += skb->len;
524 		skb->protocol = eth_type_trans(skb, bnad->netdev);
525 
526 		if (flags & BNA_CQ_EF_VLAN)
527 			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
528 
529 		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
530 			napi_gro_receive(&rx_ctrl->napi, skb);
531 		else {
532 			netif_receive_skb(skb);
533 		}
534 
535 next:
536 		cmpl->valid = 0;
537 		cmpl = next_cmpl;
538 	}
539 
540 	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
541 
542 	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
543 		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
544 
545 	bnad_refill_rxq(bnad, ccb->rcb[0]);
546 	if (ccb->rcb[1])
547 		bnad_refill_rxq(bnad, ccb->rcb[1]);
548 
549 	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
550 
551 	return packets;
552 }
553 
554 static void
555 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
556 {
557 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
558 	struct napi_struct *napi = &rx_ctrl->napi;
559 
560 	if (likely(napi_schedule_prep(napi))) {
561 		__napi_schedule(napi);
562 		rx_ctrl->rx_schedule++;
563 	}
564 }
565 
566 /* MSIX Rx Path Handler */
567 static irqreturn_t
568 bnad_msix_rx(int irq, void *data)
569 {
570 	struct bna_ccb *ccb = (struct bna_ccb *)data;
571 
572 	if (ccb) {
573 		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
574 		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
575 	}
576 
577 	return IRQ_HANDLED;
578 }
579 
580 /* Interrupt handlers */
581 
582 /* Mbox Interrupt Handlers */
583 static irqreturn_t
584 bnad_msix_mbox_handler(int irq, void *data)
585 {
586 	u32 intr_status;
587 	unsigned long flags;
588 	struct bnad *bnad = (struct bnad *)data;
589 
590 	spin_lock_irqsave(&bnad->bna_lock, flags);
591 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
592 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
593 		return IRQ_HANDLED;
594 	}
595 
596 	bna_intr_status_get(&bnad->bna, intr_status);
597 
598 	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
599 		bna_mbox_handler(&bnad->bna, intr_status);
600 
601 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
602 
603 	return IRQ_HANDLED;
604 }
605 
606 static irqreturn_t
607 bnad_isr(int irq, void *data)
608 {
609 	int i, j;
610 	u32 intr_status;
611 	unsigned long flags;
612 	struct bnad *bnad = (struct bnad *)data;
613 	struct bnad_rx_info *rx_info;
614 	struct bnad_rx_ctrl *rx_ctrl;
615 	struct bna_tcb *tcb = NULL;
616 
617 	spin_lock_irqsave(&bnad->bna_lock, flags);
618 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
619 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
620 		return IRQ_NONE;
621 	}
622 
623 	bna_intr_status_get(&bnad->bna, intr_status);
624 
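	/*
	 * The INTx line may be shared; if none of our status bits are set,
	 * the interrupt belongs to another device, so report IRQ_NONE.
	 */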
625 	if (unlikely(!intr_status)) {
626 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
627 		return IRQ_NONE;
628 	}
629 
630 	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
631 		bna_mbox_handler(&bnad->bna, intr_status);
632 
633 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
634 
635 	if (!BNA_IS_INTX_DATA_INTR(intr_status))
636 		return IRQ_HANDLED;
637 
638 	/* Process data interrupts */
639 	/* Tx processing */
640 	for (i = 0; i < bnad->num_tx; i++) {
641 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
642 			tcb = bnad->tx_info[i].tcb[j];
643 			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
644 				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
645 		}
646 	}
647 	/* Rx processing */
648 	for (i = 0; i < bnad->num_rx; i++) {
649 		rx_info = &bnad->rx_info[i];
650 		if (!rx_info->rx)
651 			continue;
652 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
653 			rx_ctrl = &rx_info->rx_ctrl[j];
654 			if (rx_ctrl->ccb)
655 				bnad_netif_rx_schedule_poll(bnad,
656 							    rx_ctrl->ccb);
657 		}
658 	}
659 	return IRQ_HANDLED;
660 }
661 
662 /*
663  * Called in interrupt / callback context
664  * with bna_lock held, so cfg_flags access is OK
665  */
666 static void
667 bnad_enable_mbox_irq(struct bnad *bnad)
668 {
669 	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
670 
671 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
672 }
673 
674 /*
675  * Called with bnad->bna_lock held because of
676  * the bnad->cfg_flags access.
677  */
678 static void
679 bnad_disable_mbox_irq(struct bnad *bnad)
680 {
681 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
682 
683 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
684 }
685 
686 static void
687 bnad_set_netdev_perm_addr(struct bnad *bnad)
688 {
689 	struct net_device *netdev = bnad->netdev;
690 
691 	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
692 	if (is_zero_ether_addr(netdev->dev_addr))
693 		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
694 }
695 
696 /* Control Path Handlers */
697 
698 /* Callbacks */
699 void
700 bnad_cb_mbox_intr_enable(struct bnad *bnad)
701 {
702 	bnad_enable_mbox_irq(bnad);
703 }
704 
705 void
706 bnad_cb_mbox_intr_disable(struct bnad *bnad)
707 {
708 	bnad_disable_mbox_irq(bnad);
709 }
710 
711 void
712 bnad_cb_ioceth_ready(struct bnad *bnad)
713 {
714 	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
715 	complete(&bnad->bnad_completions.ioc_comp);
716 }
717 
718 void
719 bnad_cb_ioceth_failed(struct bnad *bnad)
720 {
721 	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
722 	complete(&bnad->bnad_completions.ioc_comp);
723 }
724 
725 void
726 bnad_cb_ioceth_disabled(struct bnad *bnad)
727 {
728 	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
729 	complete(&bnad->bnad_completions.ioc_comp);
730 }
731 
732 static void
733 bnad_cb_enet_disabled(void *arg)
734 {
735 	struct bnad *bnad = (struct bnad *)arg;
736 
737 	netif_carrier_off(bnad->netdev);
738 	complete(&bnad->bnad_completions.enet_comp);
739 }
740 
741 void
742 bnad_cb_ethport_link_status(struct bnad *bnad,
743 			enum bna_link_status link_status)
744 {
745 	bool link_up = false;
746 
747 	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
748 
749 	if (link_status == BNA_CEE_UP) {
750 		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
751 			BNAD_UPDATE_CTR(bnad, cee_toggle);
752 		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
753 	} else {
754 		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
755 			BNAD_UPDATE_CTR(bnad, cee_toggle);
756 		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
757 	}
758 
759 	if (link_up) {
760 		if (!netif_carrier_ok(bnad->netdev)) {
761 			uint tx_id, tcb_id;
762 			printk(KERN_WARNING "bna: %s link up\n",
763 				bnad->netdev->name);
764 			netif_carrier_on(bnad->netdev);
765 			BNAD_UPDATE_CTR(bnad, link_toggle);
766 			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
767 				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
768 				      tcb_id++) {
769 					struct bna_tcb *tcb =
770 					bnad->tx_info[tx_id].tcb[tcb_id];
771 					u32 txq_id;
772 					if (!tcb)
773 						continue;
774 
775 					txq_id = tcb->id;
776 
777 					if (test_bit(BNAD_TXQ_TX_STARTED,
778 						     &tcb->flags)) {
779 						/*
780 						 * Force an immediate
781 						 * Transmit Schedule */
782 						printk(KERN_INFO "bna: %s %d "
783 						      "TXQ_STARTED\n",
784 						       bnad->netdev->name,
785 						       txq_id);
786 						netif_wake_subqueue(
787 								bnad->netdev,
788 								txq_id);
789 						BNAD_UPDATE_CTR(bnad,
790 							netif_queue_wakeup);
791 					} else {
792 						netif_stop_subqueue(
793 								bnad->netdev,
794 								txq_id);
795 						BNAD_UPDATE_CTR(bnad,
796 							netif_queue_stop);
797 					}
798 				}
799 			}
800 		}
801 	} else {
802 		if (netif_carrier_ok(bnad->netdev)) {
803 			printk(KERN_WARNING "bna: %s link down\n",
804 				bnad->netdev->name);
805 			netif_carrier_off(bnad->netdev);
806 			BNAD_UPDATE_CTR(bnad, link_toggle);
807 		}
808 	}
809 }
810 
811 static void
812 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
813 {
814 	struct bnad *bnad = (struct bnad *)arg;
815 
816 	complete(&bnad->bnad_completions.tx_comp);
817 }
818 
819 static void
820 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
821 {
822 	struct bnad_tx_info *tx_info =
823 			(struct bnad_tx_info *)tcb->txq->tx->priv;
824 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
825 
826 	tx_info->tcb[tcb->id] = tcb;
827 	unmap_q->producer_index = 0;
828 	unmap_q->consumer_index = 0;
829 	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
830 }
831 
832 static void
833 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
834 {
835 	struct bnad_tx_info *tx_info =
836 			(struct bnad_tx_info *)tcb->txq->tx->priv;
837 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
838 
839 	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
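	/*
	 * Take ownership of the TxQ: wait until any in-progress completion
	 * processing (the BNAD_TXQ_FREE_SENT holder) finishes before freeing.
	 */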
840 		cpu_relax();
841 
842 	bnad_free_all_txbufs(bnad, tcb);
843 
844 	unmap_q->producer_index = 0;
845 	unmap_q->consumer_index = 0;
846 
847 	smp_mb__before_clear_bit();
848 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
849 
850 	tx_info->tcb[tcb->id] = NULL;
851 }
852 
853 static void
854 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
855 {
856 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
857 
858 	unmap_q->producer_index = 0;
859 	unmap_q->consumer_index = 0;
860 	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
861 }
862 
863 static void
864 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
865 {
866 	bnad_free_all_rxbufs(bnad, rcb);
867 }
868 
869 static void
870 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
871 {
872 	struct bnad_rx_info *rx_info =
873 			(struct bnad_rx_info *)ccb->cq->rx->priv;
874 
875 	rx_info->rx_ctrl[ccb->id].ccb = ccb;
876 	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
877 }
878 
879 static void
880 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
881 {
882 	struct bnad_rx_info *rx_info =
883 			(struct bnad_rx_info *)ccb->cq->rx->priv;
884 
885 	rx_info->rx_ctrl[ccb->id].ccb = NULL;
886 }
887 
888 static void
889 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
890 {
891 	struct bnad_tx_info *tx_info =
892 			(struct bnad_tx_info *)tx->priv;
893 	struct bna_tcb *tcb;
894 	u32 txq_id;
895 	int i;
896 
897 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
898 		tcb = tx_info->tcb[i];
899 		if (!tcb)
900 			continue;
901 		txq_id = tcb->id;
902 		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
903 		netif_stop_subqueue(bnad->netdev, txq_id);
904 		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
905 			bnad->netdev->name, txq_id);
906 	}
907 }
908 
909 static void
910 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
911 {
912 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
913 	struct bna_tcb *tcb;
914 	struct bnad_unmap_q *unmap_q;
915 	u32 txq_id;
916 	int i;
917 
918 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
919 		tcb = tx_info->tcb[i];
920 		if (!tcb)
921 			continue;
922 		txq_id = tcb->id;
923 
924 		unmap_q = tcb->unmap_q;
925 
926 		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
927 			continue;
928 
929 		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
930 			cpu_relax();
931 
932 		bnad_free_all_txbufs(bnad, tcb);
933 
934 		unmap_q->producer_index = 0;
935 		unmap_q->consumer_index = 0;
936 
937 		smp_mb__before_clear_bit();
938 		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
939 
940 		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
941 
942 		if (netif_carrier_ok(bnad->netdev)) {
943 			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
944 				bnad->netdev->name, txq_id);
945 			netif_wake_subqueue(bnad->netdev, txq_id);
946 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
947 		}
948 	}
949 
950 	/*
951 	 * Workaround: if the first ioceth enable failed and we
952 	 * got a zero MAC address, try to fetch the MAC address
953 	 * again here.
954 	 */
955 	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
956 		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
957 		bnad_set_netdev_perm_addr(bnad);
958 	}
959 }
960 
961 static void
962 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
963 {
964 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
965 	struct bna_tcb *tcb;
966 	int i;
967 
968 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
969 		tcb = tx_info->tcb[i];
970 		if (!tcb)
971 			continue;
972 	}
973 
974 	mdelay(BNAD_TXRX_SYNC_MDELAY);
975 	bna_tx_cleanup_complete(tx);
976 }
977 
978 static void
979 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
980 {
981 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
982 	struct bna_ccb *ccb;
983 	struct bnad_rx_ctrl *rx_ctrl;
984 	int i;
985 
986 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
987 		rx_ctrl = &rx_info->rx_ctrl[i];
988 		ccb = rx_ctrl->ccb;
989 		if (!ccb)
990 			continue;
991 
992 		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
993 
994 		if (ccb->rcb[1])
995 			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
996 	}
997 }
998 
999 static void
1000 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1001 {
1002 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1003 	struct bna_ccb *ccb;
1004 	struct bnad_rx_ctrl *rx_ctrl;
1005 	int i;
1006 
1007 	mdelay(BNAD_TXRX_SYNC_MDELAY);
1008 
1009 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1010 		rx_ctrl = &rx_info->rx_ctrl[i];
1011 		ccb = rx_ctrl->ccb;
1012 		if (!ccb)
1013 			continue;
1014 
1015 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1016 
1017 		if (ccb->rcb[1])
1018 			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1019 
1020 		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
1021 			cpu_relax();
1022 	}
1023 
1024 	bna_rx_cleanup_complete(rx);
1025 }
1026 
1027 static void
1028 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1029 {
1030 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1031 	struct bna_ccb *ccb;
1032 	struct bna_rcb *rcb;
1033 	struct bnad_rx_ctrl *rx_ctrl;
1034 	struct bnad_unmap_q *unmap_q;
1035 	int i;
1036 	int j;
1037 
1038 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1039 		rx_ctrl = &rx_info->rx_ctrl[i];
1040 		ccb = rx_ctrl->ccb;
1041 		if (!ccb)
1042 			continue;
1043 
1044 		bnad_cq_cmpl_init(bnad, ccb);
1045 
1046 		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1047 			rcb = ccb->rcb[j];
1048 			if (!rcb)
1049 				continue;
1050 			bnad_free_all_rxbufs(bnad, rcb);
1051 
1052 			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1053 			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1054 			unmap_q = rcb->unmap_q;
1055 
1056 			/* Now allocate & post buffers for this RCB */
1057 			/* !!Allocation in callback context */
1058 			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1059 				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1060 					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1061 					bnad_alloc_n_post_rxbufs(bnad, rcb);
1062 				smp_mb__before_clear_bit();
1063 				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1064 			}
1065 		}
1066 	}
1067 }
1068 
1069 static void
1070 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1071 {
1072 	struct bnad *bnad = (struct bnad *)arg;
1073 
1074 	complete(&bnad->bnad_completions.rx_comp);
1075 }
1076 
1077 static void
1078 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1079 {
1080 	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1081 	complete(&bnad->bnad_completions.mcast_comp);
1082 }
1083 
1084 void
1085 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1086 		       struct bna_stats *stats)
1087 {
1088 	if (status == BNA_CB_SUCCESS)
1089 		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1090 
1091 	if (!netif_running(bnad->netdev) ||
1092 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1093 		return;
1094 
1095 	mod_timer(&bnad->stats_timer,
1096 		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1097 }
1098 
1099 static void
1100 bnad_cb_enet_mtu_set(struct bnad *bnad)
1101 {
1102 	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1103 	complete(&bnad->bnad_completions.mtu_comp);
1104 }
1105 
1106 void
1107 bnad_cb_completion(void *arg, enum bfa_status status)
1108 {
1109 	struct bnad_iocmd_comp *iocmd_comp =
1110 			(struct bnad_iocmd_comp *)arg;
1111 
1112 	iocmd_comp->comp_status = (u32) status;
1113 	complete(&iocmd_comp->comp);
1114 }
1115 
1116 /* Resource allocation, free functions */
1117 
1118 static void
1119 bnad_mem_free(struct bnad *bnad,
1120 	      struct bna_mem_info *mem_info)
1121 {
1122 	int i;
1123 	dma_addr_t dma_pa;
1124 
1125 	if (mem_info->mdl == NULL)
1126 		return;
1127 
1128 	for (i = 0; i < mem_info->num; i++) {
1129 		if (mem_info->mdl[i].kva != NULL) {
1130 			if (mem_info->mem_type == BNA_MEM_T_DMA) {
1131 				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1132 						dma_pa);
1133 				dma_free_coherent(&bnad->pcidev->dev,
1134 						  mem_info->mdl[i].len,
1135 						  mem_info->mdl[i].kva, dma_pa);
1136 			} else
1137 				kfree(mem_info->mdl[i].kva);
1138 		}
1139 	}
1140 	kfree(mem_info->mdl);
1141 	mem_info->mdl = NULL;
1142 }
1143 
1144 static int
1145 bnad_mem_alloc(struct bnad *bnad,
1146 	       struct bna_mem_info *mem_info)
1147 {
1148 	int i;
1149 	dma_addr_t dma_pa;
1150 
1151 	if ((mem_info->num == 0) || (mem_info->len == 0)) {
1152 		mem_info->mdl = NULL;
1153 		return 0;
1154 	}
1155 
1156 	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1157 				GFP_KERNEL);
1158 	if (mem_info->mdl == NULL)
1159 		return -ENOMEM;
1160 
1161 	if (mem_info->mem_type == BNA_MEM_T_DMA) {
1162 		for (i = 0; i < mem_info->num; i++) {
1163 			mem_info->mdl[i].len = mem_info->len;
1164 			mem_info->mdl[i].kva =
1165 				dma_alloc_coherent(&bnad->pcidev->dev,
1166 						mem_info->len, &dma_pa,
1167 						GFP_KERNEL);
1168 
1169 			if (mem_info->mdl[i].kva == NULL)
1170 				goto err_return;
1171 
1172 			BNA_SET_DMA_ADDR(dma_pa,
1173 					 &(mem_info->mdl[i].dma));
1174 		}
1175 	} else {
1176 		for (i = 0; i < mem_info->num; i++) {
1177 			mem_info->mdl[i].len = mem_info->len;
1178 			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1179 							GFP_KERNEL);
1180 			if (mem_info->mdl[i].kva == NULL)
1181 				goto err_return;
1182 		}
1183 	}
1184 
1185 	return 0;
1186 
1187 err_return:
1188 	bnad_mem_free(bnad, mem_info);
1189 	return -ENOMEM;
1190 }
1191 
1192 /* Free IRQ for Mailbox */
1193 static void
1194 bnad_mbox_irq_free(struct bnad *bnad)
1195 {
1196 	int irq;
1197 	unsigned long flags;
1198 
1199 	spin_lock_irqsave(&bnad->bna_lock, flags);
1200 	bnad_disable_mbox_irq(bnad);
1201 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1202 
1203 	irq = BNAD_GET_MBOX_IRQ(bnad);
1204 	free_irq(irq, bnad);
1205 }
1206 
1207 /*
1208  * Allocates the IRQ for the Mailbox, but keeps it disabled.
1209  * It will be enabled once we get the mbox enable callback
1210  * from bna.
1211  */
1212 static int
1213 bnad_mbox_irq_alloc(struct bnad *bnad)
1214 {
1215 	int		err = 0;
1216 	unsigned long	irq_flags, flags;
1217 	u32	irq;
1218 	irq_handler_t	irq_handler;
1219 
1220 	spin_lock_irqsave(&bnad->bna_lock, flags);
1221 	if (bnad->cfg_flags & BNAD_CF_MSIX) {
1222 		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1223 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1224 		irq_flags = 0;
1225 	} else {
1226 		irq_handler = (irq_handler_t)bnad_isr;
1227 		irq = bnad->pcidev->irq;
1228 		irq_flags = IRQF_SHARED;
1229 	}
1230 
1231 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1232 	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1233 
1234 	/*
1235 	 * Set the Mbox IRQ disable flag, so that the IRQ handler
1236 	 * called from request_irq() for SHARED IRQs does not execute
1237 	 */
1238 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1239 
1240 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1241 
1242 	err = request_irq(irq, irq_handler, irq_flags,
1243 			  bnad->mbox_irq_name, bnad);
1244 
1245 	return err;
1246 }
1247 
1248 static void
1249 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1250 {
1251 	kfree(intr_info->idl);
1252 	intr_info->idl = NULL;
1253 }
1254 
1255 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1256 static int
1257 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1258 		    u32 txrx_id, struct bna_intr_info *intr_info)
1259 {
1260 	int i, vector_start = 0;
1261 	u32 cfg_flags;
1262 	unsigned long flags;
1263 
1264 	spin_lock_irqsave(&bnad->bna_lock, flags);
1265 	cfg_flags = bnad->cfg_flags;
1266 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1267 
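	/*
	 * MSIX vector layout: mailbox vector(s) first, then one vector per
	 * TxQ, then one per RxP; hence the Rx vectors start after
	 * num_tx * num_txq_per_tx.
	 */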
1268 	if (cfg_flags & BNAD_CF_MSIX) {
1269 		intr_info->intr_type = BNA_INTR_T_MSIX;
1270 		intr_info->idl = kcalloc(intr_info->num,
1271 					sizeof(struct bna_intr_descr),
1272 					GFP_KERNEL);
1273 		if (!intr_info->idl)
1274 			return -ENOMEM;
1275 
1276 		switch (src) {
1277 		case BNAD_INTR_TX:
1278 			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1279 			break;
1280 
1281 		case BNAD_INTR_RX:
1282 			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1283 					(bnad->num_tx * bnad->num_txq_per_tx) +
1284 					txrx_id;
1285 			break;
1286 
1287 		default:
1288 			BUG();
1289 		}
1290 
1291 		for (i = 0; i < intr_info->num; i++)
1292 			intr_info->idl[i].vector = vector_start + i;
1293 	} else {
1294 		intr_info->intr_type = BNA_INTR_T_INTX;
1295 		intr_info->num = 1;
1296 		intr_info->idl = kcalloc(intr_info->num,
1297 					sizeof(struct bna_intr_descr),
1298 					GFP_KERNEL);
1299 		if (!intr_info->idl)
1300 			return -ENOMEM;
1301 
1302 		switch (src) {
1303 		case BNAD_INTR_TX:
1304 			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1305 			break;
1306 
1307 		case BNAD_INTR_RX:
1308 			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1309 			break;
1310 		}
1311 	}
1312 	return 0;
1313 }
1314 
1315 /**
1316  * NOTE: Should be called for MSIX only
1317  * Unregisters Tx MSIX vector(s) from the kernel
1318  */
1319 static void
1320 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1321 			int num_txqs)
1322 {
1323 	int i;
1324 	int vector_num;
1325 
1326 	for (i = 0; i < num_txqs; i++) {
1327 		if (tx_info->tcb[i] == NULL)
1328 			continue;
1329 
1330 		vector_num = tx_info->tcb[i]->intr_vector;
1331 		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1332 	}
1333 }
1334 
1335 /**
1336  * NOTE: Should be called for MSIX only
1337  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1338  */
1339 static int
1340 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1341 			u32 tx_id, int num_txqs)
1342 {
1343 	int i;
1344 	int err;
1345 	int vector_num;
1346 
1347 	for (i = 0; i < num_txqs; i++) {
1348 		vector_num = tx_info->tcb[i]->intr_vector;
1349 		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1350 				tx_id + tx_info->tcb[i]->id);
1351 		err = request_irq(bnad->msix_table[vector_num].vector,
1352 				  (irq_handler_t)bnad_msix_tx, 0,
1353 				  tx_info->tcb[i]->name,
1354 				  tx_info->tcb[i]);
1355 		if (err)
1356 			goto err_return;
1357 	}
1358 
1359 	return 0;
1360 
1361 err_return:
1362 	if (i > 0)
1363 		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1364 	return -1;
1365 }
1366 
1367 /**
1368  * NOTE: Should be called for MSIX only
1369  * Unregisters Rx MSIX vector(s) from the kernel
1370  */
1371 static void
1372 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1373 			int num_rxps)
1374 {
1375 	int i;
1376 	int vector_num;
1377 
1378 	for (i = 0; i < num_rxps; i++) {
1379 		if (rx_info->rx_ctrl[i].ccb == NULL)
1380 			continue;
1381 
1382 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1383 		free_irq(bnad->msix_table[vector_num].vector,
1384 			 rx_info->rx_ctrl[i].ccb);
1385 	}
1386 }
1387 
1388 /**
1389  * NOTE: Should be called for MSIX only
1390  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1391  */
1392 static int
1393 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1394 			u32 rx_id, int num_rxps)
1395 {
1396 	int i;
1397 	int err;
1398 	int vector_num;
1399 
1400 	for (i = 0; i < num_rxps; i++) {
1401 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1402 		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1403 			bnad->netdev->name,
1404 			rx_id + rx_info->rx_ctrl[i].ccb->id);
1405 		err = request_irq(bnad->msix_table[vector_num].vector,
1406 				  (irq_handler_t)bnad_msix_rx, 0,
1407 				  rx_info->rx_ctrl[i].ccb->name,
1408 				  rx_info->rx_ctrl[i].ccb);
1409 		if (err)
1410 			goto err_return;
1411 	}
1412 
1413 	return 0;
1414 
1415 err_return:
1416 	if (i > 0)
1417 		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1418 	return -1;
1419 }
1420 
1421 /* Free Tx object Resources */
1422 static void
1423 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1424 {
1425 	int i;
1426 
1427 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1428 		if (res_info[i].res_type == BNA_RES_T_MEM)
1429 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1430 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1431 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1432 	}
1433 }
1434 
1435 /* Allocates memory and interrupt resources for Tx object */
1436 static int
1437 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1438 		  u32 tx_id)
1439 {
1440 	int i, err = 0;
1441 
1442 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1443 		if (res_info[i].res_type == BNA_RES_T_MEM)
1444 			err = bnad_mem_alloc(bnad,
1445 					&res_info[i].res_u.mem_info);
1446 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1447 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1448 					&res_info[i].res_u.intr_info);
1449 		if (err)
1450 			goto err_return;
1451 	}
1452 	return 0;
1453 
1454 err_return:
1455 	bnad_tx_res_free(bnad, res_info);
1456 	return err;
1457 }
1458 
1459 /* Free Rx object Resources */
1460 static void
1461 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1462 {
1463 	int i;
1464 
1465 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1466 		if (res_info[i].res_type == BNA_RES_T_MEM)
1467 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1468 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1469 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1470 	}
1471 }
1472 
1473 /* Allocates memory and interrupt resources for Rx object */
1474 static int
1475 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1476 		  uint rx_id)
1477 {
1478 	int i, err = 0;
1479 
1480 	/* All memory needs to be allocated before setup_ccbs */
1481 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1482 		if (res_info[i].res_type == BNA_RES_T_MEM)
1483 			err = bnad_mem_alloc(bnad,
1484 					&res_info[i].res_u.mem_info);
1485 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1486 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1487 					&res_info[i].res_u.intr_info);
1488 		if (err)
1489 			goto err_return;
1490 	}
1491 	return 0;
1492 
1493 err_return:
1494 	bnad_rx_res_free(bnad, res_info);
1495 	return err;
1496 }
1497 
1498 /* Timer callbacks */
1499 /* a) IOC timer */
1500 static void
1501 bnad_ioc_timeout(unsigned long data)
1502 {
1503 	struct bnad *bnad = (struct bnad *)data;
1504 	unsigned long flags;
1505 
1506 	spin_lock_irqsave(&bnad->bna_lock, flags);
1507 	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1508 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1509 }
1510 
1511 static void
1512 bnad_ioc_hb_check(unsigned long data)
1513 {
1514 	struct bnad *bnad = (struct bnad *)data;
1515 	unsigned long flags;
1516 
1517 	spin_lock_irqsave(&bnad->bna_lock, flags);
1518 	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1519 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1520 }
1521 
1522 static void
1523 bnad_iocpf_timeout(unsigned long data)
1524 {
1525 	struct bnad *bnad = (struct bnad *)data;
1526 	unsigned long flags;
1527 
1528 	spin_lock_irqsave(&bnad->bna_lock, flags);
1529 	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1530 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1531 }
1532 
1533 static void
1534 bnad_iocpf_sem_timeout(unsigned long data)
1535 {
1536 	struct bnad *bnad = (struct bnad *)data;
1537 	unsigned long flags;
1538 
1539 	spin_lock_irqsave(&bnad->bna_lock, flags);
1540 	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1541 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1542 }
1543 
1544 /*
1545  * All timer routines use bnad->bna_lock to protect against
1546  * the following race, which may occur in case of no locking:
1547  *	Time	CPU m	CPU n
1548  *	0       1 = test_bit
1549  *	1			clear_bit
1550  *	2			del_timer_sync
1551  *	3	mod_timer
1552  */
1553 
1554 /* b) Dynamic Interrupt Moderation Timer */
1555 static void
1556 bnad_dim_timeout(unsigned long data)
1557 {
1558 	struct bnad *bnad = (struct bnad *)data;
1559 	struct bnad_rx_info *rx_info;
1560 	struct bnad_rx_ctrl *rx_ctrl;
1561 	int i, j;
1562 	unsigned long flags;
1563 
1564 	if (!netif_carrier_ok(bnad->netdev))
1565 		return;
1566 
1567 	spin_lock_irqsave(&bnad->bna_lock, flags);
1568 	for (i = 0; i < bnad->num_rx; i++) {
1569 		rx_info = &bnad->rx_info[i];
1570 		if (!rx_info->rx)
1571 			continue;
1572 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1573 			rx_ctrl = &rx_info->rx_ctrl[j];
1574 			if (!rx_ctrl->ccb)
1575 				continue;
1576 			bna_rx_dim_update(rx_ctrl->ccb);
1577 		}
1578 	}
1579 
1580 	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1581 	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1582 		mod_timer(&bnad->dim_timer,
1583 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1584 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1585 }
1586 
1587 /* c)  Statistics Timer */
1588 static void
1589 bnad_stats_timeout(unsigned long data)
1590 {
1591 	struct bnad *bnad = (struct bnad *)data;
1592 	unsigned long flags;
1593 
1594 	if (!netif_running(bnad->netdev) ||
1595 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1596 		return;
1597 
1598 	spin_lock_irqsave(&bnad->bna_lock, flags);
1599 	bna_hw_stats_get(&bnad->bna);
1600 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1601 }
1602 
1603 /*
1604  * Set up timer for DIM
1605  * Called with bnad->bna_lock held
1606  */
1607 void
1608 bnad_dim_timer_start(struct bnad *bnad)
1609 {
1610 	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1611 	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1612 		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1613 			    (unsigned long)bnad);
1614 		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1615 		mod_timer(&bnad->dim_timer,
1616 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1617 	}
1618 }
1619 
1620 /*
1621  * Set up timer for statistics
1622  * Called with mutex_lock(&bnad->conf_mutex) held
1623  */
1624 static void
1625 bnad_stats_timer_start(struct bnad *bnad)
1626 {
1627 	unsigned long flags;
1628 
1629 	spin_lock_irqsave(&bnad->bna_lock, flags);
1630 	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1631 		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1632 			    (unsigned long)bnad);
1633 		mod_timer(&bnad->stats_timer,
1634 			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1635 	}
1636 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1637 }
1638 
1639 /*
1640  * Stops the stats timer
1641  * Called with mutex_lock(&bnad->conf_mutex) held
1642  */
1643 static void
1644 bnad_stats_timer_stop(struct bnad *bnad)
1645 {
1646 	int to_del = 0;
1647 	unsigned long flags;
1648 
1649 	spin_lock_irqsave(&bnad->bna_lock, flags);
1650 	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1651 		to_del = 1;
1652 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1653 	if (to_del)
1654 		del_timer_sync(&bnad->stats_timer);
1655 }
1656 
1657 /* Utilities */
1658 
1659 static void
1660 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1661 {
1662 	int i = 1; /* Index 0 has broadcast address */
1663 	struct netdev_hw_addr *mc_addr;
1664 
1665 	netdev_for_each_mc_addr(mc_addr, netdev) {
1666 		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1667 							ETH_ALEN);
1668 		i++;
1669 	}
1670 }
1671 
1672 static int
1673 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1674 {
1675 	struct bnad_rx_ctrl *rx_ctrl =
1676 		container_of(napi, struct bnad_rx_ctrl, napi);
1677 	struct bnad *bnad = rx_ctrl->bnad;
1678 	int rcvd = 0;
1679 
1680 	rx_ctrl->rx_poll_ctr++;
1681 
1682 	if (!netif_carrier_ok(bnad->netdev))
1683 		goto poll_exit;
1684 
1685 	rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
1686 	if (rcvd >= budget)
1687 		return rcvd;
1688 
1689 poll_exit:
1690 	napi_complete(napi);
1691 
1692 	rx_ctrl->rx_complete++;
1693 
1694 	if (rx_ctrl->ccb)
1695 		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1696 
1697 	return rcvd;
1698 }
1699 
1700 #define BNAD_NAPI_POLL_QUOTA		64
1701 static void
1702 bnad_napi_init(struct bnad *bnad, u32 rx_id)
1703 {
1704 	struct bnad_rx_ctrl *rx_ctrl;
1705 	int i;
1706 
1707 	/* Initialize & enable NAPI */
1708 	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
1709 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1710 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1711 			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1712 	}
1713 }
1714 
1715 static void
1716 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1717 {
1718 	struct bnad_rx_ctrl *rx_ctrl;
1719 	int i;
1720 
1721 	/* Initialize & enable NAPI */
1722 	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
1723 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1724 
1725 		napi_enable(&rx_ctrl->napi);
1726 	}
1727 }
1728 
1729 static void
1730 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1731 {
1732 	int i;
1733 
1734 	/* First disable and then clean up */
1735 	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1736 		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1737 		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1738 	}
1739 }
1740 
1741 /* Should be called with conf_lock held */
1742 void
1743 bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1744 {
1745 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1746 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1747 	unsigned long flags;
1748 
1749 	if (!tx_info->tx)
1750 		return;
1751 
1752 	init_completion(&bnad->bnad_completions.tx_comp);
1753 	spin_lock_irqsave(&bnad->bna_lock, flags);
1754 	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1755 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1756 	wait_for_completion(&bnad->bnad_completions.tx_comp);
1757 
1758 	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1759 		bnad_tx_msix_unregister(bnad, tx_info,
1760 			bnad->num_txq_per_tx);
1761 
1762 	if (0 == tx_id)
1763 		tasklet_kill(&bnad->tx_free_tasklet);
1764 
1765 	spin_lock_irqsave(&bnad->bna_lock, flags);
1766 	bna_tx_destroy(tx_info->tx);
1767 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1768 
1769 	tx_info->tx = NULL;
1770 	tx_info->tx_id = 0;
1771 
1772 	bnad_tx_res_free(bnad, res_info);
1773 }
1774 
1775 /* Should be called with conf_lock held */
1776 int
1777 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1778 {
1779 	int err;
1780 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1781 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1782 	struct bna_intr_info *intr_info =
1783 			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1784 	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1785 	static const struct bna_tx_event_cbfn tx_cbfn = {
1786 		.tcb_setup_cbfn = bnad_cb_tcb_setup,
1787 		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1788 		.tx_stall_cbfn = bnad_cb_tx_stall,
1789 		.tx_resume_cbfn = bnad_cb_tx_resume,
1790 		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1791 	};
1792 
1793 	struct bna_tx *tx;
1794 	unsigned long flags;
1795 
1796 	tx_info->tx_id = tx_id;
1797 
1798 	/* Initialize the Tx object configuration */
1799 	tx_config->num_txq = bnad->num_txq_per_tx;
1800 	tx_config->txq_depth = bnad->txq_depth;
1801 	tx_config->tx_type = BNA_TX_T_REGULAR;
1802 	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1803 
1804 	/* Get BNA's resource requirement for one tx object */
1805 	spin_lock_irqsave(&bnad->bna_lock, flags);
1806 	bna_tx_res_req(bnad->num_txq_per_tx,
1807 		bnad->txq_depth, res_info);
1808 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1809 
1810 	/* Fill Unmap Q memory requirements */
1811 	BNAD_FILL_UNMAPQ_MEM_REQ(
1812 			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1813 			bnad->num_txq_per_tx,
1814 			BNAD_TX_UNMAPQ_DEPTH);
1815 
1816 	/* Allocate resources */
1817 	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1818 	if (err)
1819 		return err;
1820 
1821 	/* Ask BNA to create one Tx object, supplying required resources */
1822 	spin_lock_irqsave(&bnad->bna_lock, flags);
1823 	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1824 			tx_info);
1825 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1826 	if (!tx)
1827 		goto err_return;
1828 	tx_info->tx = tx;
1829 
1830 	/* Register ISR for the Tx object */
1831 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1832 		err = bnad_tx_msix_register(bnad, tx_info,
1833 			tx_id, bnad->num_txq_per_tx);
1834 		if (err)
1835 			goto err_return;
1836 	}
1837 
1838 	spin_lock_irqsave(&bnad->bna_lock, flags);
1839 	bna_tx_enable(tx);
1840 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1841 
1842 	return 0;
1843 
1844 err_return:
1845 	bnad_tx_res_free(bnad, res_info);
1846 	return err;
1847 }
1848 
1849 /* Setup the rx config for bna_rx_create */
1850 /* bnad decides the configuration */
1851 static void
1852 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1853 {
1854 	rx_config->rx_type = BNA_RX_T_REGULAR;
1855 	rx_config->num_paths = bnad->num_rxp_per_rx;
1856 	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1857 
1858 	if (bnad->num_rxp_per_rx > 1) {
1859 		rx_config->rss_status = BNA_STATUS_T_ENABLED;
1860 		rx_config->rss_config.hash_type =
1861 				(BFI_ENET_RSS_IPV6 |
1862 				 BFI_ENET_RSS_IPV6_TCP |
1863 				 BFI_ENET_RSS_IPV4 |
1864 				 BFI_ENET_RSS_IPV4_TCP);
1865 		rx_config->rss_config.hash_mask =
1866 				bnad->num_rxp_per_rx - 1;
1867 		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1868 			sizeof(rx_config->rss_config.toeplitz_hash_key));
1869 	} else {
1870 		rx_config->rss_status = BNA_STATUS_T_DISABLED;
1871 		memset(&rx_config->rss_config, 0,
1872 		       sizeof(rx_config->rss_config));
1873 	}
1874 	rx_config->rxp_type = BNA_RXP_SLR;
1875 	rx_config->q_depth = bnad->rxq_depth;
1876 
1877 	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1878 
1879 	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1880 }
1881 
1882 static void
1883 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1884 {
1885 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1886 	int i;
1887 
1888 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1889 		rx_info->rx_ctrl[i].bnad = bnad;
1890 }
1891 
1892 /* Called with mutex_lock(&bnad->conf_mutex) held */
1893 void
1894 bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1895 {
1896 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1897 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1898 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1899 	unsigned long flags;
1900 	int to_del = 0;
1901 
1902 	if (!rx_info->rx)
1903 		return;
1904 
1905 	if (0 == rx_id) {
1906 		spin_lock_irqsave(&bnad->bna_lock, flags);
1907 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1908 		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1909 			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1910 			to_del = 1;
1911 		}
1912 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
1913 		if (to_del)
1914 			del_timer_sync(&bnad->dim_timer);
1915 	}
1916 
1917 	init_completion(&bnad->bnad_completions.rx_comp);
1918 	spin_lock_irqsave(&bnad->bna_lock, flags);
1919 	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1920 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1921 	wait_for_completion(&bnad->bnad_completions.rx_comp);
1922 
1923 	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1924 		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1925 
1926 	bnad_napi_disable(bnad, rx_id);
1927 
1928 	spin_lock_irqsave(&bnad->bna_lock, flags);
1929 	bna_rx_destroy(rx_info->rx);
1930 
1931 	rx_info->rx = NULL;
1932 	rx_info->rx_id = 0;
1933 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1934 
1935 	bnad_rx_res_free(bnad, res_info);
1936 }
1937 
1938 /* Called with mutex_lock(&bnad->conf_mutex) held */
1939 int
1940 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1941 {
1942 	int err;
1943 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1944 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1945 	struct bna_intr_info *intr_info =
1946 			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1947 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1948 	static const struct bna_rx_event_cbfn rx_cbfn = {
1949 		.rcb_setup_cbfn = bnad_cb_rcb_setup,
1950 		.rcb_destroy_cbfn = bnad_cb_rcb_destroy,
1951 		.ccb_setup_cbfn = bnad_cb_ccb_setup,
1952 		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1953 		.rx_stall_cbfn = bnad_cb_rx_stall,
1954 		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1955 		.rx_post_cbfn = bnad_cb_rx_post,
1956 	};
1957 	struct bna_rx *rx;
1958 	unsigned long flags;
1959 
1960 	rx_info->rx_id = rx_id;
1961 
1962 	/* Initialize the Rx object configuration */
1963 	bnad_init_rx_config(bnad, rx_config);
1964 
1965 	/* Get BNA's resource requirement for one Rx object */
1966 	spin_lock_irqsave(&bnad->bna_lock, flags);
1967 	bna_rx_res_req(rx_config, res_info);
1968 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1969 
1970 	/* Fill Unmap Q memory requirements */
1971 	BNAD_FILL_UNMAPQ_MEM_REQ(
1972 			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1973 			rx_config->num_paths +
1974 			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1975 				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1976 
1977 	/* Allocate resource */
1978 	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1979 	if (err)
1980 		return err;
1981 
1982 	bnad_rx_ctrl_init(bnad, rx_id);
1983 
1984 	/* Ask BNA to create one Rx object, supplying required resources */
1985 	spin_lock_irqsave(&bnad->bna_lock, flags);
1986 	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1987 			rx_info);
1988 	if (!rx) {
1989 		err = -ENOMEM;
1990 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
1991 		goto err_return;
1992 	}
1993 	rx_info->rx = rx;
1994 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1995 
1996 	/*
1997 	 * Init NAPI, so that the state is set to NAPI_STATE_SCHED and
1998 	 * the IRQ handler cannot schedule NAPI at this point.
1999 	 */
2000 	bnad_napi_init(bnad, rx_id);
2001 
2002 	/* Register ISR for the Rx object */
2003 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2004 		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2005 						rx_config->num_paths);
2006 		if (err)
2007 			goto err_return;
2008 	}
2009 
2010 	spin_lock_irqsave(&bnad->bna_lock, flags);
2011 	if (rx_id == 0) {
2012 		/* Set up Dynamic Interrupt Moderation Vector */
2013 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2014 			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2015 
2016 		/* Enable VLAN filtering only on the default Rx */
2017 		bna_rx_vlanfilter_enable(rx);
2018 
2019 		/* Start the DIM timer */
2020 		bnad_dim_timer_start(bnad);
2021 	}
2022 
2023 	bna_rx_enable(rx);
2024 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2025 
2026 	/* Enable scheduling of NAPI */
2027 	bnad_napi_enable(bnad, rx_id);
2028 
2029 	return 0;
2030 
2031 err_return:
2032 	bnad_cleanup_rx(bnad, rx_id);
2033 	return err;
2034 }
2035 
2036 /* Called with conf_lock & bnad->bna_lock held */
2037 void
2038 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2039 {
2040 	struct bnad_tx_info *tx_info;
2041 
2042 	tx_info = &bnad->tx_info[0];
2043 	if (!tx_info->tx)
2044 		return;
2045 
2046 	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2047 }
2048 
2049 /* Called with conf_lock & bnad->bna_lock held */
2050 void
2051 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2052 {
2053 	struct bnad_rx_info *rx_info;
2054 	int	i;
2055 
2056 	for (i = 0; i < bnad->num_rx; i++) {
2057 		rx_info = &bnad->rx_info[i];
2058 		if (!rx_info->rx)
2059 			continue;
2060 		bna_rx_coalescing_timeo_set(rx_info->rx,
2061 				bnad->rx_coalescing_timeo);
2062 	}
2063 }
2064 
2065 /*
2066  * Called with bnad->bna_lock held
2067  */
2068 int
2069 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2070 {
2071 	int ret;
2072 
2073 	if (!is_valid_ether_addr(mac_addr))
2074 		return -EADDRNOTAVAIL;
2075 
2076 	/* If datapath is down, pretend everything went through */
2077 	if (!bnad->rx_info[0].rx)
2078 		return 0;
2079 
2080 	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2081 	if (ret != BNA_CB_SUCCESS)
2082 		return -EADDRNOTAVAIL;
2083 
2084 	return 0;
2085 }
2086 
2087 /* Should be called with conf_lock held */
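/*
 * Adds the broadcast address to the default Rx's multicast list and
 * waits for the firmware completion; returns -ENODEV on failure.
 */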
2088 int
2089 bnad_enable_default_bcast(struct bnad *bnad)
2090 {
2091 	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2092 	int ret;
2093 	unsigned long flags;
2094 
2095 	init_completion(&bnad->bnad_completions.mcast_comp);
2096 
2097 	spin_lock_irqsave(&bnad->bna_lock, flags);
2098 	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2099 				bnad_cb_rx_mcast_add);
2100 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2101 
2102 	if (ret == BNA_CB_SUCCESS)
2103 		wait_for_completion(&bnad->bnad_completions.mcast_comp);
2104 	else
2105 		return -ENODEV;
2106 
2107 	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2108 		return -ENODEV;
2109 
2110 	return 0;
2111 }
2112 
2113 /* Called with mutex_lock(&bnad->conf_mutex) held */
2114 void
2115 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2116 {
2117 	u16 vid;
2118 	unsigned long flags;
2119 
2120 	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2121 		spin_lock_irqsave(&bnad->bna_lock, flags);
2122 		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2123 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2124 	}
2125 }
2126 
2127 /* Statistics utilities */
2128 void
2129 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2130 {
2131 	int i, j;
2132 
2133 	for (i = 0; i < bnad->num_rx; i++) {
2134 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2135 			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2136 				stats->rx_packets += bnad->rx_info[i].
2137 				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2138 				stats->rx_bytes += bnad->rx_info[i].
2139 					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2140 				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2141 					bnad->rx_info[i].rx_ctrl[j].ccb->
2142 					rcb[1]->rxq) {
2143 					stats->rx_packets +=
2144 						bnad->rx_info[i].rx_ctrl[j].
2145 						ccb->rcb[1]->rxq->rx_packets;
2146 					stats->rx_bytes +=
2147 						bnad->rx_info[i].rx_ctrl[j].
2148 						ccb->rcb[1]->rxq->rx_bytes;
2149 				}
2150 			}
2151 		}
2152 	}
2153 	for (i = 0; i < bnad->num_tx; i++) {
2154 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
2155 			if (bnad->tx_info[i].tcb[j]) {
2156 				stats->tx_packets +=
2157 				bnad->tx_info[i].tcb[j]->txq->tx_packets;
2158 				stats->tx_bytes +=
2159 					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2160 			}
2161 		}
2162 	}
2163 }
2164 
2165 /*
2166  * Must be called with the bna_lock held.
2167  */
2168 void
2169 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2170 {
2171 	struct bfi_enet_stats_mac *mac_stats;
2172 	u32 bmap;
2173 	int i;
2174 
2175 	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2176 	stats->rx_errors =
2177 		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2178 		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2179 		mac_stats->rx_undersize;
2180 	stats->tx_errors = mac_stats->tx_fcs_error +
2181 					mac_stats->tx_undersize;
2182 	stats->rx_dropped = mac_stats->rx_drop;
2183 	stats->tx_dropped = mac_stats->tx_drop;
2184 	stats->multicast = mac_stats->rx_multicast;
2185 	stats->collisions = mac_stats->tx_total_collision;
2186 
2187 	stats->rx_length_errors = mac_stats->rx_frame_length_error;
2188 
2189 	/* receive ring buffer overflow  ?? */
2190 
2191 	stats->rx_crc_errors = mac_stats->rx_fcs_error;
2192 	stats->rx_frame_errors = mac_stats->rx_alignment_error;
2193 	/* recv'r fifo overrun */
2194 	bmap = bna_rx_rid_mask(&bnad->bna);
2195 	for (i = 0; bmap; i++) {
2196 		if (bmap & 1) {
2197 			stats->rx_fifo_errors +=
2198 				bnad->stats.bna_stats->
2199 					hw_stats.rxf_stats[i].frame_drops;
2200 			break;
2201 		}
2202 		bmap >>= 1;
2203 	}
2204 }
2205 
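/*
 * Look up the mailbox IRQ and synchronize_irq() it, so that any
 * in-flight mailbox interrupt handler has finished before teardown
 * continues.
 */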
2206 static void
2207 bnad_mbox_irq_sync(struct bnad *bnad)
2208 {
2209 	u32 irq;
2210 	unsigned long flags;
2211 
2212 	spin_lock_irqsave(&bnad->bna_lock, flags);
2213 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2214 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2215 	else
2216 		irq = bnad->pcidev->irq;
2217 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2218 
2219 	synchronize_irq(irq);
2220 }
2221 
2222 /* Utility used by bnad_start_xmit, for doing TSO */
2223 static int
2224 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2225 {
2226 	int err;
2227 
2228 	if (skb_header_cloned(skb)) {
2229 		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2230 		if (err) {
2231 			BNAD_UPDATE_CTR(bnad, tso_err);
2232 			return err;
2233 		}
2234 	}
2235 
2236 	/*
2237 	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2238 	 * excluding the length field.
2239 	 */
2240 	if (skb->protocol == htons(ETH_P_IP)) {
2241 		struct iphdr *iph = ip_hdr(skb);
2242 
2243 		/* Do we really need these? */
2244 		iph->tot_len = 0;
2245 		iph->check = 0;
2246 
2247 		tcp_hdr(skb)->check =
2248 			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2249 					   IPPROTO_TCP, 0);
2250 		BNAD_UPDATE_CTR(bnad, tso4);
2251 	} else {
2252 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2253 
2254 		ipv6h->payload_len = 0;
2255 		tcp_hdr(skb)->check =
2256 			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2257 					 IPPROTO_TCP, 0);
2258 		BNAD_UPDATE_CTR(bnad, tso6);
2259 	}
2260 
2261 	return 0;
2262 }
2263 
2264 /*
2265  * Initialize Q numbers depending on Rx Paths
2266  * Called with bnad->bna_lock held, because of cfg_flags
2267  * access.
2268  */
2269 static void
2270 bnad_q_num_init(struct bnad *bnad)
2271 {
2272 	int rxps;
2273 
2274 	rxps = min((uint)num_online_cpus(),
2275 			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2276 
2277 	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2278 		rxps = 1;	/* INTx */
2279 
2280 	bnad->num_rx = 1;
2281 	bnad->num_tx = 1;
2282 	bnad->num_rxp_per_rx = rxps;
2283 	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2284 }
2285 
2286 /*
2287  * Adjusts the Q numbers, given a number of MSI-X vectors.
2288  * Preference is given to RSS over Tx priority queues; in that
2289  * case just one TxQ is used.
2290  * Called with bnad->bna_lock held because of cfg_flags access.
2291  */
2292 static void
2293 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2294 {
2295 	bnad->num_txq_per_tx = 1;
2296 	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2297 	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2298 	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
2299 		bnad->num_rxp_per_rx = msix_vectors -
2300 			(bnad->num_tx * bnad->num_txq_per_tx) -
2301 			BNAD_MAILBOX_MSIX_VECTORS;
2302 	} else
2303 		bnad->num_rxp_per_rx = 1;
2304 }
2305 
2306 /* Enable / disable ioceth */
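/*
 * Both helpers kick the ioceth state machine and then wait (bounded by
 * BNAD_IOCETH_TIMEOUT) for the completion posted from the corresponding
 * callback; the completion status is returned to the caller.
 */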
2307 static int
2308 bnad_ioceth_disable(struct bnad *bnad)
2309 {
2310 	unsigned long flags;
2311 	int err = 0;
2312 
2313 	spin_lock_irqsave(&bnad->bna_lock, flags);
2314 	init_completion(&bnad->bnad_completions.ioc_comp);
2315 	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2316 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2317 
2318 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2319 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2320 
2321 	err = bnad->bnad_completions.ioc_comp_status;
2322 	return err;
2323 }
2324 
2325 static int
2326 bnad_ioceth_enable(struct bnad *bnad)
2327 {
2328 	int err = 0;
2329 	unsigned long flags;
2330 
2331 	spin_lock_irqsave(&bnad->bna_lock, flags);
2332 	init_completion(&bnad->bnad_completions.ioc_comp);
2333 	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2334 	bna_ioceth_enable(&bnad->bna.ioceth);
2335 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2336 
2337 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2338 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2339 
2340 	err = bnad->bnad_completions.ioc_comp_status;
2341 
2342 	return err;
2343 }
2344 
2345 /* Free BNA resources */
2346 static void
2347 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2348 		u32 res_val_max)
2349 {
2350 	int i;
2351 
2352 	for (i = 0; i < res_val_max; i++)
2353 		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2354 }
2355 
2356 /* Allocates memory and interrupt resources for BNA */
2357 static int
2358 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2359 		u32 res_val_max)
2360 {
2361 	int i, err;
2362 
2363 	for (i = 0; i < res_val_max; i++) {
2364 		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2365 		if (err)
2366 			goto err_return;
2367 	}
2368 	return 0;
2369 
2370 err_return:
2371 	bnad_res_free(bnad, res_info, res_val_max);
2372 	return err;
2373 }
2374 
2375 /* Interrupt enable / disable */
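/*
 * Allocate the MSI-X table and enable MSI-X. If fewer vectors are
 * granted than requested, adjust the queue counts and retry once;
 * on any further failure fall back to INTx mode.
 */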
2376 static void
2377 bnad_enable_msix(struct bnad *bnad)
2378 {
2379 	int i, ret;
2380 	unsigned long flags;
2381 
2382 	spin_lock_irqsave(&bnad->bna_lock, flags);
2383 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2384 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2385 		return;
2386 	}
2387 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2388 
2389 	if (bnad->msix_table)
2390 		return;
2391 
2392 	bnad->msix_table =
2393 		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2394 
2395 	if (!bnad->msix_table)
2396 		goto intx_mode;
2397 
2398 	for (i = 0; i < bnad->msix_num; i++)
2399 		bnad->msix_table[i].entry = i;
2400 
2401 	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2402 	if (ret > 0) {
2403 		/* Not enough MSI-X vectors. */
2404 		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2405 			ret, bnad->msix_num);
2406 
2407 		spin_lock_irqsave(&bnad->bna_lock, flags);
2408 		/* ret = #of vectors that we got */
2409 		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2410 			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2411 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2412 
2413 		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2414 			 BNAD_MAILBOX_MSIX_VECTORS;
2415 
2416 		if (bnad->msix_num > ret)
2417 			goto intx_mode;
2418 
2419 		/* Try once more with adjusted numbers */
2420 		/* If this fails, fall back to INTx */
2421 		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2422 				      bnad->msix_num);
2423 		if (ret)
2424 			goto intx_mode;
2425 
2426 	} else if (ret < 0)
2427 		goto intx_mode;
2428 
2429 	pci_intx(bnad->pcidev, 0);
2430 
2431 	return;
2432 
2433 intx_mode:
2434 	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2435 
2436 	kfree(bnad->msix_table);
2437 	bnad->msix_table = NULL;
2438 	bnad->msix_num = 0;
2439 	spin_lock_irqsave(&bnad->bna_lock, flags);
2440 	bnad->cfg_flags &= ~BNAD_CF_MSIX;
2441 	bnad_q_num_init(bnad);
2442 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2443 }
2444 
2445 static void
2446 bnad_disable_msix(struct bnad *bnad)
2447 {
2448 	u32 cfg_flags;
2449 	unsigned long flags;
2450 
2451 	spin_lock_irqsave(&bnad->bna_lock, flags);
2452 	cfg_flags = bnad->cfg_flags;
2453 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2454 		bnad->cfg_flags &= ~BNAD_CF_MSIX;
2455 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2456 
2457 	if (cfg_flags & BNAD_CF_MSIX) {
2458 		pci_disable_msix(bnad->pcidev);
2459 		kfree(bnad->msix_table);
2460 		bnad->msix_table = NULL;
2461 	}
2462 }
2463 
2464 /* Netdev entry points */
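/*
 * bnad_open() sets up the Tx and Rx objects, programs the MTU and pause
 * configuration, enables the enet, re-applies the broadcast, VLAN and
 * unicast MAC settings and starts the stats timer; bnad_stop() tears
 * all of this down again under the same conf_mutex.
 */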
2465 static int
2466 bnad_open(struct net_device *netdev)
2467 {
2468 	int err;
2469 	struct bnad *bnad = netdev_priv(netdev);
2470 	struct bna_pause_config pause_config;
2471 	int mtu;
2472 	unsigned long flags;
2473 
2474 	mutex_lock(&bnad->conf_mutex);
2475 
2476 	/* Tx */
2477 	err = bnad_setup_tx(bnad, 0);
2478 	if (err)
2479 		goto err_return;
2480 
2481 	/* Rx */
2482 	err = bnad_setup_rx(bnad, 0);
2483 	if (err)
2484 		goto cleanup_tx;
2485 
2486 	/* Port */
2487 	pause_config.tx_pause = 0;
2488 	pause_config.rx_pause = 0;
2489 
2490 	mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2491 
2492 	spin_lock_irqsave(&bnad->bna_lock, flags);
2493 	bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2494 	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2495 	bna_enet_enable(&bnad->bna.enet);
2496 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2497 
2498 	/* Enable broadcast */
2499 	bnad_enable_default_bcast(bnad);
2500 
2501 	/* Restore VLANs, if any */
2502 	bnad_restore_vlans(bnad, 0);
2503 
2504 	/* Set the UCAST address */
2505 	spin_lock_irqsave(&bnad->bna_lock, flags);
2506 	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2507 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2508 
2509 	/* Start the stats timer */
2510 	bnad_stats_timer_start(bnad);
2511 
2512 	mutex_unlock(&bnad->conf_mutex);
2513 
2514 	return 0;
2515 
2516 cleanup_tx:
2517 	bnad_cleanup_tx(bnad, 0);
2518 
2519 err_return:
2520 	mutex_unlock(&bnad->conf_mutex);
2521 	return err;
2522 }
2523 
2524 static int
2525 bnad_stop(struct net_device *netdev)
2526 {
2527 	struct bnad *bnad = netdev_priv(netdev);
2528 	unsigned long flags;
2529 
2530 	mutex_lock(&bnad->conf_mutex);
2531 
2532 	/* Stop the stats timer */
2533 	bnad_stats_timer_stop(bnad);
2534 
2535 	init_completion(&bnad->bnad_completions.enet_comp);
2536 
2537 	spin_lock_irqsave(&bnad->bna_lock, flags);
2538 	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2539 			bnad_cb_enet_disabled);
2540 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2541 
2542 	wait_for_completion(&bnad->bnad_completions.enet_comp);
2543 
2544 	bnad_cleanup_tx(bnad, 0);
2545 	bnad_cleanup_rx(bnad, 0);
2546 
2547 	/* Synchronize mailbox IRQ */
2548 	bnad_mbox_irq_sync(bnad);
2549 
2550 	mutex_unlock(&bnad->conf_mutex);
2551 
2552 	return 0;
2553 }
2554 
2555 /* TX */
2556 /*
2557  * bnad_start_xmit : Netdev entry point for Transmit
2558  *		     Called under lock held by net_device
2559  */
2560 static netdev_tx_t
2561 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2562 {
2563 	struct bnad *bnad = netdev_priv(netdev);
2564 	u32 txq_id = 0;
2565 	struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2566 
2567 	u16		txq_prod, vlan_tag = 0;
2568 	u32		unmap_prod, wis, wis_used, wi_range;
2569 	u32		vectors, vect_id, i, acked;
2570 	int			err;
2571 	unsigned int		len;
2572 	u32				gso_size;
2573 
2574 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2575 	dma_addr_t		dma_addr;
2576 	struct bna_txq_entry *txqent;
2577 	u16	flags;
2578 
2579 	if (unlikely(skb->len <= ETH_HLEN)) {
2580 		dev_kfree_skb(skb);
2581 		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2582 		return NETDEV_TX_OK;
2583 	}
2584 	if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2585 		dev_kfree_skb(skb);
2586 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2587 		return NETDEV_TX_OK;
2588 	}
2589 	if (unlikely(skb_headlen(skb) == 0)) {
2590 		dev_kfree_skb(skb);
2591 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2592 		return NETDEV_TX_OK;
2593 	}
2594 
2595 	/*
2596 	 * Takes care of a Tx that gets scheduled between clearing the
2597 	 * BNAD_TXQ_TX_STARTED flag and the netif_tx_stop_all_queues() call.
2598 	 */
2599 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2600 		dev_kfree_skb(skb);
2601 		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2602 		return NETDEV_TX_OK;
2603 	}
2604 
2605 	vectors = 1 + skb_shinfo(skb)->nr_frags;
2606 	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2607 		dev_kfree_skb(skb);
2608 		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2609 		return NETDEV_TX_OK;
2610 	}
2611 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
2612 	acked = 0;
2613 	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2614 			vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2615 		if ((u16) (*tcb->hw_consumer_index) !=
2616 		    tcb->consumer_index &&
2617 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2618 			acked = bnad_free_txbufs(bnad, tcb);
2619 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2620 				bna_ib_ack(tcb->i_dbell, acked);
2621 			smp_mb__before_clear_bit();
2622 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2623 		} else {
2624 			netif_stop_queue(netdev);
2625 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2626 		}
2627 
2628 		smp_mb();
2629 		/*
2630 		 * Check again to handle the race between netif_stop_queue()
2631 		 * here and netif_wake_queue() in the interrupt handler,
2632 		 * which does not run under the netif tx lock.
2633 		 */
2634 		if (likely
2635 		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2636 		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2637 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2638 			return NETDEV_TX_BUSY;
2639 		} else {
2640 			netif_wake_queue(netdev);
2641 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2642 		}
2643 	}
2644 
2645 	unmap_prod = unmap_q->producer_index;
2646 	flags = 0;
2647 
2648 	txq_prod = tcb->producer_index;
2649 	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2650 	txqent->hdr.wi.reserved = 0;
2651 	txqent->hdr.wi.num_vectors = vectors;
2652 
2653 	if (vlan_tx_tag_present(skb)) {
2654 		vlan_tag = (u16) vlan_tx_tag_get(skb);
2655 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2656 	}
2657 	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2658 		vlan_tag =
2659 			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2660 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2661 	}
2662 
2663 	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2664 
2665 	if (skb_is_gso(skb)) {
2666 		gso_size = skb_shinfo(skb)->gso_size;
2667 
2668 		if (unlikely(gso_size > netdev->mtu)) {
2669 			dev_kfree_skb(skb);
2670 			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2671 			return NETDEV_TX_OK;
2672 		}
2673 		if (unlikely((gso_size + skb_transport_offset(skb) +
2674 			tcp_hdrlen(skb)) >= skb->len)) {
2675 			txqent->hdr.wi.opcode =
2676 				__constant_htons(BNA_TXQ_WI_SEND);
2677 			txqent->hdr.wi.lso_mss = 0;
2678 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2679 		} else {
2680 			txqent->hdr.wi.opcode =
2681 				__constant_htons(BNA_TXQ_WI_SEND_LSO);
2682 			txqent->hdr.wi.lso_mss = htons(gso_size);
2683 		}
2684 
2685 		err = bnad_tso_prepare(bnad, skb);
2686 		if (unlikely(err)) {
2687 			dev_kfree_skb(skb);
2688 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2689 			return NETDEV_TX_OK;
2690 		}
2691 		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2692 		txqent->hdr.wi.l4_hdr_size_n_offset =
2693 			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2694 			      (tcp_hdrlen(skb) >> 2,
2695 			       skb_transport_offset(skb)));
2696 	} else {
2697 		txqent->hdr.wi.opcode =	__constant_htons(BNA_TXQ_WI_SEND);
2698 		txqent->hdr.wi.lso_mss = 0;
2699 
2700 		if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2701 			dev_kfree_skb(skb);
2702 			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2703 			return NETDEV_TX_OK;
2704 		}
2705 
2706 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2707 			u8 proto = 0;
2708 
2709 			if (skb->protocol == __constant_htons(ETH_P_IP))
2710 				proto = ip_hdr(skb)->protocol;
2711 			else if (skb->protocol ==
2712 				 __constant_htons(ETH_P_IPV6)) {
2713 				/* nexthdr may not be TCP immediately. */
2714 				proto = ipv6_hdr(skb)->nexthdr;
2715 			}
2716 			if (proto == IPPROTO_TCP) {
2717 				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2718 				txqent->hdr.wi.l4_hdr_size_n_offset =
2719 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2720 					      (0, skb_transport_offset(skb)));
2721 
2722 				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2723 
2724 				if (unlikely(skb_headlen(skb) <
2725 				skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2726 					dev_kfree_skb(skb);
2727 					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2728 					return NETDEV_TX_OK;
2729 				}
2730 
2731 			} else if (proto == IPPROTO_UDP) {
2732 				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2733 				txqent->hdr.wi.l4_hdr_size_n_offset =
2734 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2735 					      (0, skb_transport_offset(skb)));
2736 
2737 				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2738 				if (unlikely(skb_headlen(skb) <
2739 				    skb_transport_offset(skb) +
2740 				    sizeof(struct udphdr))) {
2741 					dev_kfree_skb(skb);
2742 					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2743 					return NETDEV_TX_OK;
2744 				}
2745 			} else {
2746 				dev_kfree_skb(skb);
2747 				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2748 				return NETDEV_TX_OK;
2749 			}
2750 		} else {
2751 			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2752 		}
2753 	}
2754 
2755 	txqent->hdr.wi.flags = htons(flags);
2756 
2757 	txqent->hdr.wi.frame_length = htonl(skb->len);
2758 
2759 	unmap_q->unmap_array[unmap_prod].skb = skb;
2760 	len = skb_headlen(skb);
2761 	txqent->vector[0].length = htons(len);
2762 	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2763 				  skb_headlen(skb), DMA_TO_DEVICE);
2764 	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2765 			   dma_addr);
2766 
2767 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2768 	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2769 
2770 	vect_id = 0;
2771 	wis_used = 1;
2772 
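	/*
	 * Map the remaining fragments. Every BFI_TX_MAX_VECTORS_PER_WI
	 * vectors spill into an additional work item, marked with the
	 * BNA_TXQ_WI_EXTENSION opcode.
	 */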
2773 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2774 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2775 		u16		size = skb_frag_size(frag);
2776 
2777 		if (unlikely(size == 0)) {
2778 			unmap_prod = unmap_q->producer_index;
2779 
2780 			unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2781 					   unmap_q->unmap_array,
2782 					   unmap_prod, unmap_q->q_depth, skb,
2783 					   i);
2784 			dev_kfree_skb(skb);
2785 			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2786 			return NETDEV_TX_OK;
2787 		}
2788 
2789 		len += size;
2790 
2791 		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2792 			vect_id = 0;
2793 			if (--wi_range)
2794 				txqent++;
2795 			else {
2796 				BNA_QE_INDX_ADD(txq_prod, wis_used,
2797 						tcb->q_depth);
2798 				wis_used = 0;
2799 				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2800 						     txqent, wi_range);
2801 			}
2802 			wis_used++;
2803 			txqent->hdr.wi_ext.opcode =
2804 				__constant_htons(BNA_TXQ_WI_EXTENSION);
2805 		}
2806 
2807 		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2808 		txqent->vector[vect_id].length = htons(size);
2809 		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2810 					    0, size, DMA_TO_DEVICE);
2811 		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2812 				   dma_addr);
2813 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2814 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2815 	}
2816 
2817 	if (unlikely(len != skb->len)) {
2818 		unmap_prod = unmap_q->producer_index;
2819 
2820 		unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2821 				unmap_q->unmap_array, unmap_prod,
2822 				unmap_q->q_depth, skb,
2823 				skb_shinfo(skb)->nr_frags);
2824 		dev_kfree_skb(skb);
2825 		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2826 		return NETDEV_TX_OK;
2827 	}
2828 
2829 	unmap_q->producer_index = unmap_prod;
2830 	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2831 	tcb->producer_index = txq_prod;
2832 
2833 	smp_mb();
2834 
2835 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2836 		return NETDEV_TX_OK;
2837 
2838 	bna_txq_prod_indx_doorbell(tcb);
2839 	smp_mb();
2840 
2841 	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2842 		tasklet_schedule(&bnad->tx_free_tasklet);
2843 
2844 	return NETDEV_TX_OK;
2845 }
2846 
2847 /*
2848  * bna_lock is used to synchronize reading of the stats structures,
2849  * which are written by BNA under the same lock.
2850  */
2851 static struct rtnl_link_stats64 *
2852 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2853 {
2854 	struct bnad *bnad = netdev_priv(netdev);
2855 	unsigned long flags;
2856 
2857 	spin_lock_irqsave(&bnad->bna_lock, flags);
2858 
2859 	bnad_netdev_qstats_fill(bnad, stats);
2860 	bnad_netdev_hwstats_fill(bnad, stats);
2861 
2862 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2863 
2864 	return stats;
2865 }
2866 
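/*
 * Reconcile IFF_PROMISC / IFF_ALLMULTI with the cached cfg_flags and
 * reprogram the Rx mode; the multicast list is rebuilt with slot 0
 * reserved for the broadcast address.
 */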
2867 void
2868 bnad_set_rx_mode(struct net_device *netdev)
2869 {
2870 	struct bnad *bnad = netdev_priv(netdev);
2871 	u32	new_mask, valid_mask;
2872 	unsigned long flags;
2873 
2874 	spin_lock_irqsave(&bnad->bna_lock, flags);
2875 
2876 	new_mask = valid_mask = 0;
2877 
2878 	if (netdev->flags & IFF_PROMISC) {
2879 		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2880 			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2881 			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2882 			bnad->cfg_flags |= BNAD_CF_PROMISC;
2883 		}
2884 	} else {
2885 		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2886 			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2887 			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2888 			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2889 		}
2890 	}
2891 
2892 	if (netdev->flags & IFF_ALLMULTI) {
2893 		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2894 			new_mask |= BNA_RXMODE_ALLMULTI;
2895 			valid_mask |= BNA_RXMODE_ALLMULTI;
2896 			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2897 		}
2898 	} else {
2899 		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2900 			new_mask &= ~BNA_RXMODE_ALLMULTI;
2901 			valid_mask |= BNA_RXMODE_ALLMULTI;
2902 			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2903 		}
2904 	}
2905 
2906 	if (bnad->rx_info[0].rx == NULL)
2907 		goto unlock;
2908 
2909 	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2910 
2911 	if (!netdev_mc_empty(netdev)) {
2912 		u8 *mcaddr_list;
2913 		int mc_count = netdev_mc_count(netdev);
2914 
2915 		/* Index 0 holds the broadcast address */
2916 		mcaddr_list =
2917 			kzalloc((mc_count + 1) * ETH_ALEN,
2918 				GFP_ATOMIC);
2919 		if (!mcaddr_list)
2920 			goto unlock;
2921 
2922 		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2923 
2924 		/* Copy rest of the MC addresses */
2925 		bnad_netdev_mc_list_get(netdev, mcaddr_list);
2926 
2927 		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2928 					mcaddr_list, NULL);
2929 
2930 		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2931 		kfree(mcaddr_list);
2932 	}
2933 unlock:
2934 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2935 }
2936 
2937 /*
2938  * bna_lock is used to sync writes to netdev->addr;
2939  * conf_lock cannot be used since this call may be made
2940  * in a non-blocking context.
2941  */
2942 static int
2943 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2944 {
2945 	int err;
2946 	struct bnad *bnad = netdev_priv(netdev);
2947 	struct sockaddr *sa = (struct sockaddr *)mac_addr;
2948 	unsigned long flags;
2949 
2950 	spin_lock_irqsave(&bnad->bna_lock, flags);
2951 
2952 	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2953 
2954 	if (!err)
2955 		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2956 
2957 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2958 
2959 	return err;
2960 }
2961 
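/*
 * Program the new frame size into the enet object and block until the
 * bnad_cb_enet_mtu_set() completion fires; returns the completion
 * status. Called under conf_mutex (see bnad_change_mtu()).
 */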
2962 static int
2963 bnad_mtu_set(struct bnad *bnad, int mtu)
2964 {
2965 	unsigned long flags;
2966 
2967 	init_completion(&bnad->bnad_completions.mtu_comp);
2968 
2969 	spin_lock_irqsave(&bnad->bna_lock, flags);
2970 	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2971 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2972 
2973 	wait_for_completion(&bnad->bnad_completions.mtu_comp);
2974 
2975 	return bnad->bnad_completions.mtu_comp_status;
2976 }
2977 
2978 static int
2979 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2980 {
2981 	int err, mtu = netdev->mtu;
2982 	struct bnad *bnad = netdev_priv(netdev);
2983 
2984 	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2985 		return -EINVAL;
2986 
2987 	mutex_lock(&bnad->conf_mutex);
2988 
2989 	netdev->mtu = new_mtu;
2990 
2991 	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2992 	err = bnad_mtu_set(bnad, mtu);
2993 	if (err)
2994 		err = -EBUSY;
2995 
2996 	mutex_unlock(&bnad->conf_mutex);
2997 	return err;
2998 }
2999 
3000 static int
3001 bnad_vlan_rx_add_vid(struct net_device *netdev,
3002 				 unsigned short vid)
3003 {
3004 	struct bnad *bnad = netdev_priv(netdev);
3005 	unsigned long flags;
3006 
3007 	if (!bnad->rx_info[0].rx)
3008 		return 0;
3009 
3010 	mutex_lock(&bnad->conf_mutex);
3011 
3012 	spin_lock_irqsave(&bnad->bna_lock, flags);
3013 	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3014 	set_bit(vid, bnad->active_vlans);
3015 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3016 
3017 	mutex_unlock(&bnad->conf_mutex);
3018 
3019 	return 0;
3020 }
3021 
3022 static int
3023 bnad_vlan_rx_kill_vid(struct net_device *netdev,
3024 				  unsigned short vid)
3025 {
3026 	struct bnad *bnad = netdev_priv(netdev);
3027 	unsigned long flags;
3028 
3029 	if (!bnad->rx_info[0].rx)
3030 		return 0;
3031 
3032 	mutex_lock(&bnad->conf_mutex);
3033 
3034 	spin_lock_irqsave(&bnad->bna_lock, flags);
3035 	clear_bit(vid, bnad->active_vlans);
3036 	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3037 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3038 
3039 	mutex_unlock(&bnad->conf_mutex);
3040 
3041 	return 0;
3042 }
3043 
3044 #ifdef CONFIG_NET_POLL_CONTROLLER
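/*
 * Netpoll entry point: in INTx mode device interrupts are disabled
 * around a direct call to bnad_isr(); in MSI-X mode NAPI poll is
 * scheduled on each active Rx path.
 */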
3045 static void
3046 bnad_netpoll(struct net_device *netdev)
3047 {
3048 	struct bnad *bnad = netdev_priv(netdev);
3049 	struct bnad_rx_info *rx_info;
3050 	struct bnad_rx_ctrl *rx_ctrl;
3051 	u32 curr_mask;
3052 	int i, j;
3053 
3054 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3055 		bna_intx_disable(&bnad->bna, curr_mask);
3056 		bnad_isr(bnad->pcidev->irq, netdev);
3057 		bna_intx_enable(&bnad->bna, curr_mask);
3058 	} else {
3059 		/*
3060 		 * Tx processing may happen in sending context, so no need
3061 		 * to explicitly process completions here
3062 		 */
3063 
3064 		/* Rx processing */
3065 		for (i = 0; i < bnad->num_rx; i++) {
3066 			rx_info = &bnad->rx_info[i];
3067 			if (!rx_info->rx)
3068 				continue;
3069 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3070 				rx_ctrl = &rx_info->rx_ctrl[j];
3071 				if (rx_ctrl->ccb)
3072 					bnad_netif_rx_schedule_poll(bnad,
3073 							    rx_ctrl->ccb);
3074 			}
3075 		}
3076 	}
3077 }
3078 #endif
3079 
3080 static const struct net_device_ops bnad_netdev_ops = {
3081 	.ndo_open		= bnad_open,
3082 	.ndo_stop		= bnad_stop,
3083 	.ndo_start_xmit		= bnad_start_xmit,
3084 	.ndo_get_stats64		= bnad_get_stats64,
3085 	.ndo_set_rx_mode	= bnad_set_rx_mode,
3086 	.ndo_validate_addr      = eth_validate_addr,
3087 	.ndo_set_mac_address    = bnad_set_mac_address,
3088 	.ndo_change_mtu		= bnad_change_mtu,
3089 	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3090 	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3091 #ifdef CONFIG_NET_POLL_CONTROLLER
3092 	.ndo_poll_controller    = bnad_netpoll
3093 #endif
3094 };
3095 
3096 static void
3097 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3098 {
3099 	struct net_device *netdev = bnad->netdev;
3100 
3101 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3102 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3103 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3104 
3105 	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3106 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3107 		NETIF_F_TSO | NETIF_F_TSO6;
3108 
3109 	netdev->features |= netdev->hw_features |
3110 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3111 
3112 	if (using_dac)
3113 		netdev->features |= NETIF_F_HIGHDMA;
3114 
3115 	netdev->mem_start = bnad->mmio_start;
3116 	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3117 
3118 	netdev->netdev_ops = &bnad_netdev_ops;
3119 	bnad_set_ethtool_ops(netdev);
3120 }
3121 
3122 /*
3123  * 1. Initialize the bnad structure
3124  * 2. Setup netdev pointer in pci_dev
3125  * 3. Initialize Tx free tasklet
3126  * 4. Initialize no. of TxQ & CQs & MSIX vectors
3127  */
3128 static int
3129 bnad_init(struct bnad *bnad,
3130 	  struct pci_dev *pdev, struct net_device *netdev)
3131 {
3132 	unsigned long flags;
3133 
3134 	SET_NETDEV_DEV(netdev, &pdev->dev);
3135 	pci_set_drvdata(pdev, netdev);
3136 
3137 	bnad->netdev = netdev;
3138 	bnad->pcidev = pdev;
3139 	bnad->mmio_start = pci_resource_start(pdev, 0);
3140 	bnad->mmio_len = pci_resource_len(pdev, 0);
3141 	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3142 	if (!bnad->bar0) {
3143 		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3144 		pci_set_drvdata(pdev, NULL);
3145 		return -ENOMEM;
3146 	}
3147 	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3148 	       (unsigned long long) bnad->mmio_len);
3149 
3150 	spin_lock_irqsave(&bnad->bna_lock, flags);
3151 	if (!bnad_msix_disable)
3152 		bnad->cfg_flags = BNAD_CF_MSIX;
3153 
3154 	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3155 
3156 	bnad_q_num_init(bnad);
3157 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3158 
3159 	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3160 		(bnad->num_rx * bnad->num_rxp_per_rx) +
3161 			 BNAD_MAILBOX_MSIX_VECTORS;
3162 
3163 	bnad->txq_depth = BNAD_TXQ_DEPTH;
3164 	bnad->rxq_depth = BNAD_RXQ_DEPTH;
3165 
3166 	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3167 	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3168 
3169 	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3170 		     (unsigned long)bnad);
3171 
3172 	return 0;
3173 }
3174 
3175 /*
3176  * Must be called after bnad_pci_uninit()
3177  * so that iounmap() and pci_set_drvdata(NULL)
3178  * happen only after PCI uninitialization.
3179  */
3180 static void
3181 bnad_uninit(struct bnad *bnad)
3182 {
3183 	if (bnad->bar0)
3184 		iounmap(bnad->bar0);
3185 	pci_set_drvdata(bnad->pcidev, NULL);
3186 }
3187 
3188 /*
3189  * Initialize locks
3190 	a) Per ioceth mutex used for serializing configuration
3191 	   changes from OS interface
3192 	b) spin lock used to protect bna state machine
3193  */
3194 static void
3195 bnad_lock_init(struct bnad *bnad)
3196 {
3197 	spin_lock_init(&bnad->bna_lock);
3198 	mutex_init(&bnad->conf_mutex);
3199 	mutex_init(&bnad_list_mutex);
3200 }
3201 
3202 static void
3203 bnad_lock_uninit(struct bnad *bnad)
3204 {
3205 	mutex_destroy(&bnad->conf_mutex);
3206 	mutex_destroy(&bnad_list_mutex);
3207 }
3208 
3209 /* PCI Initialization */
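/*
 * Enable the device, claim its regions and select the DMA mask:
 * prefer 64-bit addressing (*using_dac = true), otherwise fall back
 * to 32-bit masks before giving up.
 */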
3210 static int
3211 bnad_pci_init(struct bnad *bnad,
3212 	      struct pci_dev *pdev, bool *using_dac)
3213 {
3214 	int err;
3215 
3216 	err = pci_enable_device(pdev);
3217 	if (err)
3218 		return err;
3219 	err = pci_request_regions(pdev, BNAD_NAME);
3220 	if (err)
3221 		goto disable_device;
3222 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3223 	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3224 		*using_dac = true;
3225 	} else {
3226 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3227 		if (err) {
3228 			err = dma_set_coherent_mask(&pdev->dev,
3229 						    DMA_BIT_MASK(32));
3230 			if (err)
3231 				goto release_regions;
3232 		}
3233 		*using_dac = false;
3234 	}
3235 	pci_set_master(pdev);
3236 	return 0;
3237 
3238 release_regions:
3239 	pci_release_regions(pdev);
3240 disable_device:
3241 	pci_disable_device(pdev);
3242 
3243 	return err;
3244 }
3245 
3246 static void
3247 bnad_pci_uninit(struct pci_dev *pdev)
3248 {
3249 	pci_release_regions(pdev);
3250 	pci_disable_device(pdev);
3251 }
3252 
3253 static int __devinit
3254 bnad_pci_probe(struct pci_dev *pdev,
3255 		const struct pci_device_id *pcidev_id)
3256 {
3257 	bool	using_dac;
3258 	int	err;
3259 	struct bnad *bnad;
3260 	struct bna *bna;
3261 	struct net_device *netdev;
3262 	struct bfa_pcidev pcidev_info;
3263 	unsigned long flags;
3264 
3265 	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3266 	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3267 
3268 	mutex_lock(&bnad_fwimg_mutex);
3269 	if (!cna_get_firmware_buf(pdev)) {
3270 		mutex_unlock(&bnad_fwimg_mutex);
3271 		pr_warn("Failed to load Firmware Image!\n");
3272 		return -ENODEV;
3273 	}
3274 	mutex_unlock(&bnad_fwimg_mutex);
3275 
3276 	/*
3277 	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3278 	 * bnad = netdev_priv(netdev)
3279 	 */
3280 	netdev = alloc_etherdev(sizeof(struct bnad));
3281 	if (!netdev) {
3282 		dev_err(&pdev->dev, "netdev allocation failed\n");
3283 		err = -ENOMEM;
3284 		return err;
3285 	}
3286 	bnad = netdev_priv(netdev);
3287 	bnad_lock_init(bnad);
3288 	bnad_add_to_list(bnad);
3289 
3290 	mutex_lock(&bnad->conf_mutex);
3291 	/*
3292 	 * PCI initialization
3293 	 *	Output : using_dac = 1 for 64 bit DMA
3294 	 *			   = 0 for 32 bit DMA
3295 	 */
3296 	err = bnad_pci_init(bnad, pdev, &using_dac);
3297 	if (err)
3298 		goto unlock_mutex;
3299 
3300 	/*
3301 	 * Initialize bnad structure
3302 	 * Setup relation between pci_dev & netdev
3303 	 * Init Tx free tasklet
3304 	 */
3305 	err = bnad_init(bnad, pdev, netdev);
3306 	if (err)
3307 		goto pci_uninit;
3308 
3309 	/* Initialize netdev structure, set up ethtool ops */
3310 	bnad_netdev_init(bnad, using_dac);
3311 
3312 	/* Set link to down state */
3313 	netif_carrier_off(netdev);
3314 
3315 	/* Get resource requirement from bna */
3316 	spin_lock_irqsave(&bnad->bna_lock, flags);
3317 	bna_res_req(&bnad->res_info[0]);
3318 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3319 
3320 	/* Allocate resources from bna */
3321 	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3322 	if (err)
3323 		goto drv_uninit;
3324 
3325 	bna = &bnad->bna;
3326 
3327 	/* Setup pcidev_info for bna_init() */
3328 	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3329 	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3330 	pcidev_info.device_id = bnad->pcidev->device;
3331 	pcidev_info.pci_bar_kva = bnad->bar0;
3332 
3333 	spin_lock_irqsave(&bnad->bna_lock, flags);
3334 	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3335 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3336 
3337 	bnad->stats.bna_stats = &bna->stats;
3338 
3339 	bnad_enable_msix(bnad);
3340 	err = bnad_mbox_irq_alloc(bnad);
3341 	if (err)
3342 		goto res_free;
3343 
3344 
3345 	/* Set up timers */
3346 	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3347 				((unsigned long)bnad));
3348 	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3349 				((unsigned long)bnad));
3350 	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3351 				((unsigned long)bnad));
3352 	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3353 				((unsigned long)bnad));
3354 
3355 	/* Now start the timer before calling IOC */
3356 	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3357 		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3358 
3359 	/*
3360 	 * Start the chip
3361 	 * If the enable callback reports an error, the remaining setup is
3362 	 * skipped and the probe completes without registering the netdev.
3363 	 */
3364 	err = bnad_ioceth_enable(bnad);
3365 	if (err) {
3366 		pr_err("BNA: Initialization failed err=%d\n",
3367 		       err);
3368 		goto probe_success;
3369 	}
3370 
3371 	spin_lock_irqsave(&bnad->bna_lock, flags);
3372 	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3373 		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3374 		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3375 			bna_attr(bna)->num_rxp - 1);
3376 		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3377 			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3378 			err = -EIO;
3379 	}
3380 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3381 	if (err)
3382 		goto disable_ioceth;
3383 
3384 	spin_lock_irqsave(&bnad->bna_lock, flags);
3385 	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3386 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3387 
3388 	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3389 	if (err) {
3390 		err = -EIO;
3391 		goto disable_ioceth;
3392 	}
3393 
3394 	spin_lock_irqsave(&bnad->bna_lock, flags);
3395 	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3396 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3397 
3398 	/* Get the burnt-in mac */
3399 	spin_lock_irqsave(&bnad->bna_lock, flags);
3400 	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3401 	bnad_set_netdev_perm_addr(bnad);
3402 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3403 
3404 	mutex_unlock(&bnad->conf_mutex);
3405 
3406 	/* Finally, register with the net_device layer */
3407 	err = register_netdev(netdev);
3408 	if (err) {
3409 		pr_err("BNA : Registering with netdev failed\n");
3410 		goto probe_uninit;
3411 	}
3412 	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3413 
3414 	return 0;
3415 
3416 probe_success:
3417 	mutex_unlock(&bnad->conf_mutex);
3418 	return 0;
3419 
3420 probe_uninit:
3421 	mutex_lock(&bnad->conf_mutex);
3422 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3423 disable_ioceth:
3424 	bnad_ioceth_disable(bnad);
3425 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3426 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3427 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3428 	spin_lock_irqsave(&bnad->bna_lock, flags);
3429 	bna_uninit(bna);
3430 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3431 	bnad_mbox_irq_free(bnad);
3432 	bnad_disable_msix(bnad);
3433 res_free:
3434 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3435 drv_uninit:
3436 	bnad_uninit(bnad);
3437 pci_uninit:
3438 	bnad_pci_uninit(pdev);
3439 unlock_mutex:
3440 	mutex_unlock(&bnad->conf_mutex);
3441 	bnad_remove_from_list(bnad);
3442 	bnad_lock_uninit(bnad);
3443 	free_netdev(netdev);
3444 	return err;
3445 }
3446 
3447 static void __devexit
3448 bnad_pci_remove(struct pci_dev *pdev)
3449 {
3450 	struct net_device *netdev = pci_get_drvdata(pdev);
3451 	struct bnad *bnad;
3452 	struct bna *bna;
3453 	unsigned long flags;
3454 
3455 	if (!netdev)
3456 		return;
3457 
3458 	pr_info("%s bnad_pci_remove\n", netdev->name);
3459 	bnad = netdev_priv(netdev);
3460 	bna = &bnad->bna;
3461 
3462 	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3463 		unregister_netdev(netdev);
3464 
3465 	mutex_lock(&bnad->conf_mutex);
3466 	bnad_ioceth_disable(bnad);
3467 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3468 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3469 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3470 	spin_lock_irqsave(&bnad->bna_lock, flags);
3471 	bna_uninit(bna);
3472 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3473 
3474 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3475 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3476 	bnad_mbox_irq_free(bnad);
3477 	bnad_disable_msix(bnad);
3478 	bnad_pci_uninit(pdev);
3479 	mutex_unlock(&bnad->conf_mutex);
3480 	bnad_remove_from_list(bnad);
3481 	bnad_lock_uninit(bnad);
3482 	bnad_uninit(bnad);
3483 	free_netdev(netdev);
3484 }
3485 
3486 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3487 	{
3488 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3489 			PCI_DEVICE_ID_BROCADE_CT),
3490 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3491 		.class_mask =  0xffff00
3492 	},
3493 	{
3494 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3495 			BFA_PCI_DEVICE_ID_CT2),
3496 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3497 		.class_mask =  0xffff00
3498 	},
3499 	{0,  },
3500 };
3501 
3502 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3503 
3504 static struct pci_driver bnad_pci_driver = {
3505 	.name = BNAD_NAME,
3506 	.id_table = bnad_pci_id_table,
3507 	.probe = bnad_pci_probe,
3508 	.remove = __devexit_p(bnad_pci_remove),
3509 };
3510 
3511 static int __init
3512 bnad_module_init(void)
3513 {
3514 	int err;
3515 
3516 	pr_info("Brocade 10G Ethernet driver - version: %s\n",
3517 			BNAD_VERSION);
3518 
3519 	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3520 
3521 	err = pci_register_driver(&bnad_pci_driver);
3522 	if (err < 0) {
3523 		pr_err("bna : PCI registration failed in module init "
3524 		       "(%d)\n", err);
3525 		return err;
3526 	}
3527 
3528 	return 0;
3529 }
3530 
3531 static void __exit
3532 bnad_module_exit(void)
3533 {
3534 	pci_unregister_driver(&bnad_pci_driver);
3535 
3536 	if (bfi_fw)
3537 		release_firmware(bfi_fw);
3538 }
3539 
3540 module_init(bnad_module_init);
3541 module_exit(bnad_module_exit);
3542 
3543 MODULE_AUTHOR("Brocade");
3544 MODULE_LICENSE("GPL");
3545 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3546 MODULE_VERSION(BNAD_VERSION);
3547 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3548 MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3549