1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
29 
30 #include "bnad.h"
31 #include "bna.h"
32 #include "cna.h"
33 
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
35 
36 /*
37  * Module params
38  */
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42 
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46 
47 static uint bna_debugfs_enable = 1;
48 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50 		 " Range[false:0|true:1]");
51 
52 /*
53  * Global variables
54  */
55 u32 bnad_rxqs_per_cq = 2;
56 static u32 bna_id;
57 static struct mutex bnad_list_mutex;
58 static LIST_HEAD(bnad_list);
59 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
60 
61 /*
62  * Local MACROS
63  */
64 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
65 
66 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
67 
68 #define BNAD_GET_MBOX_IRQ(_bnad)				\
69 	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
70 	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
71 	 ((_bnad)->pcidev->irq))
72 
73 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
74 do {								\
75 	(_res_info)->res_type = BNA_RES_T_MEM;			\
76 	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
77 	(_res_info)->res_u.mem_info.num = (_num);		\
78 	(_res_info)->res_u.mem_info.len =			\
79 	sizeof(struct bnad_unmap_q) +				\
80 	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
81 } while (0)
82 
83 static void
84 bnad_add_to_list(struct bnad *bnad)
85 {
86 	mutex_lock(&bnad_list_mutex);
87 	list_add_tail(&bnad->list_entry, &bnad_list);
88 	bnad->id = bna_id++;
89 	mutex_unlock(&bnad_list_mutex);
90 }
91 
92 static void
93 bnad_remove_from_list(struct bnad *bnad)
94 {
95 	mutex_lock(&bnad_list_mutex);
96 	list_del(&bnad->list_entry);
97 	mutex_unlock(&bnad_list_mutex);
98 }
99 
100 /*
101  * Reinitialize completions in CQ, once Rx is taken down
102  */
103 static void
104 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
105 {
106 	struct bna_cq_entry *cmpl, *next_cmpl;
107 	unsigned int wi_range, wis = 0, ccb_prod = 0;
108 	int i;
109 
110 	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
111 			    wi_range);
112 
113 	for (i = 0; i < ccb->q_depth; i++) {
114 		wis++;
115 		if (likely(--wi_range))
116 			next_cmpl = cmpl + 1;
117 		else {
118 			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
119 			wis = 0;
120 			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
121 						next_cmpl, wi_range);
122 		}
123 		cmpl->valid = 0;
124 		cmpl = next_cmpl;
125 	}
126 }
127 
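/*
 * Unmap the skb head and 'frag' fragments starting at unmap_array[index],
 * advancing the unmap queue index for each entry. Returns the new index.
 */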
128 static u32
129 bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
130 	u32 index, u32 depth, struct sk_buff *skb, u32 frag)
131 {
132 	int j;
133 	array[index].skb = NULL;
134 
135 	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
136 			skb_headlen(skb), DMA_TO_DEVICE);
137 	dma_unmap_addr_set(&array[index], dma_addr, 0);
138 	BNA_QE_INDX_ADD(index, 1, depth);
139 
140 	for (j = 0; j < frag; j++) {
141 		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
142 			  skb_frag_size(&skb_shinfo(skb)->frags[j]),
143 						DMA_TO_DEVICE);
144 		dma_unmap_addr_set(&array[index], dma_addr, 0);
145 		BNA_QE_INDX_ADD(index, 1, depth);
146 	}
147 
148 	return index;
149 }
150 
151 /*
152  * Frees all pending Tx Bufs
153  * At this point no activity is expected on the Q,
154  * so DMA unmap & freeing is fine.
155  */
156 static void
157 bnad_txq_cleanup(struct bnad *bnad,
158 		 struct bna_tcb *tcb)
159 {
160 	u32		unmap_cons;
161 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
162 	struct bnad_skb_unmap *unmap_array;
163 	struct sk_buff		*skb = NULL;
164 	int			q;
165 
166 	unmap_array = unmap_q->unmap_array;
167 
168 	for (q = 0; q < unmap_q->q_depth; q++) {
169 		skb = unmap_array[q].skb;
170 		if (!skb)
171 			continue;
172 
173 		unmap_cons = q;
174 		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
175 				unmap_cons, unmap_q->q_depth, skb,
176 				skb_shinfo(skb)->nr_frags);
177 
178 		dev_kfree_skb_any(skb);
179 	}
180 }
181 
182 /* Data Path Handlers */
183 
184 /*
185  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
186  * Can be called in a) Interrupt context
187  *		    b) Sending context
188  */
189 static u32
190 bnad_txcmpl_process(struct bnad *bnad,
191 		 struct bna_tcb *tcb)
192 {
193 	u32		unmap_cons, sent_packets = 0, sent_bytes = 0;
194 	u16		wis, updated_hw_cons;
195 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
196 	struct bnad_skb_unmap *unmap_array;
197 	struct sk_buff		*skb;
198 
199 	/* Just return if TX is stopped */
200 	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
201 		return 0;
202 
203 	updated_hw_cons = *(tcb->hw_consumer_index);
204 
205 	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
206 				  updated_hw_cons, tcb->q_depth);
207 
208 	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
209 
210 	unmap_array = unmap_q->unmap_array;
211 	unmap_cons = unmap_q->consumer_index;
212 
213 	prefetch(&unmap_array[unmap_cons + 1]);
214 	while (wis) {
215 		skb = unmap_array[unmap_cons].skb;
216 
217 		sent_packets++;
218 		sent_bytes += skb->len;
219 		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
220 
221 		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
222 				unmap_cons, unmap_q->q_depth, skb,
223 				skb_shinfo(skb)->nr_frags);
224 
225 		dev_kfree_skb_any(skb);
226 	}
227 
228 	/* Update consumer pointers. */
229 	tcb->consumer_index = updated_hw_cons;
230 	unmap_q->consumer_index = unmap_cons;
231 
232 	tcb->txq->tx_packets += sent_packets;
233 	tcb->txq->tx_bytes += sent_bytes;
234 
235 	return sent_packets;
236 }
237 
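/*
 * Reclaim completed Tx buffers, wake the netdev queue if enough
 * descriptors were freed, and ack the IB with the completion count.
 */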
238 static u32
239 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
240 {
241 	struct net_device *netdev = bnad->netdev;
242 	u32 sent = 0;
243 
244 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
245 		return 0;
246 
247 	sent = bnad_txcmpl_process(bnad, tcb);
248 	if (sent) {
249 		if (netif_queue_stopped(netdev) &&
250 		    netif_carrier_ok(netdev) &&
251 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
252 				    BNAD_NETIF_WAKE_THRESHOLD) {
253 			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
254 				netif_wake_queue(netdev);
255 				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
256 			}
257 		}
258 	}
259 
260 	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
261 		bna_ib_ack(tcb->i_dbell, sent);
262 
263 	smp_mb__before_clear_bit();
264 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
265 
266 	return sent;
267 }
268 
269 /* MSIX Tx Completion Handler */
270 static irqreturn_t
271 bnad_msix_tx(int irq, void *data)
272 {
273 	struct bna_tcb *tcb = (struct bna_tcb *)data;
274 	struct bnad *bnad = tcb->bnad;
275 
276 	bnad_tx_complete(bnad, tcb);
277 
278 	return IRQ_HANDLED;
279 }
280 
281 static void
282 bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
283 {
284 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
285 
286 	rcb->producer_index = 0;
287 	rcb->consumer_index = 0;
288 
289 	unmap_q->producer_index = 0;
290 	unmap_q->consumer_index = 0;
291 }
292 
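/* Free all buffers still posted on the RxQ and reset its indices */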
293 static void
294 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
295 {
296 	struct bnad_unmap_q *unmap_q;
297 	struct bnad_skb_unmap *unmap_array;
298 	struct sk_buff *skb;
299 	int unmap_cons;
300 
301 	unmap_q = rcb->unmap_q;
302 	unmap_array = unmap_q->unmap_array;
303 	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
304 		skb = unmap_array[unmap_cons].skb;
305 		if (!skb)
306 			continue;
307 		unmap_array[unmap_cons].skb = NULL;
308 		dma_unmap_single(&bnad->pcidev->dev,
309 				 dma_unmap_addr(&unmap_array[unmap_cons],
310 						dma_addr),
311 				 rcb->rxq->buffer_size,
312 				 DMA_FROM_DEVICE);
313 		dev_kfree_skb(skb);
314 	}
315 	bnad_rcb_cleanup(bnad, rcb);
316 }
317 
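/*
 * Allocate skbs for the free slots in the RxQ, DMA map them, post them
 * to the queue and ring the doorbell if anything was posted.
 */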
318 static void
319 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
320 {
321 	u16 to_alloc, alloced, unmap_prod, wi_range;
322 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
323 	struct bnad_skb_unmap *unmap_array;
324 	struct bna_rxq_entry *rxent;
325 	struct sk_buff *skb;
326 	dma_addr_t dma_addr;
327 
328 	alloced = 0;
329 	to_alloc =
330 		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
331 
332 	unmap_array = unmap_q->unmap_array;
333 	unmap_prod = unmap_q->producer_index;
334 
335 	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
336 
337 	while (to_alloc--) {
338 		if (!wi_range)
339 			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
340 					     wi_range);
341 		skb = netdev_alloc_skb_ip_align(bnad->netdev,
342 						rcb->rxq->buffer_size);
343 		if (unlikely(!skb)) {
344 			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
345 			rcb->rxq->rxbuf_alloc_failed++;
346 			goto finishing;
347 		}
348 		unmap_array[unmap_prod].skb = skb;
349 		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
350 					  rcb->rxq->buffer_size,
351 					  DMA_FROM_DEVICE);
352 		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
353 				   dma_addr);
354 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
355 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
356 
357 		rxent++;
358 		wi_range--;
359 		alloced++;
360 	}
361 
362 finishing:
363 	if (likely(alloced)) {
364 		unmap_q->producer_index = unmap_prod;
365 		rcb->producer_index = unmap_prod;
366 		smp_mb();
367 		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
368 			bna_rxq_prod_indx_doorbell(rcb);
369 	}
370 }
371 
372 static inline void
373 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
374 {
375 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
376 
377 	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
378 		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
379 			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
380 			bnad_rxq_post(bnad, rcb);
381 		smp_mb__before_clear_bit();
382 		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
383 	}
384 }
385 
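/*
 * Process up to 'budget' completions from the CQ: unmap each received
 * buffer, check the error and checksum flags, pass the skb up the stack
 * (via GRO when the checksum was verified), then ack the IB and refill
 * the RxQs.
 */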
386 static u32
387 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
388 {
389 	struct bna_cq_entry *cmpl, *next_cmpl;
390 	struct bna_rcb *rcb = NULL;
391 	unsigned int wi_range, packets = 0, wis = 0;
392 	struct bnad_unmap_q *unmap_q;
393 	struct bnad_skb_unmap *unmap_array;
394 	struct sk_buff *skb;
395 	u32 flags, unmap_cons;
396 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
397 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
398 
399 	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
400 		return 0;
401 
402 	prefetch(bnad->netdev);
403 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
404 			    wi_range);
405 	BUG_ON(!(wi_range <= ccb->q_depth));
406 	while (cmpl->valid && packets < budget) {
407 		packets++;
408 		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
409 
410 		if (bna_is_small_rxq(cmpl->rxq_id))
411 			rcb = ccb->rcb[1];
412 		else
413 			rcb = ccb->rcb[0];
414 
415 		unmap_q = rcb->unmap_q;
416 		unmap_array = unmap_q->unmap_array;
417 		unmap_cons = unmap_q->consumer_index;
418 
419 		skb = unmap_array[unmap_cons].skb;
420 		BUG_ON(!(skb));
421 		unmap_array[unmap_cons].skb = NULL;
422 		dma_unmap_single(&bnad->pcidev->dev,
423 				 dma_unmap_addr(&unmap_array[unmap_cons],
424 						dma_addr),
425 				 rcb->rxq->buffer_size,
426 				 DMA_FROM_DEVICE);
427 		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
428 
		/* XXX: could be made more efficient; revisit for performance */
430 		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
431 
432 		wis++;
433 		if (likely(--wi_range))
434 			next_cmpl = cmpl + 1;
435 		else {
436 			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
437 			wis = 0;
438 			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
439 						next_cmpl, wi_range);
440 			BUG_ON(!(wi_range <= ccb->q_depth));
441 		}
442 		prefetch(next_cmpl);
443 
444 		flags = ntohl(cmpl->flags);
445 		if (unlikely
446 		    (flags &
447 		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
448 		      BNA_CQ_EF_TOO_LONG))) {
449 			dev_kfree_skb_any(skb);
450 			rcb->rxq->rx_packets_with_error++;
451 			goto next;
452 		}
453 
454 		skb_put(skb, ntohs(cmpl->length));
455 		if (likely
456 		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
457 		     (((flags & BNA_CQ_EF_IPV4) &&
458 		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
459 		      (flags & BNA_CQ_EF_IPV6)) &&
460 		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
461 		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
462 			skb->ip_summed = CHECKSUM_UNNECESSARY;
463 		else
464 			skb_checksum_none_assert(skb);
465 
466 		rcb->rxq->rx_packets++;
467 		rcb->rxq->rx_bytes += skb->len;
468 		skb->protocol = eth_type_trans(skb, bnad->netdev);
469 
470 		if (flags & BNA_CQ_EF_VLAN)
471 			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
472 
473 		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
474 			napi_gro_receive(&rx_ctrl->napi, skb);
475 		else
476 			netif_receive_skb(skb);
477 
478 next:
479 		cmpl->valid = 0;
480 		cmpl = next_cmpl;
481 	}
482 
483 	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
484 
485 	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
486 		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
487 
488 	bnad_refill_rxq(bnad, ccb->rcb[0]);
489 	if (ccb->rcb[1])
490 		bnad_refill_rxq(bnad, ccb->rcb[1]);
491 
492 	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
493 
494 	return packets;
495 }
496 
497 static void
498 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
499 {
500 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
501 	struct napi_struct *napi = &rx_ctrl->napi;
502 
503 	if (likely(napi_schedule_prep(napi))) {
504 		__napi_schedule(napi);
505 		rx_ctrl->rx_schedule++;
506 	}
507 }
508 
509 /* MSIX Rx Path Handler */
510 static irqreturn_t
511 bnad_msix_rx(int irq, void *data)
512 {
513 	struct bna_ccb *ccb = (struct bna_ccb *)data;
514 
515 	if (ccb) {
516 		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
517 		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
518 	}
519 
520 	return IRQ_HANDLED;
521 }
522 
523 /* Interrupt handlers */
524 
525 /* Mbox Interrupt Handlers */
526 static irqreturn_t
527 bnad_msix_mbox_handler(int irq, void *data)
528 {
529 	u32 intr_status;
530 	unsigned long flags;
531 	struct bnad *bnad = (struct bnad *)data;
532 
533 	spin_lock_irqsave(&bnad->bna_lock, flags);
534 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
535 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
536 		return IRQ_HANDLED;
537 	}
538 
539 	bna_intr_status_get(&bnad->bna, intr_status);
540 
541 	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
542 		bna_mbox_handler(&bnad->bna, intr_status);
543 
544 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
545 
546 	return IRQ_HANDLED;
547 }
548 
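/*
 * INTx interrupt handler - services mailbox events, then handles Tx
 * completions and schedules Rx NAPI polling for data interrupts.
 */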
549 static irqreturn_t
550 bnad_isr(int irq, void *data)
551 {
552 	int i, j;
553 	u32 intr_status;
554 	unsigned long flags;
555 	struct bnad *bnad = (struct bnad *)data;
556 	struct bnad_rx_info *rx_info;
557 	struct bnad_rx_ctrl *rx_ctrl;
558 	struct bna_tcb *tcb = NULL;
559 
560 	spin_lock_irqsave(&bnad->bna_lock, flags);
561 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
562 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
563 		return IRQ_NONE;
564 	}
565 
566 	bna_intr_status_get(&bnad->bna, intr_status);
567 
568 	if (unlikely(!intr_status)) {
569 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
570 		return IRQ_NONE;
571 	}
572 
573 	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
574 		bna_mbox_handler(&bnad->bna, intr_status);
575 
576 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
577 
578 	if (!BNA_IS_INTX_DATA_INTR(intr_status))
579 		return IRQ_HANDLED;
580 
581 	/* Process data interrupts */
582 	/* Tx processing */
583 	for (i = 0; i < bnad->num_tx; i++) {
584 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
585 			tcb = bnad->tx_info[i].tcb[j];
586 			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
587 				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
588 		}
589 	}
590 	/* Rx processing */
591 	for (i = 0; i < bnad->num_rx; i++) {
592 		rx_info = &bnad->rx_info[i];
593 		if (!rx_info->rx)
594 			continue;
595 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
596 			rx_ctrl = &rx_info->rx_ctrl[j];
597 			if (rx_ctrl->ccb)
598 				bnad_netif_rx_schedule_poll(bnad,
599 							    rx_ctrl->ccb);
600 		}
601 	}
602 	return IRQ_HANDLED;
603 }
604 
605 /*
606  * Called in interrupt / callback context
607  * with bna_lock held, so cfg_flags access is OK
608  */
609 static void
610 bnad_enable_mbox_irq(struct bnad *bnad)
611 {
612 	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
613 
614 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
615 }
616 
617 /*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
620  */
621 static void
622 bnad_disable_mbox_irq(struct bnad *bnad)
623 {
624 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
625 
626 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
627 }
628 
629 static void
630 bnad_set_netdev_perm_addr(struct bnad *bnad)
631 {
632 	struct net_device *netdev = bnad->netdev;
633 
634 	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
635 	if (is_zero_ether_addr(netdev->dev_addr))
636 		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
637 }
638 
639 /* Control Path Handlers */
640 
641 /* Callbacks */
642 void
643 bnad_cb_mbox_intr_enable(struct bnad *bnad)
644 {
645 	bnad_enable_mbox_irq(bnad);
646 }
647 
648 void
649 bnad_cb_mbox_intr_disable(struct bnad *bnad)
650 {
651 	bnad_disable_mbox_irq(bnad);
652 }
653 
654 void
655 bnad_cb_ioceth_ready(struct bnad *bnad)
656 {
657 	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
658 	complete(&bnad->bnad_completions.ioc_comp);
659 }
660 
661 void
662 bnad_cb_ioceth_failed(struct bnad *bnad)
663 {
664 	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
665 	complete(&bnad->bnad_completions.ioc_comp);
666 }
667 
668 void
669 bnad_cb_ioceth_disabled(struct bnad *bnad)
670 {
671 	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
672 	complete(&bnad->bnad_completions.ioc_comp);
673 }
674 
675 static void
676 bnad_cb_enet_disabled(void *arg)
677 {
678 	struct bnad *bnad = (struct bnad *)arg;
679 
680 	netif_carrier_off(bnad->netdev);
681 	complete(&bnad->bnad_completions.enet_comp);
682 }
683 
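/*
 * Link status callback - tracks CEE state, updates the netif carrier and
 * starts / stops the Tx subqueues to match the new link state.
 */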
684 void
685 bnad_cb_ethport_link_status(struct bnad *bnad,
686 			enum bna_link_status link_status)
687 {
688 	bool link_up = false;
689 
690 	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
691 
692 	if (link_status == BNA_CEE_UP) {
693 		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
694 			BNAD_UPDATE_CTR(bnad, cee_toggle);
695 		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
696 	} else {
697 		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
698 			BNAD_UPDATE_CTR(bnad, cee_toggle);
699 		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
700 	}
701 
702 	if (link_up) {
703 		if (!netif_carrier_ok(bnad->netdev)) {
704 			uint tx_id, tcb_id;
705 			printk(KERN_WARNING "bna: %s link up\n",
706 				bnad->netdev->name);
707 			netif_carrier_on(bnad->netdev);
708 			BNAD_UPDATE_CTR(bnad, link_toggle);
709 			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
710 				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
711 				      tcb_id++) {
712 					struct bna_tcb *tcb =
713 					bnad->tx_info[tx_id].tcb[tcb_id];
714 					u32 txq_id;
715 					if (!tcb)
716 						continue;
717 
718 					txq_id = tcb->id;
719 
720 					if (test_bit(BNAD_TXQ_TX_STARTED,
721 						     &tcb->flags)) {
722 						/*
723 						 * Force an immediate
						 * Transmit Schedule
						 */
725 						printk(KERN_INFO "bna: %s %d "
726 						      "TXQ_STARTED\n",
727 						       bnad->netdev->name,
728 						       txq_id);
729 						netif_wake_subqueue(
730 								bnad->netdev,
731 								txq_id);
732 						BNAD_UPDATE_CTR(bnad,
733 							netif_queue_wakeup);
734 					} else {
735 						netif_stop_subqueue(
736 								bnad->netdev,
737 								txq_id);
738 						BNAD_UPDATE_CTR(bnad,
739 							netif_queue_stop);
740 					}
741 				}
742 			}
743 		}
744 	} else {
745 		if (netif_carrier_ok(bnad->netdev)) {
746 			printk(KERN_WARNING "bna: %s link down\n",
747 				bnad->netdev->name);
748 			netif_carrier_off(bnad->netdev);
749 			BNAD_UPDATE_CTR(bnad, link_toggle);
750 		}
751 	}
752 }
753 
754 static void
755 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
756 {
757 	struct bnad *bnad = (struct bnad *)arg;
758 
759 	complete(&bnad->bnad_completions.tx_comp);
760 }
761 
762 static void
763 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
764 {
765 	struct bnad_tx_info *tx_info =
766 			(struct bnad_tx_info *)tcb->txq->tx->priv;
767 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
768 
769 	tx_info->tcb[tcb->id] = tcb;
770 	unmap_q->producer_index = 0;
771 	unmap_q->consumer_index = 0;
772 	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
773 }
774 
775 static void
776 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
777 {
778 	struct bnad_tx_info *tx_info =
779 			(struct bnad_tx_info *)tcb->txq->tx->priv;
780 
781 	tx_info->tcb[tcb->id] = NULL;
782 	tcb->priv = NULL;
783 }
784 
785 static void
786 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
787 {
788 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
789 
790 	unmap_q->producer_index = 0;
791 	unmap_q->consumer_index = 0;
792 	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
793 }
794 
795 static void
796 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
797 {
798 	struct bnad_rx_info *rx_info =
799 			(struct bnad_rx_info *)ccb->cq->rx->priv;
800 
801 	rx_info->rx_ctrl[ccb->id].ccb = ccb;
802 	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
803 }
804 
805 static void
806 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
807 {
808 	struct bnad_rx_info *rx_info =
809 			(struct bnad_rx_info *)ccb->cq->rx->priv;
810 
811 	rx_info->rx_ctrl[ccb->id].ccb = NULL;
812 }
813 
814 static void
815 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
816 {
817 	struct bnad_tx_info *tx_info =
818 			(struct bnad_tx_info *)tx->priv;
819 	struct bna_tcb *tcb;
820 	u32 txq_id;
821 	int i;
822 
823 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
824 		tcb = tx_info->tcb[i];
825 		if (!tcb)
826 			continue;
827 		txq_id = tcb->id;
828 		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
829 		netif_stop_subqueue(bnad->netdev, txq_id);
830 		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
831 			bnad->netdev->name, txq_id);
832 	}
833 }
834 
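/*
 * Tx resume callback - mark the TXQs started again and wake the
 * corresponding netdev subqueues if the link is up.
 */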
835 static void
836 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
837 {
838 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
839 	struct bna_tcb *tcb;
840 	u32 txq_id;
841 	int i;
842 
843 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
844 		tcb = tx_info->tcb[i];
845 		if (!tcb)
846 			continue;
847 		txq_id = tcb->id;
848 
849 		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
850 		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
851 		BUG_ON(*(tcb->hw_consumer_index) != 0);
852 
853 		if (netif_carrier_ok(bnad->netdev)) {
854 			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
855 				bnad->netdev->name, txq_id);
856 			netif_wake_subqueue(bnad->netdev, txq_id);
857 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
858 		}
859 	}
860 
861 	/*
	 * Workaround for the first ioceth enable failure, which
	 * leaves us with a zero MAC address. Try to get the MAC
	 * address again here.
865 	 */
866 	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
867 		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
868 		bnad_set_netdev_perm_addr(bnad);
869 	}
870 }
871 
872 /*
 * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
874  */
875 static void
876 bnad_tx_cleanup(struct delayed_work *work)
877 {
878 	struct bnad_tx_info *tx_info =
879 		container_of(work, struct bnad_tx_info, tx_cleanup_work);
880 	struct bnad *bnad = NULL;
881 	struct bnad_unmap_q *unmap_q;
882 	struct bna_tcb *tcb;
883 	unsigned long flags;
884 	uint32_t i, pending = 0;
885 
886 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
887 		tcb = tx_info->tcb[i];
888 		if (!tcb)
889 			continue;
890 
891 		bnad = tcb->bnad;
892 
893 		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
894 			pending++;
895 			continue;
896 		}
897 
898 		bnad_txq_cleanup(bnad, tcb);
899 
900 		unmap_q = tcb->unmap_q;
901 		unmap_q->producer_index = 0;
902 		unmap_q->consumer_index = 0;
903 
904 		smp_mb__before_clear_bit();
905 		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
906 	}
907 
908 	if (pending) {
909 		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
910 			msecs_to_jiffies(1));
911 		return;
912 	}
913 
914 	spin_lock_irqsave(&bnad->bna_lock, flags);
915 	bna_tx_cleanup_complete(tx_info->tx);
916 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
917 }
918 
919 
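/* Tx cleanup callback from BNA - defers buffer cleanup to the work queue */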
920 static void
921 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
922 {
923 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
924 	struct bna_tcb *tcb;
925 	int i;
926 
927 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
928 		tcb = tx_info->tcb[i];
929 		if (!tcb)
930 			continue;
931 	}
932 
933 	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
934 }
935 
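/*
 * Rx stall callback - clear BNAD_RXQ_POST_OK so that no new buffers are
 * posted while BNA quiesces the Rx path.
 */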
936 static void
937 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
938 {
939 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
940 	struct bna_ccb *ccb;
941 	struct bnad_rx_ctrl *rx_ctrl;
942 	int i;
943 
944 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
945 		rx_ctrl = &rx_info->rx_ctrl[i];
946 		ccb = rx_ctrl->ccb;
947 		if (!ccb)
948 			continue;
949 
950 		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
951 
952 		if (ccb->rcb[1])
953 			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
954 	}
955 }
956 
957 /*
 * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
959  */
960 static void
961 bnad_rx_cleanup(void *work)
962 {
963 	struct bnad_rx_info *rx_info =
964 		container_of(work, struct bnad_rx_info, rx_cleanup_work);
965 	struct bnad_rx_ctrl *rx_ctrl;
966 	struct bnad *bnad = NULL;
967 	unsigned long flags;
968 	uint32_t i;
969 
970 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
971 		rx_ctrl = &rx_info->rx_ctrl[i];
972 
973 		if (!rx_ctrl->ccb)
974 			continue;
975 
976 		bnad = rx_ctrl->ccb->bnad;
977 
978 		/*
979 		 * Wait till the poll handler has exited
980 		 * and nothing can be scheduled anymore
981 		 */
982 		napi_disable(&rx_ctrl->napi);
983 
984 		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
985 		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
986 		if (rx_ctrl->ccb->rcb[1])
987 			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
988 	}
989 
990 	spin_lock_irqsave(&bnad->bna_lock, flags);
991 	bna_rx_cleanup_complete(rx_info->rx);
992 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
993 }
994 
995 static void
996 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
997 {
998 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
999 	struct bna_ccb *ccb;
1000 	struct bnad_rx_ctrl *rx_ctrl;
1001 	int i;
1002 
1003 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1004 		rx_ctrl = &rx_info->rx_ctrl[i];
1005 		ccb = rx_ctrl->ccb;
1006 		if (!ccb)
1007 			continue;
1008 
1009 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1010 
1011 		if (ccb->rcb[1])
1012 			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1013 	}
1014 
1015 	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1016 }
1017 
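/*
 * Rx post callback - enable NAPI, mark the RxQs started and postable,
 * and fill them with receive buffers.
 */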
1018 static void
1019 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1020 {
1021 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1022 	struct bna_ccb *ccb;
1023 	struct bna_rcb *rcb;
1024 	struct bnad_rx_ctrl *rx_ctrl;
1025 	struct bnad_unmap_q *unmap_q;
1026 	int i;
1027 	int j;
1028 
1029 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1030 		rx_ctrl = &rx_info->rx_ctrl[i];
1031 		ccb = rx_ctrl->ccb;
1032 		if (!ccb)
1033 			continue;
1034 
1035 		napi_enable(&rx_ctrl->napi);
1036 
1037 		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1038 			rcb = ccb->rcb[j];
1039 			if (!rcb)
1040 				continue;
1041 
1042 			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1043 			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1044 			unmap_q = rcb->unmap_q;
1045 
1046 			/* Now allocate & post buffers for this RCB */
1047 			/* !!Allocation in callback context */
1048 			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1049 				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1050 					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1051 					bnad_rxq_post(bnad, rcb);
				smp_mb__before_clear_bit();
1053 				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1054 			}
1055 		}
1056 	}
1057 }
1058 
1059 static void
1060 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1061 {
1062 	struct bnad *bnad = (struct bnad *)arg;
1063 
1064 	complete(&bnad->bnad_completions.rx_comp);
1065 }
1066 
1067 static void
1068 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1069 {
1070 	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1071 	complete(&bnad->bnad_completions.mcast_comp);
1072 }
1073 
1074 void
1075 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1076 		       struct bna_stats *stats)
1077 {
1078 	if (status == BNA_CB_SUCCESS)
1079 		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1080 
1081 	if (!netif_running(bnad->netdev) ||
1082 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1083 		return;
1084 
1085 	mod_timer(&bnad->stats_timer,
1086 		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1087 }
1088 
1089 static void
1090 bnad_cb_enet_mtu_set(struct bnad *bnad)
1091 {
1092 	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1093 	complete(&bnad->bnad_completions.mtu_comp);
1094 }
1095 
1096 void
1097 bnad_cb_completion(void *arg, enum bfa_status status)
1098 {
1099 	struct bnad_iocmd_comp *iocmd_comp =
1100 			(struct bnad_iocmd_comp *)arg;
1101 
1102 	iocmd_comp->comp_status = (u32) status;
1103 	complete(&iocmd_comp->comp);
1104 }
1105 
1106 /* Resource allocation, free functions */
1107 
1108 static void
1109 bnad_mem_free(struct bnad *bnad,
1110 	      struct bna_mem_info *mem_info)
1111 {
1112 	int i;
1113 	dma_addr_t dma_pa;
1114 
1115 	if (mem_info->mdl == NULL)
1116 		return;
1117 
1118 	for (i = 0; i < mem_info->num; i++) {
1119 		if (mem_info->mdl[i].kva != NULL) {
1120 			if (mem_info->mem_type == BNA_MEM_T_DMA) {
1121 				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1122 						dma_pa);
1123 				dma_free_coherent(&bnad->pcidev->dev,
1124 						  mem_info->mdl[i].len,
1125 						  mem_info->mdl[i].kva, dma_pa);
1126 			} else
1127 				kfree(mem_info->mdl[i].kva);
1128 		}
1129 	}
1130 	kfree(mem_info->mdl);
1131 	mem_info->mdl = NULL;
1132 }
1133 
1134 static int
1135 bnad_mem_alloc(struct bnad *bnad,
1136 	       struct bna_mem_info *mem_info)
1137 {
1138 	int i;
1139 	dma_addr_t dma_pa;
1140 
1141 	if ((mem_info->num == 0) || (mem_info->len == 0)) {
1142 		mem_info->mdl = NULL;
1143 		return 0;
1144 	}
1145 
1146 	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1147 				GFP_KERNEL);
1148 	if (mem_info->mdl == NULL)
1149 		return -ENOMEM;
1150 
1151 	if (mem_info->mem_type == BNA_MEM_T_DMA) {
1152 		for (i = 0; i < mem_info->num; i++) {
1153 			mem_info->mdl[i].len = mem_info->len;
1154 			mem_info->mdl[i].kva =
1155 				dma_alloc_coherent(&bnad->pcidev->dev,
1156 						mem_info->len, &dma_pa,
1157 						GFP_KERNEL);
1158 
1159 			if (mem_info->mdl[i].kva == NULL)
1160 				goto err_return;
1161 
1162 			BNA_SET_DMA_ADDR(dma_pa,
1163 					 &(mem_info->mdl[i].dma));
1164 		}
1165 	} else {
1166 		for (i = 0; i < mem_info->num; i++) {
1167 			mem_info->mdl[i].len = mem_info->len;
1168 			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1169 							GFP_KERNEL);
1170 			if (mem_info->mdl[i].kva == NULL)
1171 				goto err_return;
1172 		}
1173 	}
1174 
1175 	return 0;
1176 
1177 err_return:
1178 	bnad_mem_free(bnad, mem_info);
1179 	return -ENOMEM;
1180 }
1181 
1182 /* Free IRQ for Mailbox */
1183 static void
1184 bnad_mbox_irq_free(struct bnad *bnad)
1185 {
1186 	int irq;
1187 	unsigned long flags;
1188 
1189 	spin_lock_irqsave(&bnad->bna_lock, flags);
1190 	bnad_disable_mbox_irq(bnad);
1191 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1192 
1193 	irq = BNAD_GET_MBOX_IRQ(bnad);
1194 	free_irq(irq, bnad);
1195 }
1196 
1197 /*
 * Allocates IRQ for Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
1201  */
1202 static int
1203 bnad_mbox_irq_alloc(struct bnad *bnad)
1204 {
1205 	int		err = 0;
1206 	unsigned long	irq_flags, flags;
1207 	u32	irq;
1208 	irq_handler_t	irq_handler;
1209 
1210 	spin_lock_irqsave(&bnad->bna_lock, flags);
1211 	if (bnad->cfg_flags & BNAD_CF_MSIX) {
1212 		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1213 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1214 		irq_flags = 0;
1215 	} else {
1216 		irq_handler = (irq_handler_t)bnad_isr;
1217 		irq = bnad->pcidev->irq;
1218 		irq_flags = IRQF_SHARED;
1219 	}
1220 
1221 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1222 	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1223 
1224 	/*
1225 	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
1227 	 */
1228 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1229 
1230 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1231 
1232 	err = request_irq(irq, irq_handler, irq_flags,
1233 			  bnad->mbox_irq_name, bnad);
1234 
1235 	return err;
1236 }
1237 
1238 static void
1239 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1240 {
1241 	kfree(intr_info->idl);
1242 	intr_info->idl = NULL;
1243 }
1244 
1245 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1246 static int
1247 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1248 		    u32 txrx_id, struct bna_intr_info *intr_info)
1249 {
1250 	int i, vector_start = 0;
1251 	u32 cfg_flags;
1252 	unsigned long flags;
1253 
1254 	spin_lock_irqsave(&bnad->bna_lock, flags);
1255 	cfg_flags = bnad->cfg_flags;
1256 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1257 
1258 	if (cfg_flags & BNAD_CF_MSIX) {
1259 		intr_info->intr_type = BNA_INTR_T_MSIX;
1260 		intr_info->idl = kcalloc(intr_info->num,
1261 					sizeof(struct bna_intr_descr),
1262 					GFP_KERNEL);
1263 		if (!intr_info->idl)
1264 			return -ENOMEM;
1265 
1266 		switch (src) {
1267 		case BNAD_INTR_TX:
1268 			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1269 			break;
1270 
1271 		case BNAD_INTR_RX:
1272 			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1273 					(bnad->num_tx * bnad->num_txq_per_tx) +
1274 					txrx_id;
1275 			break;
1276 
1277 		default:
1278 			BUG();
1279 		}
1280 
1281 		for (i = 0; i < intr_info->num; i++)
1282 			intr_info->idl[i].vector = vector_start + i;
1283 	} else {
1284 		intr_info->intr_type = BNA_INTR_T_INTX;
1285 		intr_info->num = 1;
1286 		intr_info->idl = kcalloc(intr_info->num,
1287 					sizeof(struct bna_intr_descr),
1288 					GFP_KERNEL);
1289 		if (!intr_info->idl)
1290 			return -ENOMEM;
1291 
1292 		switch (src) {
1293 		case BNAD_INTR_TX:
1294 			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1295 			break;
1296 
1297 		case BNAD_INTR_RX:
1298 			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1299 			break;
1300 		}
1301 	}
1302 	return 0;
1303 }
1304 
1305 /**
1306  * NOTE: Should be called for MSIX only
1307  * Unregisters Tx MSIX vector(s) from the kernel
1308  */
1309 static void
1310 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1311 			int num_txqs)
1312 {
1313 	int i;
1314 	int vector_num;
1315 
1316 	for (i = 0; i < num_txqs; i++) {
1317 		if (tx_info->tcb[i] == NULL)
1318 			continue;
1319 
1320 		vector_num = tx_info->tcb[i]->intr_vector;
1321 		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1322 	}
1323 }
1324 
1325 /**
1326  * NOTE: Should be called for MSIX only
1327  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1328  */
1329 static int
1330 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1331 			u32 tx_id, int num_txqs)
1332 {
1333 	int i;
1334 	int err;
1335 	int vector_num;
1336 
1337 	for (i = 0; i < num_txqs; i++) {
1338 		vector_num = tx_info->tcb[i]->intr_vector;
1339 		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1340 				tx_id + tx_info->tcb[i]->id);
1341 		err = request_irq(bnad->msix_table[vector_num].vector,
1342 				  (irq_handler_t)bnad_msix_tx, 0,
1343 				  tx_info->tcb[i]->name,
1344 				  tx_info->tcb[i]);
1345 		if (err)
1346 			goto err_return;
1347 	}
1348 
1349 	return 0;
1350 
1351 err_return:
1352 	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, i);
1354 	return -1;
1355 }
1356 
1357 /**
1358  * NOTE: Should be called for MSIX only
1359  * Unregisters Rx MSIX vector(s) from the kernel
1360  */
1361 static void
1362 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1363 			int num_rxps)
1364 {
1365 	int i;
1366 	int vector_num;
1367 
1368 	for (i = 0; i < num_rxps; i++) {
1369 		if (rx_info->rx_ctrl[i].ccb == NULL)
1370 			continue;
1371 
1372 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1373 		free_irq(bnad->msix_table[vector_num].vector,
1374 			 rx_info->rx_ctrl[i].ccb);
1375 	}
1376 }
1377 
1378 /**
1379  * NOTE: Should be called for MSIX only
1380  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1381  */
1382 static int
1383 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1384 			u32 rx_id, int num_rxps)
1385 {
1386 	int i;
1387 	int err;
1388 	int vector_num;
1389 
1390 	for (i = 0; i < num_rxps; i++) {
1391 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1392 		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1393 			bnad->netdev->name,
1394 			rx_id + rx_info->rx_ctrl[i].ccb->id);
1395 		err = request_irq(bnad->msix_table[vector_num].vector,
1396 				  (irq_handler_t)bnad_msix_rx, 0,
1397 				  rx_info->rx_ctrl[i].ccb->name,
1398 				  rx_info->rx_ctrl[i].ccb);
1399 		if (err)
1400 			goto err_return;
1401 	}
1402 
1403 	return 0;
1404 
1405 err_return:
1406 	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, i);
1408 	return -1;
1409 }
1410 
1411 /* Free Tx object Resources */
1412 static void
1413 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1414 {
1415 	int i;
1416 
1417 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1418 		if (res_info[i].res_type == BNA_RES_T_MEM)
1419 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1420 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1421 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1422 	}
1423 }
1424 
1425 /* Allocates memory and interrupt resources for Tx object */
1426 static int
1427 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1428 		  u32 tx_id)
1429 {
1430 	int i, err = 0;
1431 
1432 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1433 		if (res_info[i].res_type == BNA_RES_T_MEM)
1434 			err = bnad_mem_alloc(bnad,
1435 					&res_info[i].res_u.mem_info);
1436 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1437 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1438 					&res_info[i].res_u.intr_info);
1439 		if (err)
1440 			goto err_return;
1441 	}
1442 	return 0;
1443 
1444 err_return:
1445 	bnad_tx_res_free(bnad, res_info);
1446 	return err;
1447 }
1448 
1449 /* Free Rx object Resources */
1450 static void
1451 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1452 {
1453 	int i;
1454 
1455 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1456 		if (res_info[i].res_type == BNA_RES_T_MEM)
1457 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1458 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1459 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1460 	}
1461 }
1462 
1463 /* Allocates memory and interrupt resources for Rx object */
1464 static int
1465 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1466 		  uint rx_id)
1467 {
1468 	int i, err = 0;
1469 
1470 	/* All memory needs to be allocated before setup_ccbs */
1471 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1472 		if (res_info[i].res_type == BNA_RES_T_MEM)
1473 			err = bnad_mem_alloc(bnad,
1474 					&res_info[i].res_u.mem_info);
1475 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1476 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1477 					&res_info[i].res_u.intr_info);
1478 		if (err)
1479 			goto err_return;
1480 	}
1481 	return 0;
1482 
1483 err_return:
1484 	bnad_rx_res_free(bnad, res_info);
1485 	return err;
1486 }
1487 
1488 /* Timer callbacks */
1489 /* a) IOC timer */
1490 static void
1491 bnad_ioc_timeout(unsigned long data)
1492 {
1493 	struct bnad *bnad = (struct bnad *)data;
1494 	unsigned long flags;
1495 
1496 	spin_lock_irqsave(&bnad->bna_lock, flags);
1497 	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1498 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1499 }
1500 
1501 static void
1502 bnad_ioc_hb_check(unsigned long data)
1503 {
1504 	struct bnad *bnad = (struct bnad *)data;
1505 	unsigned long flags;
1506 
1507 	spin_lock_irqsave(&bnad->bna_lock, flags);
1508 	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1509 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1510 }
1511 
1512 static void
1513 bnad_iocpf_timeout(unsigned long data)
1514 {
1515 	struct bnad *bnad = (struct bnad *)data;
1516 	unsigned long flags;
1517 
1518 	spin_lock_irqsave(&bnad->bna_lock, flags);
1519 	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1520 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1521 }
1522 
1523 static void
1524 bnad_iocpf_sem_timeout(unsigned long data)
1525 {
1526 	struct bnad *bnad = (struct bnad *)data;
1527 	unsigned long flags;
1528 
1529 	spin_lock_irqsave(&bnad->bna_lock, flags);
1530 	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1531 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1532 }
1533 
1534 /*
1535  * All timer routines use bnad->bna_lock to protect against
1536  * the following race, which may occur in case of no locking:
1537  *	Time	CPU m	CPU n
1538  *	0       1 = test_bit
1539  *	1			clear_bit
1540  *	2			del_timer_sync
1541  *	3	mod_timer
1542  */
1543 
1544 /* b) Dynamic Interrupt Moderation Timer */
1545 static void
1546 bnad_dim_timeout(unsigned long data)
1547 {
1548 	struct bnad *bnad = (struct bnad *)data;
1549 	struct bnad_rx_info *rx_info;
1550 	struct bnad_rx_ctrl *rx_ctrl;
1551 	int i, j;
1552 	unsigned long flags;
1553 
1554 	if (!netif_carrier_ok(bnad->netdev))
1555 		return;
1556 
1557 	spin_lock_irqsave(&bnad->bna_lock, flags);
1558 	for (i = 0; i < bnad->num_rx; i++) {
1559 		rx_info = &bnad->rx_info[i];
1560 		if (!rx_info->rx)
1561 			continue;
1562 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1563 			rx_ctrl = &rx_info->rx_ctrl[j];
1564 			if (!rx_ctrl->ccb)
1565 				continue;
1566 			bna_rx_dim_update(rx_ctrl->ccb);
1567 		}
1568 	}
1569 
	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1571 	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1572 		mod_timer(&bnad->dim_timer,
1573 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1574 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1575 }
1576 
1577 /* c)  Statistics Timer */
1578 static void
1579 bnad_stats_timeout(unsigned long data)
1580 {
1581 	struct bnad *bnad = (struct bnad *)data;
1582 	unsigned long flags;
1583 
1584 	if (!netif_running(bnad->netdev) ||
1585 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1586 		return;
1587 
1588 	spin_lock_irqsave(&bnad->bna_lock, flags);
1589 	bna_hw_stats_get(&bnad->bna);
1590 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1591 }
1592 
1593 /*
1594  * Set up timer for DIM
1595  * Called with bnad->bna_lock held
1596  */
1597 void
1598 bnad_dim_timer_start(struct bnad *bnad)
1599 {
1600 	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1601 	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1602 		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1603 			    (unsigned long)bnad);
1604 		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1605 		mod_timer(&bnad->dim_timer,
1606 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1607 	}
1608 }
1609 
1610 /*
1611  * Set up timer for statistics
1612  * Called with mutex_lock(&bnad->conf_mutex) held
1613  */
1614 static void
1615 bnad_stats_timer_start(struct bnad *bnad)
1616 {
1617 	unsigned long flags;
1618 
1619 	spin_lock_irqsave(&bnad->bna_lock, flags);
1620 	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1621 		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1622 			    (unsigned long)bnad);
1623 		mod_timer(&bnad->stats_timer,
1624 			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1625 	}
1626 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1627 }
1628 
1629 /*
1630  * Stops the stats timer
1631  * Called with mutex_lock(&bnad->conf_mutex) held
1632  */
1633 static void
1634 bnad_stats_timer_stop(struct bnad *bnad)
1635 {
1636 	int to_del = 0;
1637 	unsigned long flags;
1638 
1639 	spin_lock_irqsave(&bnad->bna_lock, flags);
1640 	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1641 		to_del = 1;
1642 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1643 	if (to_del)
1644 		del_timer_sync(&bnad->stats_timer);
1645 }
1646 
1647 /* Utilities */
1648 
1649 static void
1650 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1651 {
1652 	int i = 1; /* Index 0 has broadcast address */
1653 	struct netdev_hw_addr *mc_addr;
1654 
1655 	netdev_for_each_mc_addr(mc_addr, netdev) {
1656 		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1657 							ETH_ALEN);
1658 		i++;
1659 	}
1660 }
1661 
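/*
 * NAPI poll handler - process Rx completions up to the budget; once done,
 * complete NAPI and re-enable the CQ interrupt.
 */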
1662 static int
1663 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1664 {
1665 	struct bnad_rx_ctrl *rx_ctrl =
1666 		container_of(napi, struct bnad_rx_ctrl, napi);
1667 	struct bnad *bnad = rx_ctrl->bnad;
1668 	int rcvd = 0;
1669 
1670 	rx_ctrl->rx_poll_ctr++;
1671 
1672 	if (!netif_carrier_ok(bnad->netdev))
1673 		goto poll_exit;
1674 
1675 	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1676 	if (rcvd >= budget)
1677 		return rcvd;
1678 
1679 poll_exit:
1680 	napi_complete(napi);
1681 
1682 	rx_ctrl->rx_complete++;
1683 
1684 	if (rx_ctrl->ccb)
1685 		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1686 
1687 	return rcvd;
1688 }
1689 
1690 #define BNAD_NAPI_POLL_QUOTA		64
1691 static void
1692 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1693 {
1694 	struct bnad_rx_ctrl *rx_ctrl;
1695 	int i;
1696 
	/* Initialize NAPI; it is enabled later from the Rx post callback */
1698 	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
1699 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1700 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1701 			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1702 	}
1703 }
1704 
1705 static void
1706 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1707 {
1708 	int i;
1709 
1710 	/* First disable and then clean up */
1711 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1712 		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1713 }
1714 
/* Should be called with conf_lock held */
1716 void
1717 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1718 {
1719 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1720 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1721 	unsigned long flags;
1722 
1723 	if (!tx_info->tx)
1724 		return;
1725 
1726 	init_completion(&bnad->bnad_completions.tx_comp);
1727 	spin_lock_irqsave(&bnad->bna_lock, flags);
1728 	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1729 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1730 	wait_for_completion(&bnad->bnad_completions.tx_comp);
1731 
1732 	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1733 		bnad_tx_msix_unregister(bnad, tx_info,
1734 			bnad->num_txq_per_tx);
1735 
1736 	spin_lock_irqsave(&bnad->bna_lock, flags);
1737 	bna_tx_destroy(tx_info->tx);
1738 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1739 
1740 	tx_info->tx = NULL;
1741 	tx_info->tx_id = 0;
1742 
1743 	bnad_tx_res_free(bnad, res_info);
1744 }
1745 
/* Should be called with conf_lock held */
1747 int
1748 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1749 {
1750 	int err;
1751 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1752 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1753 	struct bna_intr_info *intr_info =
1754 			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1755 	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1756 	static const struct bna_tx_event_cbfn tx_cbfn = {
1757 		.tcb_setup_cbfn = bnad_cb_tcb_setup,
1758 		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1759 		.tx_stall_cbfn = bnad_cb_tx_stall,
1760 		.tx_resume_cbfn = bnad_cb_tx_resume,
1761 		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1762 	};
1763 
1764 	struct bna_tx *tx;
1765 	unsigned long flags;
1766 
1767 	tx_info->tx_id = tx_id;
1768 
1769 	/* Initialize the Tx object configuration */
1770 	tx_config->num_txq = bnad->num_txq_per_tx;
1771 	tx_config->txq_depth = bnad->txq_depth;
1772 	tx_config->tx_type = BNA_TX_T_REGULAR;
1773 	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1774 
1775 	/* Get BNA's resource requirement for one tx object */
1776 	spin_lock_irqsave(&bnad->bna_lock, flags);
1777 	bna_tx_res_req(bnad->num_txq_per_tx,
1778 		bnad->txq_depth, res_info);
1779 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1780 
1781 	/* Fill Unmap Q memory requirements */
1782 	BNAD_FILL_UNMAPQ_MEM_REQ(
1783 			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1784 			bnad->num_txq_per_tx,
1785 			BNAD_TX_UNMAPQ_DEPTH);
1786 
1787 	/* Allocate resources */
1788 	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1789 	if (err)
1790 		return err;
1791 
1792 	/* Ask BNA to create one Tx object, supplying required resources */
1793 	spin_lock_irqsave(&bnad->bna_lock, flags);
1794 	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1795 			tx_info);
1796 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1797 	if (!tx)
1798 		goto err_return;
1799 	tx_info->tx = tx;
1800 
1801 	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1802 			(work_func_t)bnad_tx_cleanup);
1803 
1804 	/* Register ISR for the Tx object */
1805 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1806 		err = bnad_tx_msix_register(bnad, tx_info,
1807 			tx_id, bnad->num_txq_per_tx);
1808 		if (err)
1809 			goto err_return;
1810 	}
1811 
1812 	spin_lock_irqsave(&bnad->bna_lock, flags);
1813 	bna_tx_enable(tx);
1814 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1815 
1816 	return 0;
1817 
1818 err_return:
1819 	bnad_tx_res_free(bnad, res_info);
1820 	return err;
1821 }
1822 
1823 /* Setup the rx config for bna_rx_create */
1824 /* bnad decides the configuration */
1825 static void
1826 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1827 {
1828 	rx_config->rx_type = BNA_RX_T_REGULAR;
1829 	rx_config->num_paths = bnad->num_rxp_per_rx;
1830 	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1831 
1832 	if (bnad->num_rxp_per_rx > 1) {
1833 		rx_config->rss_status = BNA_STATUS_T_ENABLED;
1834 		rx_config->rss_config.hash_type =
1835 				(BFI_ENET_RSS_IPV6 |
1836 				 BFI_ENET_RSS_IPV6_TCP |
1837 				 BFI_ENET_RSS_IPV4 |
1838 				 BFI_ENET_RSS_IPV4_TCP);
1839 		rx_config->rss_config.hash_mask =
1840 				bnad->num_rxp_per_rx - 1;
1841 		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1842 			sizeof(rx_config->rss_config.toeplitz_hash_key));
1843 	} else {
1844 		rx_config->rss_status = BNA_STATUS_T_DISABLED;
1845 		memset(&rx_config->rss_config, 0,
1846 		       sizeof(rx_config->rss_config));
1847 	}
1848 	rx_config->rxp_type = BNA_RXP_SLR;
1849 	rx_config->q_depth = bnad->rxq_depth;
1850 
1851 	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1852 
1853 	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1854 }
1855 
1856 static void
1857 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1858 {
1859 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1860 	int i;
1861 
1862 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1863 		rx_info->rx_ctrl[i].bnad = bnad;
1864 }
1865 
1866 /* Called with mutex_lock(&bnad->conf_mutex) held */
1867 void
1868 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
1869 {
1870 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1871 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1872 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1873 	unsigned long flags;
1874 	int to_del = 0;
1875 
1876 	if (!rx_info->rx)
1877 		return;
1878 
1879 	if (0 == rx_id) {
1880 		spin_lock_irqsave(&bnad->bna_lock, flags);
1881 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1882 		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1883 			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1884 			to_del = 1;
1885 		}
1886 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
1887 		if (to_del)
1888 			del_timer_sync(&bnad->dim_timer);
1889 	}
1890 
1891 	init_completion(&bnad->bnad_completions.rx_comp);
1892 	spin_lock_irqsave(&bnad->bna_lock, flags);
1893 	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1894 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1895 	wait_for_completion(&bnad->bnad_completions.rx_comp);
1896 
1897 	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1898 		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1899 
1900 	bnad_napi_delete(bnad, rx_id);
1901 
1902 	spin_lock_irqsave(&bnad->bna_lock, flags);
1903 	bna_rx_destroy(rx_info->rx);
1904 
1905 	rx_info->rx = NULL;
1906 	rx_info->rx_id = 0;
1907 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1908 
1909 	bnad_rx_res_free(bnad, res_info);
1910 }
1911 
1912 /* Called with mutex_lock(&bnad->conf_mutex) held */
1913 int
1914 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1915 {
1916 	int err;
1917 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1918 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1919 	struct bna_intr_info *intr_info =
1920 			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1921 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1922 	static const struct bna_rx_event_cbfn rx_cbfn = {
1923 		.rcb_setup_cbfn = bnad_cb_rcb_setup,
1924 		.rcb_destroy_cbfn = NULL,
1925 		.ccb_setup_cbfn = bnad_cb_ccb_setup,
1926 		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1927 		.rx_stall_cbfn = bnad_cb_rx_stall,
1928 		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1929 		.rx_post_cbfn = bnad_cb_rx_post,
1930 	};
1931 	struct bna_rx *rx;
1932 	unsigned long flags;
1933 
1934 	rx_info->rx_id = rx_id;
1935 
1936 	/* Initialize the Rx object configuration */
1937 	bnad_init_rx_config(bnad, rx_config);
1938 
1939 	/* Get BNA's resource requirement for one Rx object */
1940 	spin_lock_irqsave(&bnad->bna_lock, flags);
1941 	bna_rx_res_req(rx_config, res_info);
1942 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1943 
1944 	/* Fill Unmap Q memory requirements */
1945 	BNAD_FILL_UNMAPQ_MEM_REQ(
1946 			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1947 			rx_config->num_paths +
1948 			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1949 				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1950 
1951 	/* Allocate resource */
1952 	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1953 	if (err)
1954 		return err;
1955 
1956 	bnad_rx_ctrl_init(bnad, rx_id);
1957 
1958 	/* Ask BNA to create one Rx object, supplying required resources */
1959 	spin_lock_irqsave(&bnad->bna_lock, flags);
1960 	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1961 			rx_info);
1962 	if (!rx) {
1963 		err = -ENOMEM;
1964 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
1965 		goto err_return;
1966 	}
1967 	rx_info->rx = rx;
1968 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1969 
1970 	INIT_WORK(&rx_info->rx_cleanup_work,
1971 			(work_func_t)(bnad_rx_cleanup));
1972 
	/*
	 * Init NAPI: the state is set to NAPI_STATE_SCHED here so that
	 * the IRQ handler cannot schedule NAPI at this point.
	 */
1977 	bnad_napi_add(bnad, rx_id);
1978 
1979 	/* Register ISR for the Rx object */
1980 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1981 		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1982 						rx_config->num_paths);
1983 		if (err)
1984 			goto err_return;
1985 	}
1986 
1987 	spin_lock_irqsave(&bnad->bna_lock, flags);
1988 	if (0 == rx_id) {
1989 		/* Set up Dynamic Interrupt Moderation Vector */
1990 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1991 			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1992 
1993 		/* Enable VLAN filtering only on the default Rx */
1994 		bna_rx_vlanfilter_enable(rx);
1995 
1996 		/* Start the DIM timer */
1997 		bnad_dim_timer_start(bnad);
1998 	}
1999 
2000 	bna_rx_enable(rx);
2001 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2002 
2003 	return 0;
2004 
2005 err_return:
2006 	bnad_destroy_rx(bnad, rx_id);
2007 	return err;
2008 }
2009 
2010 /* Called with conf_lock & bnad->bna_lock held */
2011 void
2012 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2013 {
2014 	struct bnad_tx_info *tx_info;
2015 
2016 	tx_info = &bnad->tx_info[0];
2017 	if (!tx_info->tx)
2018 		return;
2019 
2020 	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2021 }
2022 
2023 /* Called with conf_lock & bnad->bna_lock held */
2024 void
2025 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2026 {
2027 	struct bnad_rx_info *rx_info;
2028 	int	i;
2029 
2030 	for (i = 0; i < bnad->num_rx; i++) {
2031 		rx_info = &bnad->rx_info[i];
2032 		if (!rx_info->rx)
2033 			continue;
2034 		bna_rx_coalescing_timeo_set(rx_info->rx,
2035 				bnad->rx_coalescing_timeo);
2036 	}
2037 }
2038 
2039 /*
2040  * Called with bnad->bna_lock held
2041  */
2042 int
2043 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2044 {
2045 	int ret;
2046 
2047 	if (!is_valid_ether_addr(mac_addr))
2048 		return -EADDRNOTAVAIL;
2049 
2050 	/* If datapath is down, pretend everything went through */
2051 	if (!bnad->rx_info[0].rx)
2052 		return 0;
2053 
2054 	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2055 	if (ret != BNA_CB_SUCCESS)
2056 		return -EADDRNOTAVAIL;
2057 
2058 	return 0;
2059 }
2060 
2061 /* Should be called with conf_lock held */
2062 int
2063 bnad_enable_default_bcast(struct bnad *bnad)
2064 {
2065 	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2066 	int ret;
2067 	unsigned long flags;
2068 
2069 	init_completion(&bnad->bnad_completions.mcast_comp);
2070 
2071 	spin_lock_irqsave(&bnad->bna_lock, flags);
2072 	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2073 				bnad_cb_rx_mcast_add);
2074 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2075 
2076 	if (ret == BNA_CB_SUCCESS)
2077 		wait_for_completion(&bnad->bnad_completions.mcast_comp);
2078 	else
2079 		return -ENODEV;
2080 
2081 	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2082 		return -ENODEV;
2083 
2084 	return 0;
2085 }
2086 
2087 /* Called with mutex_lock(&bnad->conf_mutex) held */
2088 void
2089 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2090 {
2091 	u16 vid;
2092 	unsigned long flags;
2093 
2094 	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2095 		spin_lock_irqsave(&bnad->bna_lock, flags);
2096 		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2097 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2098 	}
2099 }
2100 
2101 /* Statistics utilities */
2102 void
2103 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2104 {
2105 	int i, j;
2106 
	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			struct bna_ccb *ccb = bnad->rx_info[i].rx_ctrl[j].ccb;

			if (!ccb)
				continue;

			stats->rx_packets += ccb->rcb[0]->rxq->rx_packets;
			stats->rx_bytes += ccb->rcb[0]->rxq->rx_bytes;
			if (ccb->rcb[1] && ccb->rcb[1]->rxq) {
				stats->rx_packets +=
					ccb->rcb[1]->rxq->rx_packets;
				stats->rx_bytes +=
					ccb->rcb[1]->rxq->rx_bytes;
			}
		}
	}
2127 	for (i = 0; i < bnad->num_tx; i++) {
2128 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
2129 			if (bnad->tx_info[i].tcb[j]) {
2130 				stats->tx_packets +=
2131 				bnad->tx_info[i].tcb[j]->txq->tx_packets;
2132 				stats->tx_bytes +=
2133 					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2134 			}
2135 		}
2136 	}
2137 }
2138 
2139 /*
2140  * Must be called with the bna_lock held.
2141  */
2142 void
2143 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2144 {
2145 	struct bfi_enet_stats_mac *mac_stats;
2146 	u32 bmap;
2147 	int i;
2148 
2149 	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2150 	stats->rx_errors =
2151 		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2152 		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2153 		mac_stats->rx_undersize;
2154 	stats->tx_errors = mac_stats->tx_fcs_error +
2155 					mac_stats->tx_undersize;
2156 	stats->rx_dropped = mac_stats->rx_drop;
2157 	stats->tx_dropped = mac_stats->tx_drop;
2158 	stats->multicast = mac_stats->rx_multicast;
2159 	stats->collisions = mac_stats->tx_total_collision;
2160 
2161 	stats->rx_length_errors = mac_stats->rx_frame_length_error;
2162 
2163 	/* receive ring buffer overflow  ?? */
2164 
2165 	stats->rx_crc_errors = mac_stats->rx_fcs_error;
2166 	stats->rx_frame_errors = mac_stats->rx_alignment_error;
2167 	/* recv'r fifo overrun */
2168 	bmap = bna_rx_rid_mask(&bnad->bna);
2169 	for (i = 0; bmap; i++) {
2170 		if (bmap & 1) {
2171 			stats->rx_fifo_errors +=
2172 				bnad->stats.bna_stats->
2173 					hw_stats.rxf_stats[i].frame_drops;
2174 			break;
2175 		}
2176 		bmap >>= 1;
2177 	}
2178 }
2179 
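/* Wait for any in-flight mailbox interrupt handler to finish */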
2180 static void
2181 bnad_mbox_irq_sync(struct bnad *bnad)
2182 {
2183 	u32 irq;
2184 	unsigned long flags;
2185 
2186 	spin_lock_irqsave(&bnad->bna_lock, flags);
2187 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2188 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2189 	else
2190 		irq = bnad->pcidev->irq;
2191 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2192 
2193 	synchronize_irq(irq);
2194 }
2195 
/* Utility used by bnad_start_xmit to prepare an skb for TSO */
2197 static int
2198 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2199 {
2200 	int err;
2201 
2202 	if (skb_header_cloned(skb)) {
2203 		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2204 		if (err) {
2205 			BNAD_UPDATE_CTR(bnad, tso_err);
2206 			return err;
2207 		}
2208 	}
2209 
	/*
	 * For TSO, the TCP checksum field is seeded with the pseudo-header
	 * checksum, excluding the length field.
	 */
2214 	if (skb->protocol == htons(ETH_P_IP)) {
2215 		struct iphdr *iph = ip_hdr(skb);
2216 
2217 		/* Do we really need these? */
2218 		iph->tot_len = 0;
2219 		iph->check = 0;
2220 
2221 		tcp_hdr(skb)->check =
2222 			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2223 					   IPPROTO_TCP, 0);
2224 		BNAD_UPDATE_CTR(bnad, tso4);
2225 	} else {
2226 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2227 
2228 		ipv6h->payload_len = 0;
2229 		tcp_hdr(skb)->check =
2230 			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2231 					 IPPROTO_TCP, 0);
2232 		BNAD_UPDATE_CTR(bnad, tso6);
2233 	}
2234 
2235 	return 0;
2236 }
2237 
2238 /*
 * Initialize Q numbers depending on Rx Paths.
 * Called with bnad->bna_lock held because of cfg_flags access.
2242  */
2243 static void
2244 bnad_q_num_init(struct bnad *bnad)
2245 {
2246 	int rxps;
2247 
2248 	rxps = min((uint)num_online_cpus(),
2249 			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2250 
2251 	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2252 		rxps = 1;	/* INTx */
2253 
2254 	bnad->num_rx = 1;
2255 	bnad->num_tx = 1;
2256 	bnad->num_rxp_per_rx = rxps;
2257 	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2258 }
2259 
2260 /*
 * Adjusts the Q numbers, given a number of MSI-X vectors.
 * Preference is given to RSS over Tx priority queues; in that case
 * just one Tx queue is used.
 * Called with bnad->bna_lock held because of cfg_flags access.
2265  */
2266 static void
2267 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2268 {
2269 	bnad->num_txq_per_tx = 1;
2270 	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2271 	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2272 	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
2273 		bnad->num_rxp_per_rx = msix_vectors -
2274 			(bnad->num_tx * bnad->num_txq_per_tx) -
2275 			BNAD_MAILBOX_MSIX_VECTORS;
2276 	} else
2277 		bnad->num_rxp_per_rx = 1;
2278 }
2279 
2280 /* Enable / disable ioceth */
2281 static int
2282 bnad_ioceth_disable(struct bnad *bnad)
2283 {
2284 	unsigned long flags;
2285 	int err = 0;
2286 
2287 	spin_lock_irqsave(&bnad->bna_lock, flags);
2288 	init_completion(&bnad->bnad_completions.ioc_comp);
2289 	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2290 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2291 
2292 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2293 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2294 
2295 	err = bnad->bnad_completions.ioc_comp_status;
2296 	return err;
2297 }
2298 
2299 static int
2300 bnad_ioceth_enable(struct bnad *bnad)
2301 {
2302 	int err = 0;
2303 	unsigned long flags;
2304 
2305 	spin_lock_irqsave(&bnad->bna_lock, flags);
2306 	init_completion(&bnad->bnad_completions.ioc_comp);
2307 	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2308 	bna_ioceth_enable(&bnad->bna.ioceth);
2309 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2310 
2311 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2312 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2313 
2314 	err = bnad->bnad_completions.ioc_comp_status;
2315 
2316 	return err;
2317 }
2318 
2319 /* Free BNA resources */
2320 static void
2321 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2322 		u32 res_val_max)
2323 {
2324 	int i;
2325 
2326 	for (i = 0; i < res_val_max; i++)
2327 		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2328 }
2329 
2330 /* Allocates memory and interrupt resources for BNA */
2331 static int
2332 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2333 		u32 res_val_max)
2334 {
2335 	int i, err;
2336 
2337 	for (i = 0; i < res_val_max; i++) {
2338 		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2339 		if (err)
2340 			goto err_return;
2341 	}
2342 	return 0;
2343 
2344 err_return:
2345 	bnad_res_free(bnad, res_info, res_val_max);
2346 	return err;
2347 }
2348 
2349 /* Interrupt enable / disable */
2350 static void
2351 bnad_enable_msix(struct bnad *bnad)
2352 {
2353 	int i, ret;
2354 	unsigned long flags;
2355 
2356 	spin_lock_irqsave(&bnad->bna_lock, flags);
2357 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2358 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2359 		return;
2360 	}
2361 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2362 
2363 	if (bnad->msix_table)
2364 		return;
2365 
2366 	bnad->msix_table =
2367 		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2368 
2369 	if (!bnad->msix_table)
2370 		goto intx_mode;
2371 
2372 	for (i = 0; i < bnad->msix_num; i++)
2373 		bnad->msix_table[i].entry = i;
2374 
2375 	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2376 	if (ret > 0) {
2377 		/* Not enough MSI-X vectors. */
2378 		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2379 			ret, bnad->msix_num);
2380 
2381 		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = number of vectors that we got */
2383 		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2384 			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2385 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2386 
2387 		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2388 			 BNAD_MAILBOX_MSIX_VECTORS;
2389 
2390 		if (bnad->msix_num > ret)
2391 			goto intx_mode;
2392 
2393 		/* Try once more with adjusted numbers */
2394 		/* If this fails, fall back to INTx */
2395 		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2396 				      bnad->msix_num);
2397 		if (ret)
2398 			goto intx_mode;
2399 
2400 	} else if (ret < 0)
2401 		goto intx_mode;
2402 
2403 	pci_intx(bnad->pcidev, 0);
2404 
2405 	return;
2406 
2407 intx_mode:
2408 	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2409 
2410 	kfree(bnad->msix_table);
2411 	bnad->msix_table = NULL;
2412 	bnad->msix_num = 0;
2413 	spin_lock_irqsave(&bnad->bna_lock, flags);
2414 	bnad->cfg_flags &= ~BNAD_CF_MSIX;
2415 	bnad_q_num_init(bnad);
2416 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2417 }
2418 
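/* Clear the MSI-X flag under the lock, then release the vectors and table */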
2419 static void
2420 bnad_disable_msix(struct bnad *bnad)
2421 {
2422 	u32 cfg_flags;
2423 	unsigned long flags;
2424 
2425 	spin_lock_irqsave(&bnad->bna_lock, flags);
2426 	cfg_flags = bnad->cfg_flags;
2427 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2428 		bnad->cfg_flags &= ~BNAD_CF_MSIX;
2429 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2430 
2431 	if (cfg_flags & BNAD_CF_MSIX) {
2432 		pci_disable_msix(bnad->pcidev);
2433 		kfree(bnad->msix_table);
2434 		bnad->msix_table = NULL;
2435 	}
2436 }
2437 
2438 /* Netdev entry points */
2439 static int
2440 bnad_open(struct net_device *netdev)
2441 {
2442 	int err;
2443 	struct bnad *bnad = netdev_priv(netdev);
2444 	struct bna_pause_config pause_config;
2445 	int mtu;
2446 	unsigned long flags;
2447 
2448 	mutex_lock(&bnad->conf_mutex);
2449 
2450 	/* Tx */
2451 	err = bnad_setup_tx(bnad, 0);
2452 	if (err)
2453 		goto err_return;
2454 
2455 	/* Rx */
2456 	err = bnad_setup_rx(bnad, 0);
2457 	if (err)
2458 		goto cleanup_tx;
2459 
2460 	/* Port */
2461 	pause_config.tx_pause = 0;
2462 	pause_config.rx_pause = 0;
2463 
2464 	mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2465 
2466 	spin_lock_irqsave(&bnad->bna_lock, flags);
2467 	bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2468 	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2469 	bna_enet_enable(&bnad->bna.enet);
2470 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2471 
2472 	/* Enable broadcast */
2473 	bnad_enable_default_bcast(bnad);
2474 
2475 	/* Restore VLANs, if any */
2476 	bnad_restore_vlans(bnad, 0);
2477 
2478 	/* Set the UCAST address */
2479 	spin_lock_irqsave(&bnad->bna_lock, flags);
2480 	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2481 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2482 
2483 	/* Start the stats timer */
2484 	bnad_stats_timer_start(bnad);
2485 
2486 	mutex_unlock(&bnad->conf_mutex);
2487 
2488 	return 0;
2489 
2490 cleanup_tx:
2491 	bnad_destroy_tx(bnad, 0);
2492 
2493 err_return:
2494 	mutex_unlock(&bnad->conf_mutex);
2495 	return err;
2496 }
2497 
2498 static int
2499 bnad_stop(struct net_device *netdev)
2500 {
2501 	struct bnad *bnad = netdev_priv(netdev);
2502 	unsigned long flags;
2503 
2504 	mutex_lock(&bnad->conf_mutex);
2505 
2506 	/* Stop the stats timer */
2507 	bnad_stats_timer_stop(bnad);
2508 
2509 	init_completion(&bnad->bnad_completions.enet_comp);
2510 
2511 	spin_lock_irqsave(&bnad->bna_lock, flags);
2512 	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2513 			bnad_cb_enet_disabled);
2514 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2515 
2516 	wait_for_completion(&bnad->bnad_completions.enet_comp);
2517 
2518 	bnad_destroy_tx(bnad, 0);
2519 	bnad_destroy_rx(bnad, 0);
2520 
2521 	/* Synchronize mailbox IRQ */
2522 	bnad_mbox_irq_sync(bnad);
2523 
2524 	mutex_unlock(&bnad->conf_mutex);
2525 
2526 	return 0;
2527 }
2528 
2529 /* TX */
2530 /*
 * bnad_start_xmit : Netdev entry point for transmit.
 *		     Called under the tx lock held by the net_device layer.
2533  */
2534 static netdev_tx_t
2535 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2536 {
2537 	struct bnad *bnad = netdev_priv(netdev);
2538 	u32 txq_id = 0;
2539 	struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2540 
2541 	u16		txq_prod, vlan_tag = 0;
2542 	u32		unmap_prod, wis, wis_used, wi_range;
2543 	u32		vectors, vect_id, i, acked;
	int		err;
	unsigned int	len;
	u32		gso_size;
2547 
2548 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2549 	dma_addr_t		dma_addr;
2550 	struct bna_txq_entry *txqent;
2551 	u16	flags;
2552 
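	/* Drop skbs that are too short, have over-long headlen, or no headlen */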
2553 	if (unlikely(skb->len <= ETH_HLEN)) {
2554 		dev_kfree_skb(skb);
2555 		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2556 		return NETDEV_TX_OK;
2557 	}
2558 	if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2559 		dev_kfree_skb(skb);
2560 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2561 		return NETDEV_TX_OK;
2562 	}
2563 	if (unlikely(skb_headlen(skb) == 0)) {
2564 		dev_kfree_skb(skb);
2565 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2566 		return NETDEV_TX_OK;
2567 	}
2568 
	/*
	 * Takes care of a Tx that is scheduled between clearing the
	 * BNAD_TXQ_TX_STARTED flag and the netif_tx_stop_all_queues() call.
	 */
2573 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2574 		dev_kfree_skb(skb);
2575 		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2576 		return NETDEV_TX_OK;
2577 	}
2578 
2579 	vectors = 1 + skb_shinfo(skb)->nr_frags;
2580 	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2581 		dev_kfree_skb(skb);
2582 		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2583 		return NETDEV_TX_OK;
2584 	}
2585 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
2586 	acked = 0;
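	/* Out of TxQ/unmap-Q entries: reclaim completions or stop the queue */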
2587 	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2588 			vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2589 		if ((u16) (*tcb->hw_consumer_index) !=
2590 		    tcb->consumer_index &&
2591 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2592 			acked = bnad_txcmpl_process(bnad, tcb);
2593 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2594 				bna_ib_ack(tcb->i_dbell, acked);
2595 			smp_mb__before_clear_bit();
2596 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2597 		} else {
2598 			netif_stop_queue(netdev);
2599 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2600 		}
2601 
2602 		smp_mb();
		/*
		 * Check again to deal with the race between the
		 * netif_stop_queue() above and the netif_wake_queue() in the
		 * interrupt handler, which does not run under the netif
		 * tx lock.
		 */
2608 		if (likely
2609 		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2610 		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2611 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2612 			return NETDEV_TX_BUSY;
2613 		} else {
2614 			netif_wake_queue(netdev);
2615 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2616 		}
2617 	}
2618 
2619 	unmap_prod = unmap_q->producer_index;
2620 	flags = 0;
2621 
2622 	txq_prod = tcb->producer_index;
2623 	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2624 	txqent->hdr.wi.reserved = 0;
2625 	txqent->hdr.wi.num_vectors = vectors;
2626 
2627 	if (vlan_tx_tag_present(skb)) {
2628 		vlan_tag = (u16) vlan_tx_tag_get(skb);
2629 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2630 	}
2631 	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2632 		vlan_tag =
2633 			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2634 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2635 	}
2636 
2637 	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2638 
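	/* Choose the work-item opcode and checksum flags: LSO vs. plain send */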
2639 	if (skb_is_gso(skb)) {
2640 		gso_size = skb_shinfo(skb)->gso_size;
2641 
2642 		if (unlikely(gso_size > netdev->mtu)) {
2643 			dev_kfree_skb(skb);
2644 			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2645 			return NETDEV_TX_OK;
2646 		}
2647 		if (unlikely((gso_size + skb_transport_offset(skb) +
2648 			tcp_hdrlen(skb)) >= skb->len)) {
2649 			txqent->hdr.wi.opcode =
2650 				__constant_htons(BNA_TXQ_WI_SEND);
2651 			txqent->hdr.wi.lso_mss = 0;
2652 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2653 		} else {
2654 			txqent->hdr.wi.opcode =
2655 				__constant_htons(BNA_TXQ_WI_SEND_LSO);
2656 			txqent->hdr.wi.lso_mss = htons(gso_size);
2657 		}
2658 
2659 		err = bnad_tso_prepare(bnad, skb);
2660 		if (unlikely(err)) {
2661 			dev_kfree_skb(skb);
2662 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2663 			return NETDEV_TX_OK;
2664 		}
2665 		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2666 		txqent->hdr.wi.l4_hdr_size_n_offset =
2667 			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2668 			      (tcp_hdrlen(skb) >> 2,
2669 			       skb_transport_offset(skb)));
2670 	} else {
2671 		txqent->hdr.wi.opcode =	__constant_htons(BNA_TXQ_WI_SEND);
2672 		txqent->hdr.wi.lso_mss = 0;
2673 
2674 		if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2675 			dev_kfree_skb(skb);
2676 			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2677 			return NETDEV_TX_OK;
2678 		}
2679 
2680 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2681 			u8 proto = 0;
2682 
2683 			if (skb->protocol == __constant_htons(ETH_P_IP))
2684 				proto = ip_hdr(skb)->protocol;
2685 			else if (skb->protocol ==
2686 				 __constant_htons(ETH_P_IPV6)) {
2687 				/* nexthdr may not be TCP immediately. */
2688 				proto = ipv6_hdr(skb)->nexthdr;
2689 			}
2690 			if (proto == IPPROTO_TCP) {
2691 				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2692 				txqent->hdr.wi.l4_hdr_size_n_offset =
2693 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2694 					      (0, skb_transport_offset(skb)));
2695 
2696 				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2697 
2698 				if (unlikely(skb_headlen(skb) <
2699 				skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2700 					dev_kfree_skb(skb);
2701 					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2702 					return NETDEV_TX_OK;
2703 				}
2704 
2705 			} else if (proto == IPPROTO_UDP) {
2706 				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2707 				txqent->hdr.wi.l4_hdr_size_n_offset =
2708 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2709 					      (0, skb_transport_offset(skb)));
2710 
2711 				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2712 				if (unlikely(skb_headlen(skb) <
2713 				    skb_transport_offset(skb) +
2714 				    sizeof(struct udphdr))) {
2715 					dev_kfree_skb(skb);
2716 					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2717 					return NETDEV_TX_OK;
2718 				}
2719 			} else {
2720 				dev_kfree_skb(skb);
2721 				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2722 				return NETDEV_TX_OK;
2723 			}
2724 		} else {
2725 			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2726 		}
2727 	}
2728 
2729 	txqent->hdr.wi.flags = htons(flags);
2730 
2731 	txqent->hdr.wi.frame_length = htonl(skb->len);
2732 
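	/* DMA-map the linear data as vector 0 of the first work item */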
2733 	unmap_q->unmap_array[unmap_prod].skb = skb;
2734 	len = skb_headlen(skb);
2735 	txqent->vector[0].length = htons(len);
2736 	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2737 				  skb_headlen(skb), DMA_TO_DEVICE);
2738 	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2739 			   dma_addr);
2740 
2741 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2742 	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2743 
2744 	vect_id = 0;
2745 	wis_used = 1;
2746 
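	/* Map each page fragment; open an extension WI when the current one fills */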
2747 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2748 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2749 		u16		size = skb_frag_size(frag);
2750 
2751 		if (unlikely(size == 0)) {
2752 			unmap_prod = unmap_q->producer_index;
2753 
2754 			unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2755 					   unmap_q->unmap_array,
2756 					   unmap_prod, unmap_q->q_depth, skb,
2757 					   i);
2758 			dev_kfree_skb(skb);
2759 			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2760 			return NETDEV_TX_OK;
2761 		}
2762 
2763 		len += size;
2764 
2765 		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2766 			vect_id = 0;
2767 			if (--wi_range)
2768 				txqent++;
2769 			else {
2770 				BNA_QE_INDX_ADD(txq_prod, wis_used,
2771 						tcb->q_depth);
2772 				wis_used = 0;
2773 				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2774 						     txqent, wi_range);
2775 			}
2776 			wis_used++;
2777 			txqent->hdr.wi_ext.opcode =
2778 				__constant_htons(BNA_TXQ_WI_EXTENSION);
2779 		}
2780 
2781 		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2782 		txqent->vector[vect_id].length = htons(size);
2783 		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2784 					    0, size, DMA_TO_DEVICE);
2785 		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2786 				   dma_addr);
2787 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2788 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2789 	}
2790 
2791 	if (unlikely(len != skb->len)) {
2792 		unmap_prod = unmap_q->producer_index;
2793 
2794 		unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2795 				unmap_q->unmap_array, unmap_prod,
2796 				unmap_q->q_depth, skb,
2797 				skb_shinfo(skb)->nr_frags);
2798 		dev_kfree_skb(skb);
2799 		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2800 		return NETDEV_TX_OK;
2801 	}
2802 
2803 	unmap_q->producer_index = unmap_prod;
2804 	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2805 	tcb->producer_index = txq_prod;
2806 
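	/* Order the producer index update before the flag check and doorbell */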
2807 	smp_mb();
2808 
2809 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2810 		return NETDEV_TX_OK;
2811 
2812 	bna_txq_prod_indx_doorbell(tcb);
2813 	smp_mb();
2814 
2815 	return NETDEV_TX_OK;
2816 }
2817 
2818 /*
 * The spin lock is used to synchronize reading of the stats structures,
 * which are written by BNA under the same lock.
2821  */
2822 static struct rtnl_link_stats64 *
2823 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2824 {
2825 	struct bnad *bnad = netdev_priv(netdev);
2826 	unsigned long flags;
2827 
2828 	spin_lock_irqsave(&bnad->bna_lock, flags);
2829 
2830 	bnad_netdev_qstats_fill(bnad, stats);
2831 	bnad_netdev_hwstats_fill(bnad, stats);
2832 
2833 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2834 
2835 	return stats;
2836 }
2837 
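/* Sync promiscuous/all-multicast state and the multicast list with hardware */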
2838 void
2839 bnad_set_rx_mode(struct net_device *netdev)
2840 {
2841 	struct bnad *bnad = netdev_priv(netdev);
2842 	u32	new_mask, valid_mask;
2843 	unsigned long flags;
2844 
2845 	spin_lock_irqsave(&bnad->bna_lock, flags);
2846 
2847 	new_mask = valid_mask = 0;
2848 
2849 	if (netdev->flags & IFF_PROMISC) {
2850 		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2851 			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2852 			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2853 			bnad->cfg_flags |= BNAD_CF_PROMISC;
2854 		}
2855 	} else {
2856 		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2857 			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2858 			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2859 			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2860 		}
2861 	}
2862 
2863 	if (netdev->flags & IFF_ALLMULTI) {
2864 		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2865 			new_mask |= BNA_RXMODE_ALLMULTI;
2866 			valid_mask |= BNA_RXMODE_ALLMULTI;
2867 			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2868 		}
2869 	} else {
2870 		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2871 			new_mask &= ~BNA_RXMODE_ALLMULTI;
2872 			valid_mask |= BNA_RXMODE_ALLMULTI;
2873 			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2874 		}
2875 	}
2876 
2877 	if (bnad->rx_info[0].rx == NULL)
2878 		goto unlock;
2879 
2880 	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2881 
2882 	if (!netdev_mc_empty(netdev)) {
2883 		u8 *mcaddr_list;
2884 		int mc_count = netdev_mc_count(netdev);
2885 
2886 		/* Index 0 holds the broadcast address */
2887 		mcaddr_list =
2888 			kzalloc((mc_count + 1) * ETH_ALEN,
2889 				GFP_ATOMIC);
2890 		if (!mcaddr_list)
2891 			goto unlock;
2892 
2893 		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2894 
2895 		/* Copy rest of the MC addresses */
2896 		bnad_netdev_mc_list_get(netdev, mcaddr_list);
2897 
2898 		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2899 					mcaddr_list, NULL);
2900 
2901 		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2902 		kfree(mcaddr_list);
2903 	}
2904 unlock:
2905 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2906 }
2907 
2908 /*
 * bna_lock is used to sync writes to netdev->dev_addr.
2910  * conf_lock cannot be used since this call may be made
2911  * in a non-blocking context.
2912  */
2913 static int
2914 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2915 {
2916 	int err;
2917 	struct bnad *bnad = netdev_priv(netdev);
2918 	struct sockaddr *sa = (struct sockaddr *)mac_addr;
2919 	unsigned long flags;
2920 
2921 	spin_lock_irqsave(&bnad->bna_lock, flags);
2922 
2923 	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2924 
2925 	if (!err)
2926 		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2927 
2928 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2929 
2930 	return err;
2931 }
2932 
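/* Set the enet MTU and wait for the completion callback */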
2933 static int
2934 bnad_mtu_set(struct bnad *bnad, int mtu)
2935 {
2936 	unsigned long flags;
2937 
2938 	init_completion(&bnad->bnad_completions.mtu_comp);
2939 
2940 	spin_lock_irqsave(&bnad->bna_lock, flags);
2941 	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2942 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2943 
2944 	wait_for_completion(&bnad->bnad_completions.mtu_comp);
2945 
2946 	return bnad->bnad_completions.mtu_comp_status;
2947 }
2948 
2949 static int
2950 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2951 {
2952 	int err, mtu = netdev->mtu;
2953 	struct bnad *bnad = netdev_priv(netdev);
2954 
2955 	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2956 		return -EINVAL;
2957 
2958 	mutex_lock(&bnad->conf_mutex);
2959 
2960 	netdev->mtu = new_mtu;
2961 
2962 	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2963 	err = bnad_mtu_set(bnad, mtu);
2964 	if (err)
2965 		err = -EBUSY;
2966 
2967 	mutex_unlock(&bnad->conf_mutex);
2968 	return err;
2969 }
2970 
2971 static int
2972 bnad_vlan_rx_add_vid(struct net_device *netdev,
2973 				 unsigned short vid)
2974 {
2975 	struct bnad *bnad = netdev_priv(netdev);
2976 	unsigned long flags;
2977 
2978 	if (!bnad->rx_info[0].rx)
2979 		return 0;
2980 
2981 	mutex_lock(&bnad->conf_mutex);
2982 
2983 	spin_lock_irqsave(&bnad->bna_lock, flags);
2984 	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2985 	set_bit(vid, bnad->active_vlans);
2986 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2987 
2988 	mutex_unlock(&bnad->conf_mutex);
2989 
2990 	return 0;
2991 }
2992 
2993 static int
2994 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2995 				  unsigned short vid)
2996 {
2997 	struct bnad *bnad = netdev_priv(netdev);
2998 	unsigned long flags;
2999 
3000 	if (!bnad->rx_info[0].rx)
3001 		return 0;
3002 
3003 	mutex_lock(&bnad->conf_mutex);
3004 
3005 	spin_lock_irqsave(&bnad->bna_lock, flags);
3006 	clear_bit(vid, bnad->active_vlans);
3007 	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3008 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3009 
3010 	mutex_unlock(&bnad->conf_mutex);
3011 
3012 	return 0;
3013 }
3014 
3015 #ifdef CONFIG_NET_POLL_CONTROLLER
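/* Poll controller: run the ISR directly (INTx) or schedule NAPI per CQ (MSI-X) */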
3016 static void
3017 bnad_netpoll(struct net_device *netdev)
3018 {
3019 	struct bnad *bnad = netdev_priv(netdev);
3020 	struct bnad_rx_info *rx_info;
3021 	struct bnad_rx_ctrl *rx_ctrl;
3022 	u32 curr_mask;
3023 	int i, j;
3024 
3025 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3026 		bna_intx_disable(&bnad->bna, curr_mask);
3027 		bnad_isr(bnad->pcidev->irq, netdev);
3028 		bna_intx_enable(&bnad->bna, curr_mask);
3029 	} else {
3030 		/*
3031 		 * Tx processing may happen in sending context, so no need
3032 		 * to explicitly process completions here
3033 		 */
3034 
3035 		/* Rx processing */
3036 		for (i = 0; i < bnad->num_rx; i++) {
3037 			rx_info = &bnad->rx_info[i];
3038 			if (!rx_info->rx)
3039 				continue;
3040 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3041 				rx_ctrl = &rx_info->rx_ctrl[j];
3042 				if (rx_ctrl->ccb)
3043 					bnad_netif_rx_schedule_poll(bnad,
3044 							    rx_ctrl->ccb);
3045 			}
3046 		}
3047 	}
3048 }
3049 #endif
3050 
3051 static const struct net_device_ops bnad_netdev_ops = {
3052 	.ndo_open		= bnad_open,
3053 	.ndo_stop		= bnad_stop,
3054 	.ndo_start_xmit		= bnad_start_xmit,
3055 	.ndo_get_stats64		= bnad_get_stats64,
3056 	.ndo_set_rx_mode	= bnad_set_rx_mode,
3057 	.ndo_validate_addr      = eth_validate_addr,
3058 	.ndo_set_mac_address    = bnad_set_mac_address,
3059 	.ndo_change_mtu		= bnad_change_mtu,
3060 	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3061 	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3062 #ifdef CONFIG_NET_POLL_CONTROLLER
3063 	.ndo_poll_controller    = bnad_netpoll
3064 #endif
3065 };
3066 
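/* Set up netdev features, MMIO resource range and driver entry points */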
3067 static void
3068 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3069 {
3070 	struct net_device *netdev = bnad->netdev;
3071 
3072 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3073 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3074 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3075 
3076 	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3077 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3078 		NETIF_F_TSO | NETIF_F_TSO6;
3079 
3080 	netdev->features |= netdev->hw_features |
3081 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3082 
3083 	if (using_dac)
3084 		netdev->features |= NETIF_F_HIGHDMA;
3085 
3086 	netdev->mem_start = bnad->mmio_start;
3087 	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3088 
3089 	netdev->netdev_ops = &bnad_netdev_ops;
3090 	bnad_set_ethtool_ops(netdev);
3091 }
3092 
3093 /*
3094  * 1. Initialize the bnad structure
3095  * 2. Setup netdev pointer in pci_dev
3096  * 3. Initialize no. of TxQ & CQs & MSIX vectors
3097  * 4. Initialize work queue.
3098  */
3099 static int
3100 bnad_init(struct bnad *bnad,
3101 	  struct pci_dev *pdev, struct net_device *netdev)
3102 {
3103 	unsigned long flags;
3104 
3105 	SET_NETDEV_DEV(netdev, &pdev->dev);
3106 	pci_set_drvdata(pdev, netdev);
3107 
3108 	bnad->netdev = netdev;
3109 	bnad->pcidev = pdev;
3110 	bnad->mmio_start = pci_resource_start(pdev, 0);
3111 	bnad->mmio_len = pci_resource_len(pdev, 0);
3112 	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3113 	if (!bnad->bar0) {
3114 		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3115 		pci_set_drvdata(pdev, NULL);
3116 		return -ENOMEM;
3117 	}
3118 	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3119 	       (unsigned long long) bnad->mmio_len);
3120 
3121 	spin_lock_irqsave(&bnad->bna_lock, flags);
3122 	if (!bnad_msix_disable)
3123 		bnad->cfg_flags = BNAD_CF_MSIX;
3124 
3125 	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3126 
3127 	bnad_q_num_init(bnad);
3128 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3129 
3130 	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3131 		(bnad->num_rx * bnad->num_rxp_per_rx) +
3132 			 BNAD_MAILBOX_MSIX_VECTORS;
3133 
3134 	bnad->txq_depth = BNAD_TXQ_DEPTH;
3135 	bnad->rxq_depth = BNAD_RXQ_DEPTH;
3136 
3137 	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3138 	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3139 
3140 	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3141 	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3142 
3143 	if (!bnad->work_q)
3144 		return -ENOMEM;
3145 
3146 	return 0;
3147 }
3148 
3149 /*
3150  * Must be called after bnad_pci_uninit()
3151  * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
3153  */
3154 static void
3155 bnad_uninit(struct bnad *bnad)
3156 {
3157 	if (bnad->work_q) {
3158 		flush_workqueue(bnad->work_q);
3159 		destroy_workqueue(bnad->work_q);
3160 		bnad->work_q = NULL;
3161 	}
3162 
3163 	if (bnad->bar0)
3164 		iounmap(bnad->bar0);
3165 	pci_set_drvdata(bnad->pcidev, NULL);
3166 }
3167 
3168 /*
3169  * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) spin lock used to protect the bna state machine
3173  */
3174 static void
3175 bnad_lock_init(struct bnad *bnad)
3176 {
3177 	spin_lock_init(&bnad->bna_lock);
3178 	mutex_init(&bnad->conf_mutex);
3179 	mutex_init(&bnad_list_mutex);
3180 }
3181 
3182 static void
3183 bnad_lock_uninit(struct bnad *bnad)
3184 {
3185 	mutex_destroy(&bnad->conf_mutex);
3186 	mutex_destroy(&bnad_list_mutex);
3187 }
3188 
3189 /* PCI Initialization */
3190 static int
3191 bnad_pci_init(struct bnad *bnad,
3192 	      struct pci_dev *pdev, bool *using_dac)
3193 {
3194 	int err;
3195 
3196 	err = pci_enable_device(pdev);
3197 	if (err)
3198 		return err;
3199 	err = pci_request_regions(pdev, BNAD_NAME);
3200 	if (err)
3201 		goto disable_device;
3202 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3203 	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3204 		*using_dac = true;
3205 	} else {
3206 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3207 		if (err) {
3208 			err = dma_set_coherent_mask(&pdev->dev,
3209 						    DMA_BIT_MASK(32));
3210 			if (err)
3211 				goto release_regions;
3212 		}
3213 		*using_dac = false;
3214 	}
3215 	pci_set_master(pdev);
3216 	return 0;
3217 
3218 release_regions:
3219 	pci_release_regions(pdev);
3220 disable_device:
3221 	pci_disable_device(pdev);
3222 
3223 	return err;
3224 }
3225 
3226 static void
3227 bnad_pci_uninit(struct pci_dev *pdev)
3228 {
3229 	pci_release_regions(pdev);
3230 	pci_disable_device(pdev);
3231 }
3232 
3233 static int __devinit
3234 bnad_pci_probe(struct pci_dev *pdev,
3235 		const struct pci_device_id *pcidev_id)
3236 {
3237 	bool	using_dac;
3238 	int	err;
3239 	struct bnad *bnad;
3240 	struct bna *bna;
3241 	struct net_device *netdev;
3242 	struct bfa_pcidev pcidev_info;
3243 	unsigned long flags;
3244 
3245 	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3246 	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3247 
3248 	mutex_lock(&bnad_fwimg_mutex);
3249 	if (!cna_get_firmware_buf(pdev)) {
3250 		mutex_unlock(&bnad_fwimg_mutex);
3251 		pr_warn("Failed to load Firmware Image!\n");
3252 		return -ENODEV;
3253 	}
3254 	mutex_unlock(&bnad_fwimg_mutex);
3255 
3256 	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * bnad = netdev_priv(netdev)
3259 	 */
3260 	netdev = alloc_etherdev(sizeof(struct bnad));
3261 	if (!netdev) {
3262 		err = -ENOMEM;
3263 		return err;
3264 	}
3265 	bnad = netdev_priv(netdev);
3266 	bnad_lock_init(bnad);
3267 	bnad_add_to_list(bnad);
3268 
3269 	mutex_lock(&bnad->conf_mutex);
3270 	/*
3271 	 * PCI initialization
3272 	 *	Output : using_dac = 1 for 64 bit DMA
3273 	 *			   = 0 for 32 bit DMA
3274 	 */
3275 	err = bnad_pci_init(bnad, pdev, &using_dac);
3276 	if (err)
3277 		goto unlock_mutex;
3278 
3279 	/*
3280 	 * Initialize bnad structure
3281 	 * Setup relation between pci_dev & netdev
3282 	 */
3283 	err = bnad_init(bnad, pdev, netdev);
3284 	if (err)
3285 		goto pci_uninit;
3286 
3287 	/* Initialize netdev structure, set up ethtool ops */
3288 	bnad_netdev_init(bnad, using_dac);
3289 
3290 	/* Set link to down state */
3291 	netif_carrier_off(netdev);
3292 
	/* Set up the debugfs node for this bnad */
3294 	if (bna_debugfs_enable)
3295 		bnad_debugfs_init(bnad);
3296 
	/* Get resource requirement from bna */
3298 	spin_lock_irqsave(&bnad->bna_lock, flags);
3299 	bna_res_req(&bnad->res_info[0]);
3300 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3301 
3302 	/* Allocate resources from bna */
3303 	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3304 	if (err)
3305 		goto drv_uninit;
3306 
3307 	bna = &bnad->bna;
3308 
3309 	/* Setup pcidev_info for bna_init() */
3310 	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3311 	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3312 	pcidev_info.device_id = bnad->pcidev->device;
3313 	pcidev_info.pci_bar_kva = bnad->bar0;
3314 
3315 	spin_lock_irqsave(&bnad->bna_lock, flags);
3316 	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3317 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3318 
3319 	bnad->stats.bna_stats = &bna->stats;
3320 
3321 	bnad_enable_msix(bnad);
3322 	err = bnad_mbox_irq_alloc(bnad);
3323 	if (err)
3324 		goto res_free;
3325 
3326 
3327 	/* Set up timers */
3328 	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3329 				((unsigned long)bnad));
3330 	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3331 				((unsigned long)bnad));
3332 	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3333 				((unsigned long)bnad));
3334 	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3335 				((unsigned long)bnad));
3336 
3337 	/* Now start the timer before calling IOC */
3338 	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3339 		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3340 
3341 	/*
	 * Start the chip.
	 * If the callback comes back with an error, we bail out.
	 * This is a catastrophic error.
3345 	 */
3346 	err = bnad_ioceth_enable(bnad);
3347 	if (err) {
3348 		pr_err("BNA: Initialization failed err=%d\n",
3349 		       err);
3350 		goto probe_success;
3351 	}
3352 
3353 	spin_lock_irqsave(&bnad->bna_lock, flags);
3354 	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3355 		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3356 		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3357 			bna_attr(bna)->num_rxp - 1);
3358 		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3359 			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3360 			err = -EIO;
3361 	}
3362 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3363 	if (err)
3364 		goto disable_ioceth;
3365 
3366 	spin_lock_irqsave(&bnad->bna_lock, flags);
3367 	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3368 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3369 
3370 	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3371 	if (err) {
3372 		err = -EIO;
3373 		goto disable_ioceth;
3374 	}
3375 
3376 	spin_lock_irqsave(&bnad->bna_lock, flags);
3377 	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3378 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3379 
3380 	/* Get the burnt-in mac */
3381 	spin_lock_irqsave(&bnad->bna_lock, flags);
3382 	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3383 	bnad_set_netdev_perm_addr(bnad);
3384 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3385 
3386 	mutex_unlock(&bnad->conf_mutex);
3387 
	/* Finally, register with the net_device layer */
3389 	err = register_netdev(netdev);
3390 	if (err) {
3391 		pr_err("BNA : Registering with netdev failed\n");
3392 		goto probe_uninit;
3393 	}
3394 	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3395 
3396 	return 0;
3397 
3398 probe_success:
3399 	mutex_unlock(&bnad->conf_mutex);
3400 	return 0;
3401 
3402 probe_uninit:
3403 	mutex_lock(&bnad->conf_mutex);
3404 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3405 disable_ioceth:
3406 	bnad_ioceth_disable(bnad);
3407 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3408 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3409 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3410 	spin_lock_irqsave(&bnad->bna_lock, flags);
3411 	bna_uninit(bna);
3412 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3413 	bnad_mbox_irq_free(bnad);
3414 	bnad_disable_msix(bnad);
3415 res_free:
3416 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3417 drv_uninit:
3418 	/* Remove the debugfs node for this bnad */
3419 	kfree(bnad->regdata);
3420 	bnad_debugfs_uninit(bnad);
3421 	bnad_uninit(bnad);
3422 pci_uninit:
3423 	bnad_pci_uninit(pdev);
3424 unlock_mutex:
3425 	mutex_unlock(&bnad->conf_mutex);
3426 	bnad_remove_from_list(bnad);
3427 	bnad_lock_uninit(bnad);
3428 	free_netdev(netdev);
3429 	return err;
3430 }
3431 
3432 static void __devexit
3433 bnad_pci_remove(struct pci_dev *pdev)
3434 {
3435 	struct net_device *netdev = pci_get_drvdata(pdev);
3436 	struct bnad *bnad;
3437 	struct bna *bna;
3438 	unsigned long flags;
3439 
3440 	if (!netdev)
3441 		return;
3442 
3443 	pr_info("%s bnad_pci_remove\n", netdev->name);
3444 	bnad = netdev_priv(netdev);
3445 	bna = &bnad->bna;
3446 
3447 	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3448 		unregister_netdev(netdev);
3449 
3450 	mutex_lock(&bnad->conf_mutex);
3451 	bnad_ioceth_disable(bnad);
3452 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3453 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3454 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3455 	spin_lock_irqsave(&bnad->bna_lock, flags);
3456 	bna_uninit(bna);
3457 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3458 
3459 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3460 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3461 	bnad_mbox_irq_free(bnad);
3462 	bnad_disable_msix(bnad);
3463 	bnad_pci_uninit(pdev);
3464 	mutex_unlock(&bnad->conf_mutex);
3465 	bnad_remove_from_list(bnad);
3466 	bnad_lock_uninit(bnad);
3467 	/* Remove the debugfs node for this bnad */
3468 	kfree(bnad->regdata);
3469 	bnad_debugfs_uninit(bnad);
3470 	bnad_uninit(bnad);
3471 	free_netdev(netdev);
3472 }
3473 
3474 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3475 	{
3476 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3477 			PCI_DEVICE_ID_BROCADE_CT),
3478 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3479 		.class_mask =  0xffff00
3480 	},
3481 	{
3482 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3483 			BFA_PCI_DEVICE_ID_CT2),
3484 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3485 		.class_mask =  0xffff00
3486 	},
3487 	{0,  },
3488 };
3489 
3490 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3491 
3492 static struct pci_driver bnad_pci_driver = {
3493 	.name = BNAD_NAME,
3494 	.id_table = bnad_pci_id_table,
3495 	.probe = bnad_pci_probe,
3496 	.remove = __devexit_p(bnad_pci_remove),
3497 };
3498 
3499 static int __init
3500 bnad_module_init(void)
3501 {
3502 	int err;
3503 
3504 	pr_info("Brocade 10G Ethernet driver - version: %s\n",
3505 			BNAD_VERSION);
3506 
3507 	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3508 
3509 	err = pci_register_driver(&bnad_pci_driver);
3510 	if (err < 0) {
3511 		pr_err("bna : PCI registration failed in module init "
3512 		       "(%d)\n", err);
3513 		return err;
3514 	}
3515 
3516 	return 0;
3517 }
3518 
3519 static void __exit
3520 bnad_module_exit(void)
3521 {
3522 	pci_unregister_driver(&bnad_pci_driver);
3523 
3524 	if (bfi_fw)
3525 		release_firmware(bfi_fw);
3526 }
3527 
3528 module_init(bnad_module_init);
3529 module_exit(bnad_module_exit);
3530 
3531 MODULE_AUTHOR("Brocade");
3532 MODULE_LICENSE("GPL");
3533 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3534 MODULE_VERSION(BNAD_VERSION);
3535 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3536 MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3537