1 /*
2  * Linux network driver for QLogic BR-series Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15  * Copyright (c) 2014-2015 QLogic Corporation
16  * All rights reserved
17  * www.qlogic.com
18  */
19 #include <linux/bitops.h>
20 #include <linux/netdevice.h>
21 #include <linux/skbuff.h>
22 #include <linux/etherdevice.h>
23 #include <linux/in.h>
24 #include <linux/ethtool.h>
25 #include <linux/if_vlan.h>
26 #include <linux/if_ether.h>
27 #include <linux/ip.h>
28 #include <linux/prefetch.h>
29 #include <linux/module.h>
30 
31 #include "bnad.h"
32 #include "bna.h"
33 #include "cna.h"
34 
35 static DEFINE_MUTEX(bnad_fwimg_mutex);
36 
37 /*
38  * Module params
39  */
40 static uint bnad_msix_disable;
41 module_param(bnad_msix_disable, uint, 0444);
42 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
43 
44 static uint bnad_ioc_auto_recover = 1;
45 module_param(bnad_ioc_auto_recover, uint, 0444);
46 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47 
48 static uint bna_debugfs_enable = 1;
49 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
50 MODULE_PARM_DESC(bna_debugfs_enable,
51 		 "Enables debugfs feature, default=1, Range[false:0|true:1]");
52 
53 /*
54  * Global variables
55  */
56 static u32 bnad_rxqs_per_cq = 2;
57 static atomic_t bna_id;
58 static const u8 bnad_bcast_addr[] __aligned(2) =
59 	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
60 
61 /*
62  * Local MACROS
63  */
64 #define BNAD_GET_MBOX_IRQ(_bnad)				\
65 	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
66 	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67 	 ((_bnad)->pcidev->irq))
68 
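/* Fill a bna_res_info entry with a kernel-VA memory request for an unmap queue */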
69 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
70 do {								\
71 	(_res_info)->res_type = BNA_RES_T_MEM;			\
72 	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
73 	(_res_info)->res_u.mem_info.num = (_num);		\
74 	(_res_info)->res_u.mem_info.len = (_size);		\
75 } while (0)
76 
77 /*
78  * Reinitialize completions in the CQ once Rx is taken down
79  */
80 static void
81 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
82 {
83 	struct bna_cq_entry *cmpl;
84 	int i;
85 
86 	for (i = 0; i < ccb->q_depth; i++) {
87 		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
88 		cmpl->valid = 0;
89 	}
90 }
91 
92 /* Tx Datapath functions */
93 
94 
95 /* Caller should ensure that the entry at unmap_q[index] is valid */
96 static u32
97 bnad_tx_buff_unmap(struct bnad *bnad,
98 			      struct bnad_tx_unmap *unmap_q,
99 			      u32 q_depth, u32 index)
100 {
101 	struct bnad_tx_unmap *unmap;
102 	struct sk_buff *skb;
103 	int vector, nvecs;
104 
105 	unmap = &unmap_q[index];
106 	nvecs = unmap->nvecs;
107 
108 	skb = unmap->skb;
109 	unmap->skb = NULL;
110 	unmap->nvecs = 0;
111 	dma_unmap_single(&bnad->pcidev->dev,
112 		dma_unmap_addr(&unmap->vectors[0], dma_addr),
113 		skb_headlen(skb), DMA_TO_DEVICE);
114 	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
115 	nvecs--;
116 
117 	vector = 0;
118 	while (nvecs) {
119 		vector++;
120 		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
121 			vector = 0;
122 			BNA_QE_INDX_INC(index, q_depth);
123 			unmap = &unmap_q[index];
124 		}
125 
126 		dma_unmap_page(&bnad->pcidev->dev,
127 			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
128 			dma_unmap_len(&unmap->vectors[vector], dma_len),
129 			DMA_TO_DEVICE);
130 		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
131 		nvecs--;
132 	}
133 
134 	BNA_QE_INDX_INC(index, q_depth);
135 
136 	return index;
137 }
138 
139 /*
140  * Frees all pending Tx Bufs
141  * At this point no activity is expected on the Q,
142  * so DMA unmap & freeing is fine.
143  */
144 static void
145 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
146 {
147 	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
148 	struct sk_buff *skb;
149 	int i;
150 
151 	for (i = 0; i < tcb->q_depth; i++) {
152 		skb = unmap_q[i].skb;
153 		if (!skb)
154 			continue;
155 		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
156 
157 		dev_kfree_skb_any(skb);
158 	}
159 }
160 
161 /*
162  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
163  * Can be called in a) Interrupt context
164  *		    b) Sending context
165  */
166 static u32
167 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
168 {
169 	u32 sent_packets = 0, sent_bytes = 0;
170 	u32 wis, unmap_wis, hw_cons, cons, q_depth;
171 	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
172 	struct bnad_tx_unmap *unmap;
173 	struct sk_buff *skb;
174 
175 	/* Just return if TX is stopped */
176 	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
177 		return 0;
178 
179 	hw_cons = *(tcb->hw_consumer_index);
180 	cons = tcb->consumer_index;
181 	q_depth = tcb->q_depth;
182 
183 	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
184 	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
185 
186 	while (wis) {
187 		unmap = &unmap_q[cons];
188 
189 		skb = unmap->skb;
190 
191 		sent_packets++;
192 		sent_bytes += skb->len;
193 
194 		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
195 		wis -= unmap_wis;
196 
197 		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
198 		dev_kfree_skb_any(skb);
199 	}
200 
201 	/* Update consumer pointers. */
202 	tcb->consumer_index = hw_cons;
203 
204 	tcb->txq->tx_packets += sent_packets;
205 	tcb->txq->tx_bytes += sent_bytes;
206 
207 	return sent_packets;
208 }
209 
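/*
 * Reclaim completed Tx buffers for a TCB. Wakes the netdev queue when
 * enough entries are freed and the link is up, then acks the IB.
 * Serialized against the cleanup path via the BNAD_TXQ_FREE_SENT bit.
 */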
210 static u32
211 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
212 {
213 	struct net_device *netdev = bnad->netdev;
214 	u32 sent = 0;
215 
216 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
217 		return 0;
218 
219 	sent = bnad_txcmpl_process(bnad, tcb);
220 	if (sent) {
221 		if (netif_queue_stopped(netdev) &&
222 		    netif_carrier_ok(netdev) &&
223 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
224 				    BNAD_NETIF_WAKE_THRESHOLD) {
225 			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
226 				netif_wake_queue(netdev);
227 				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
228 			}
229 		}
230 	}
231 
232 	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
233 		bna_ib_ack(tcb->i_dbell, sent);
234 
235 	smp_mb__before_atomic();
236 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
237 
238 	return sent;
239 }
240 
241 /* MSIX Tx Completion Handler */
242 static irqreturn_t
243 bnad_msix_tx(int irq, void *data)
244 {
245 	struct bna_tcb *tcb = (struct bna_tcb *)data;
246 	struct bnad *bnad = tcb->bnad;
247 
248 	bnad_tx_complete(bnad, tcb);
249 
250 	return IRQ_HANDLED;
251 }
252 
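/* Reset the Rx unmap queue allocation state */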
253 static inline void
254 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
255 {
256 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
257 
258 	unmap_q->reuse_pi = -1;
259 	unmap_q->alloc_order = -1;
260 	unmap_q->map_size = 0;
261 	unmap_q->type = BNAD_RXBUF_NONE;
262 }
263 
264 /* Default is page-based allocation. Multi-buffer support - TBD */
265 static int
266 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
267 {
268 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
269 	int order;
270 
271 	bnad_rxq_alloc_uninit(bnad, rcb);
272 
273 	order = get_order(rcb->rxq->buffer_size);
274 
275 	unmap_q->type = BNAD_RXBUF_PAGE;
276 
277 	if (bna_is_small_rxq(rcb->id)) {
278 		unmap_q->alloc_order = 0;
279 		unmap_q->map_size = rcb->rxq->buffer_size;
280 	} else {
281 		if (rcb->rxq->multi_buffer) {
282 			unmap_q->alloc_order = 0;
283 			unmap_q->map_size = rcb->rxq->buffer_size;
284 			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
285 		} else {
286 			unmap_q->alloc_order = order;
287 			unmap_q->map_size =
288 				(rcb->rxq->buffer_size > 2048) ?
289 				PAGE_SIZE << order : 2048;
290 		}
291 	}
292 
293 	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
294 
295 	return 0;
296 }
297 
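/* Unmap and release a single page-based Rx buffer */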
298 static inline void
299 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
300 {
301 	if (!unmap->page)
302 		return;
303 
304 	dma_unmap_page(&bnad->pcidev->dev,
305 			dma_unmap_addr(&unmap->vector, dma_addr),
306 			unmap->vector.len, DMA_FROM_DEVICE);
307 	put_page(unmap->page);
308 	unmap->page = NULL;
309 	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
310 	unmap->vector.len = 0;
311 }
312 
313 static inline void
314 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
315 {
316 	if (!unmap->skb)
317 		return;
318 
319 	dma_unmap_single(&bnad->pcidev->dev,
320 			dma_unmap_addr(&unmap->vector, dma_addr),
321 			unmap->vector.len, DMA_FROM_DEVICE);
322 	dev_kfree_skb_any(unmap->skb);
323 	unmap->skb = NULL;
324 	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
325 	unmap->vector.len = 0;
326 }
327 
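/* Release all Rx buffers still held in the unmap queue of an RxQ */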
328 static void
329 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
330 {
331 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
332 	int i;
333 
334 	for (i = 0; i < rcb->q_depth; i++) {
335 		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
336 
337 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
338 			bnad_rxq_cleanup_skb(bnad, unmap);
339 		else
340 			bnad_rxq_cleanup_page(bnad, unmap);
341 	}
342 	bnad_rxq_alloc_uninit(bnad, rcb);
343 }
344 
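/*
 * Allocate page-based Rx buffers, DMA-map them and post them to the RxQ.
 * Partially used pages are reused via reuse_pi; the doorbell is rung
 * once at least one buffer has been posted.
 */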
345 static u32
346 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
347 {
348 	u32 alloced, prod, q_depth;
349 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
350 	struct bnad_rx_unmap *unmap, *prev;
351 	struct bna_rxq_entry *rxent;
352 	struct page *page;
353 	u32 page_offset, alloc_size;
354 	dma_addr_t dma_addr;
355 
356 	prod = rcb->producer_index;
357 	q_depth = rcb->q_depth;
358 
359 	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
360 	alloced = 0;
361 
362 	while (nalloc--) {
363 		unmap = &unmap_q->unmap[prod];
364 
365 		if (unmap_q->reuse_pi < 0) {
366 			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
367 					unmap_q->alloc_order);
368 			page_offset = 0;
369 		} else {
370 			prev = &unmap_q->unmap[unmap_q->reuse_pi];
371 			page = prev->page;
372 			page_offset = prev->page_offset + unmap_q->map_size;
373 			get_page(page);
374 		}
375 
376 		if (unlikely(!page)) {
377 			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
378 			rcb->rxq->rxbuf_alloc_failed++;
379 			goto finishing;
380 		}
381 
382 		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
383 					unmap_q->map_size, DMA_FROM_DEVICE);
384 		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
385 			put_page(page);
386 			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
387 			rcb->rxq->rxbuf_map_failed++;
388 			goto finishing;
389 		}
390 
391 		unmap->page = page;
392 		unmap->page_offset = page_offset;
393 		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
394 		unmap->vector.len = unmap_q->map_size;
395 		page_offset += unmap_q->map_size;
396 
397 		if (page_offset < alloc_size)
398 			unmap_q->reuse_pi = prod;
399 		else
400 			unmap_q->reuse_pi = -1;
401 
402 		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
403 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
404 		BNA_QE_INDX_INC(prod, q_depth);
405 		alloced++;
406 	}
407 
408 finishing:
409 	if (likely(alloced)) {
410 		rcb->producer_index = prod;
411 		smp_mb();
412 		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
413 			bna_rxq_prod_indx_doorbell(rcb);
414 	}
415 
416 	return alloced;
417 }
418 
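/*
 * Allocate skb-based Rx buffers, DMA-map them and post them to the RxQ,
 * ringing the doorbell once at least one buffer has been posted.
 */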
419 static u32
420 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
421 {
422 	u32 alloced, prod, q_depth, buff_sz;
423 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
424 	struct bnad_rx_unmap *unmap;
425 	struct bna_rxq_entry *rxent;
426 	struct sk_buff *skb;
427 	dma_addr_t dma_addr;
428 
429 	buff_sz = rcb->rxq->buffer_size;
430 	prod = rcb->producer_index;
431 	q_depth = rcb->q_depth;
432 
433 	alloced = 0;
434 	while (nalloc--) {
435 		unmap = &unmap_q->unmap[prod];
436 
437 		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
438 
439 		if (unlikely(!skb)) {
440 			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
441 			rcb->rxq->rxbuf_alloc_failed++;
442 			goto finishing;
443 		}
444 
445 		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
446 					  buff_sz, DMA_FROM_DEVICE);
447 		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
448 			dev_kfree_skb_any(skb);
449 			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
450 			rcb->rxq->rxbuf_map_failed++;
451 			goto finishing;
452 		}
453 
454 		unmap->skb = skb;
455 		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
456 		unmap->vector.len = buff_sz;
457 
458 		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
459 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
460 		BNA_QE_INDX_INC(prod, q_depth);
461 		alloced++;
462 	}
463 
464 finishing:
465 	if (likely(alloced)) {
466 		rcb->producer_index = prod;
467 		smp_mb();
468 		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
469 			bna_rxq_prod_indx_doorbell(rcb);
470 	}
471 
472 	return alloced;
473 }
474 
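/* Refill the RxQ when the number of free entries crosses the refill threshold */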
475 static inline void
476 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
477 {
478 	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
479 	u32 to_alloc;
480 
481 	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
482 	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
483 		return;
484 
485 	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
486 		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
487 	else
488 		bnad_rxq_refill_page(bnad, rcb, to_alloc);
489 }
490 
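/* Completion flag combinations for which received checksums can be trusted */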
491 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
492 					BNA_CQ_EF_IPV6 | \
493 					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
494 					BNA_CQ_EF_L4_CKSUM_OK)
495 
496 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
497 				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
498 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
499 				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
500 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
501 				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
502 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
503 				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
504 
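/* Drop a received frame: release the Rx buffers backing its vectors */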
505 static void
506 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
507 		    u32 sop_ci, u32 nvecs)
508 {
509 	struct bnad_rx_unmap_q *unmap_q;
510 	struct bnad_rx_unmap *unmap;
511 	u32 ci, vec;
512 
513 	unmap_q = rcb->unmap_q;
514 	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
515 		unmap = &unmap_q->unmap[ci];
516 		BNA_QE_INDX_INC(ci, rcb->q_depth);
517 
518 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
519 			bnad_rxq_cleanup_skb(bnad, unmap);
520 		else
521 			bnad_rxq_cleanup_page(bnad, unmap);
522 	}
523 }
524 
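/*
 * Attach the page fragments of a (possibly multi-vector) completion to
 * the skb obtained from napi_get_frags() and account for their length.
 */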
525 static void
526 bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
527 {
528 	struct bna_rcb *rcb;
529 	struct bnad *bnad;
530 	struct bnad_rx_unmap_q *unmap_q;
531 	struct bna_cq_entry *cq, *cmpl;
532 	u32 ci, pi, totlen = 0;
533 
534 	cq = ccb->sw_q;
535 	pi = ccb->producer_index;
536 	cmpl = &cq[pi];
537 
538 	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
539 	unmap_q = rcb->unmap_q;
540 	bnad = rcb->bnad;
541 	ci = rcb->consumer_index;
542 
543 	/* prefetch header */
544 	prefetch(page_address(unmap_q->unmap[ci].page) +
545 		 unmap_q->unmap[ci].page_offset);
546 
547 	while (nvecs--) {
548 		struct bnad_rx_unmap *unmap;
549 		u32 len;
550 
551 		unmap = &unmap_q->unmap[ci];
552 		BNA_QE_INDX_INC(ci, rcb->q_depth);
553 
554 		dma_unmap_page(&bnad->pcidev->dev,
555 			       dma_unmap_addr(&unmap->vector, dma_addr),
556 			       unmap->vector.len, DMA_FROM_DEVICE);
557 
558 		len = ntohs(cmpl->length);
559 		skb->truesize += unmap->vector.len;
560 		totlen += len;
561 
562 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
563 				   unmap->page, unmap->page_offset, len);
564 
565 		unmap->page = NULL;
566 		unmap->vector.len = 0;
567 
568 		BNA_QE_INDX_INC(pi, ccb->q_depth);
569 		cmpl = &cq[pi];
570 	}
571 
572 	skb->len += totlen;
573 	skb->data_len += totlen;
574 }
575 
576 static inline void
577 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
578 		  struct bnad_rx_unmap *unmap, u32 len)
579 {
580 	prefetch(skb->data);
581 
582 	dma_unmap_single(&bnad->pcidev->dev,
583 			dma_unmap_addr(&unmap->vector, dma_addr),
584 			unmap->vector.len, DMA_FROM_DEVICE);
585 
586 	skb_put(skb, len);
587 	skb->protocol = eth_type_trans(skb, bnad->netdev);
588 
589 	unmap->skb = NULL;
590 	unmap->vector.len = 0;
591 }
592 
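/*
 * NAPI Rx processing: walk valid CQ entries up to budget, build skbs or
 * GRO fragments, drop errored frames, apply checksum/VLAN offload results,
 * then ack the IB and repost Rx buffers.
 */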
593 static u32
594 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
595 {
596 	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
597 	struct bna_rcb *rcb = NULL;
598 	struct bnad_rx_unmap_q *unmap_q;
599 	struct bnad_rx_unmap *unmap = NULL;
600 	struct sk_buff *skb = NULL;
601 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
602 	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
603 	u32 packets = 0, len = 0, totlen = 0;
604 	u32 pi, vec, sop_ci = 0, nvecs = 0;
605 	u32 flags, masked_flags;
606 
607 	prefetch(bnad->netdev);
608 
609 	cq = ccb->sw_q;
610 
611 	while (packets < budget) {
612 		cmpl = &cq[ccb->producer_index];
613 		if (!cmpl->valid)
614 			break;
615 		/* The 'valid' field is set by the adapter, only after writing
616 		 * the other fields of completion entry. Hence, do not load
617 		 * other fields of completion entry *before* the 'valid' is
618 		 * loaded. Adding the rmb() here prevents the compiler and/or
619 		 * CPU from reordering the reads which would potentially result
620 		 * in reading stale values in completion entry.
621 		 */
622 		rmb();
623 
624 		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
625 
626 		if (bna_is_small_rxq(cmpl->rxq_id))
627 			rcb = ccb->rcb[1];
628 		else
629 			rcb = ccb->rcb[0];
630 
631 		unmap_q = rcb->unmap_q;
632 
633 		/* start of packet ci */
634 		sop_ci = rcb->consumer_index;
635 
636 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
637 			unmap = &unmap_q->unmap[sop_ci];
638 			skb = unmap->skb;
639 		} else {
640 			skb = napi_get_frags(&rx_ctrl->napi);
641 			if (unlikely(!skb))
642 				break;
643 		}
644 		prefetch(skb);
645 
646 		flags = ntohl(cmpl->flags);
647 		len = ntohs(cmpl->length);
648 		totlen = len;
649 		nvecs = 1;
650 
651 		/* Gather all the completions for this frame. If the last
652 		 * one is not valid yet, busy-waiting doesn't help much; break here.
653 		 */
654 		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
655 		    (flags & BNA_CQ_EF_EOP) == 0) {
656 			pi = ccb->producer_index;
657 			do {
658 				BNA_QE_INDX_INC(pi, ccb->q_depth);
659 				next_cmpl = &cq[pi];
660 
661 				if (!next_cmpl->valid)
662 					break;
663 				/* The 'valid' field is set by the adapter, only
664 				 * after writing the other fields of completion
665 				 * entry. Hence, do not load other fields of
666 				 * completion entry *before* the 'valid' is
667 				 * loaded. Adding the rmb() here prevents the
668 				 * compiler and/or CPU from reordering the reads
669 				 * which would potentially result in reading
670 				 * stale values in completion entry.
671 				 */
672 				rmb();
673 
674 				len = ntohs(next_cmpl->length);
675 				flags = ntohl(next_cmpl->flags);
676 
677 				nvecs++;
678 				totlen += len;
679 			} while ((flags & BNA_CQ_EF_EOP) == 0);
680 
681 			if (!next_cmpl->valid)
682 				break;
683 		}
684 		packets++;
685 
686 		/* TODO: BNA_CQ_EF_LOCAL ? */
687 		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
688 						BNA_CQ_EF_FCS_ERROR |
689 						BNA_CQ_EF_TOO_LONG))) {
690 			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
691 			rcb->rxq->rx_packets_with_error++;
692 
693 			goto next;
694 		}
695 
696 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
697 			bnad_cq_setup_skb(bnad, skb, unmap, len);
698 		else
699 			bnad_cq_setup_skb_frags(ccb, skb, nvecs);
700 
701 		rcb->rxq->rx_packets++;
702 		rcb->rxq->rx_bytes += totlen;
703 		ccb->bytes_per_intr += totlen;
704 
705 		masked_flags = flags & flags_cksum_prot_mask;
706 
707 		if (likely
708 		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
709 		     ((masked_flags == flags_tcp4) ||
710 		      (masked_flags == flags_udp4) ||
711 		      (masked_flags == flags_tcp6) ||
712 		      (masked_flags == flags_udp6))))
713 			skb->ip_summed = CHECKSUM_UNNECESSARY;
714 		else
715 			skb_checksum_none_assert(skb);
716 
717 		if ((flags & BNA_CQ_EF_VLAN) &&
718 		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
719 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
720 
721 		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
722 			netif_receive_skb(skb);
723 		else
724 			napi_gro_frags(&rx_ctrl->napi);
725 
726 next:
727 		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
728 		for (vec = 0; vec < nvecs; vec++) {
729 			cmpl = &cq[ccb->producer_index];
730 			cmpl->valid = 0;
731 			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
732 		}
733 	}
734 
735 	napi_gro_flush(&rx_ctrl->napi, false);
736 	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
737 		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
738 
739 	bnad_rxq_post(bnad, ccb->rcb[0]);
740 	if (ccb->rcb[1])
741 		bnad_rxq_post(bnad, ccb->rcb[1]);
742 
743 	return packets;
744 }
745 
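/* Schedule the NAPI poll handler for the given CQ */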
746 static void
747 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
748 {
749 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
750 	struct napi_struct *napi = &rx_ctrl->napi;
751 
752 	if (likely(napi_schedule_prep(napi))) {
753 		__napi_schedule(napi);
754 		rx_ctrl->rx_schedule++;
755 	}
756 }
757 
758 /* MSIX Rx Path Handler */
759 static irqreturn_t
760 bnad_msix_rx(int irq, void *data)
761 {
762 	struct bna_ccb *ccb = (struct bna_ccb *)data;
763 
764 	if (ccb) {
765 		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
766 		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
767 	}
768 
769 	return IRQ_HANDLED;
770 }
771 
772 /* Interrupt handlers */
773 
774 /* Mbox Interrupt Handlers */
775 static irqreturn_t
776 bnad_msix_mbox_handler(int irq, void *data)
777 {
778 	u32 intr_status;
779 	unsigned long flags;
780 	struct bnad *bnad = (struct bnad *)data;
781 
782 	spin_lock_irqsave(&bnad->bna_lock, flags);
783 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
784 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
785 		return IRQ_HANDLED;
786 	}
787 
788 	bna_intr_status_get(&bnad->bna, intr_status);
789 
790 	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
791 		bna_mbox_handler(&bnad->bna, intr_status);
792 
793 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
794 
795 	return IRQ_HANDLED;
796 }
797 
798 static irqreturn_t
799 bnad_isr(int irq, void *data)
800 {
801 	int i, j;
802 	u32 intr_status;
803 	unsigned long flags;
804 	struct bnad *bnad = (struct bnad *)data;
805 	struct bnad_rx_info *rx_info;
806 	struct bnad_rx_ctrl *rx_ctrl;
807 	struct bna_tcb *tcb = NULL;
808 
809 	spin_lock_irqsave(&bnad->bna_lock, flags);
810 	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
811 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
812 		return IRQ_NONE;
813 	}
814 
815 	bna_intr_status_get(&bnad->bna, intr_status);
816 
817 	if (unlikely(!intr_status)) {
818 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
819 		return IRQ_NONE;
820 	}
821 
822 	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
823 		bna_mbox_handler(&bnad->bna, intr_status);
824 
825 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
826 
827 	if (!BNA_IS_INTX_DATA_INTR(intr_status))
828 		return IRQ_HANDLED;
829 
830 	/* Process data interrupts */
831 	/* Tx processing */
832 	for (i = 0; i < bnad->num_tx; i++) {
833 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
834 			tcb = bnad->tx_info[i].tcb[j];
835 			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
836 				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
837 		}
838 	}
839 	/* Rx processing */
840 	for (i = 0; i < bnad->num_rx; i++) {
841 		rx_info = &bnad->rx_info[i];
842 		if (!rx_info->rx)
843 			continue;
844 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
845 			rx_ctrl = &rx_info->rx_ctrl[j];
846 			if (rx_ctrl->ccb)
847 				bnad_netif_rx_schedule_poll(bnad,
848 							    rx_ctrl->ccb);
849 		}
850 	}
851 	return IRQ_HANDLED;
852 }
853 
854 /*
855  * Called in interrupt / callback context
856  * with bna_lock held, so cfg_flags access is OK
857  */
858 static void
859 bnad_enable_mbox_irq(struct bnad *bnad)
860 {
861 	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
862 
863 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
864 }
865 
866 /*
867  * Called with bnad->bna_lock held because of
868  * bnad->cfg_flags access.
869  */
870 static void
871 bnad_disable_mbox_irq(struct bnad *bnad)
872 {
873 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
874 
875 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
876 }
877 
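/* Publish the adapter's permanent MAC address; use it as dev_addr if none is set yet */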
878 static void
879 bnad_set_netdev_perm_addr(struct bnad *bnad)
880 {
881 	struct net_device *netdev = bnad->netdev;
882 
883 	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
884 	if (is_zero_ether_addr(netdev->dev_addr))
885 		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
886 }
887 
888 /* Control Path Handlers */
889 
890 /* Callbacks */
891 void
892 bnad_cb_mbox_intr_enable(struct bnad *bnad)
893 {
894 	bnad_enable_mbox_irq(bnad);
895 }
896 
897 void
898 bnad_cb_mbox_intr_disable(struct bnad *bnad)
899 {
900 	bnad_disable_mbox_irq(bnad);
901 }
902 
903 void
904 bnad_cb_ioceth_ready(struct bnad *bnad)
905 {
906 	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
907 	complete(&bnad->bnad_completions.ioc_comp);
908 }
909 
910 void
911 bnad_cb_ioceth_failed(struct bnad *bnad)
912 {
913 	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
914 	complete(&bnad->bnad_completions.ioc_comp);
915 }
916 
917 void
918 bnad_cb_ioceth_disabled(struct bnad *bnad)
919 {
920 	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
921 	complete(&bnad->bnad_completions.ioc_comp);
922 }
923 
924 static void
925 bnad_cb_enet_disabled(void *arg)
926 {
927 	struct bnad *bnad = (struct bnad *)arg;
928 
929 	netif_carrier_off(bnad->netdev);
930 	complete(&bnad->bnad_completions.enet_comp);
931 }
932 
933 void
934 bnad_cb_ethport_link_status(struct bnad *bnad,
935 			enum bna_link_status link_status)
936 {
937 	bool link_up = false;
938 
939 	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
940 
941 	if (link_status == BNA_CEE_UP) {
942 		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
943 			BNAD_UPDATE_CTR(bnad, cee_toggle);
944 		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
945 	} else {
946 		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
947 			BNAD_UPDATE_CTR(bnad, cee_toggle);
948 		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
949 	}
950 
951 	if (link_up) {
952 		if (!netif_carrier_ok(bnad->netdev)) {
953 			uint tx_id, tcb_id;
954 			netdev_info(bnad->netdev, "link up\n");
955 			netif_carrier_on(bnad->netdev);
956 			BNAD_UPDATE_CTR(bnad, link_toggle);
957 			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
958 				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
959 				      tcb_id++) {
960 					struct bna_tcb *tcb =
961 					bnad->tx_info[tx_id].tcb[tcb_id];
962 					u32 txq_id;
963 					if (!tcb)
964 						continue;
965 
966 					txq_id = tcb->id;
967 
968 					if (test_bit(BNAD_TXQ_TX_STARTED,
969 						     &tcb->flags)) {
970 						/*
971 						 * Force an immediate
972 						 * Transmit Schedule */
973 						netif_wake_subqueue(
974 								bnad->netdev,
975 								txq_id);
976 						BNAD_UPDATE_CTR(bnad,
977 							netif_queue_wakeup);
978 					} else {
979 						netif_stop_subqueue(
980 								bnad->netdev,
981 								txq_id);
982 						BNAD_UPDATE_CTR(bnad,
983 							netif_queue_stop);
984 					}
985 				}
986 			}
987 		}
988 	} else {
989 		if (netif_carrier_ok(bnad->netdev)) {
990 			netdev_info(bnad->netdev, "link down\n");
991 			netif_carrier_off(bnad->netdev);
992 			BNAD_UPDATE_CTR(bnad, link_toggle);
993 		}
994 	}
995 }
996 
997 static void
998 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
999 {
1000 	struct bnad *bnad = (struct bnad *)arg;
1001 
1002 	complete(&bnad->bnad_completions.tx_comp);
1003 }
1004 
1005 static void
1006 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1007 {
1008 	struct bnad_tx_info *tx_info =
1009 			(struct bnad_tx_info *)tcb->txq->tx->priv;
1010 
1011 	tcb->priv = tcb;
1012 	tx_info->tcb[tcb->id] = tcb;
1013 }
1014 
1015 static void
1016 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1017 {
1018 	struct bnad_tx_info *tx_info =
1019 			(struct bnad_tx_info *)tcb->txq->tx->priv;
1020 
1021 	tx_info->tcb[tcb->id] = NULL;
1022 	tcb->priv = NULL;
1023 }
1024 
1025 static void
1026 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1027 {
1028 	struct bnad_rx_info *rx_info =
1029 			(struct bnad_rx_info *)ccb->cq->rx->priv;
1030 
1031 	rx_info->rx_ctrl[ccb->id].ccb = ccb;
1032 	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1033 }
1034 
1035 static void
1036 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1037 {
1038 	struct bnad_rx_info *rx_info =
1039 			(struct bnad_rx_info *)ccb->cq->rx->priv;
1040 
1041 	rx_info->rx_ctrl[ccb->id].ccb = NULL;
1042 }
1043 
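/* Tx stall callback: stop the Tx subqueues so no new frames are posted */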
1044 static void
1045 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1046 {
1047 	struct bnad_tx_info *tx_info =
1048 			(struct bnad_tx_info *)tx->priv;
1049 	struct bna_tcb *tcb;
1050 	u32 txq_id;
1051 	int i;
1052 
1053 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1054 		tcb = tx_info->tcb[i];
1055 		if (!tcb)
1056 			continue;
1057 		txq_id = tcb->id;
1058 		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1059 		netif_stop_subqueue(bnad->netdev, txq_id);
1060 	}
1061 }
1062 
1063 static void
1064 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1065 {
1066 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1067 	struct bna_tcb *tcb;
1068 	u32 txq_id;
1069 	int i;
1070 
1071 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1072 		tcb = tx_info->tcb[i];
1073 		if (!tcb)
1074 			continue;
1075 		txq_id = tcb->id;
1076 
1077 		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1078 		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1079 		BUG_ON(*(tcb->hw_consumer_index) != 0);
1080 
1081 		if (netif_carrier_ok(bnad->netdev)) {
1082 			netif_wake_subqueue(bnad->netdev, txq_id);
1083 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1084 		}
1085 	}
1086 
1087 	/*
1088 	 * Workaround: if the first ioceth enable failed, we may be
1089 	 * left with a zero MAC address. Try to get the MAC address
1090 	 * again here.
1091 	 */
1092 	if (is_zero_ether_addr(bnad->perm_addr)) {
1093 		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1094 		bnad_set_netdev_perm_addr(bnad);
1095 	}
1096 }
1097 
1098 /*
1099  * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to the Tx fsm.
1100  */
1101 static void
1102 bnad_tx_cleanup(struct delayed_work *work)
1103 {
1104 	struct bnad_tx_info *tx_info =
1105 		container_of(work, struct bnad_tx_info, tx_cleanup_work);
1106 	struct bnad *bnad = NULL;
1107 	struct bna_tcb *tcb;
1108 	unsigned long flags;
1109 	u32 i, pending = 0;
1110 
1111 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1112 		tcb = tx_info->tcb[i];
1113 		if (!tcb)
1114 			continue;
1115 
1116 		bnad = tcb->bnad;
1117 
1118 		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1119 			pending++;
1120 			continue;
1121 		}
1122 
1123 		bnad_txq_cleanup(bnad, tcb);
1124 
1125 		smp_mb__before_atomic();
1126 		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1127 	}
1128 
1129 	if (pending) {
1130 		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1131 			msecs_to_jiffies(1));
1132 		return;
1133 	}
1134 
1135 	spin_lock_irqsave(&bnad->bna_lock, flags);
1136 	bna_tx_cleanup_complete(tx_info->tx);
1137 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1138 }
1139 
1140 static void
1141 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1142 {
1143 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1144 	struct bna_tcb *tcb;
1145 	int i;
1146 
1147 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1148 		tcb = tx_info->tcb[i];
1149 		if (!tcb)
1150 			continue;
1151 	}
1152 
1153 	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1154 }
1155 
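/* Rx stall callback: block posting of new Rx buffers on all RCBs */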
1156 static void
1157 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1158 {
1159 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1160 	struct bna_ccb *ccb;
1161 	struct bnad_rx_ctrl *rx_ctrl;
1162 	int i;
1163 
1164 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1165 		rx_ctrl = &rx_info->rx_ctrl[i];
1166 		ccb = rx_ctrl->ccb;
1167 		if (!ccb)
1168 			continue;
1169 
1170 		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1171 
1172 		if (ccb->rcb[1])
1173 			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1174 	}
1175 }
1176 
1177 /*
1178  * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to the Rx fsm.
1179  */
1180 static void
1181 bnad_rx_cleanup(void *work)
1182 {
1183 	struct bnad_rx_info *rx_info =
1184 		container_of(work, struct bnad_rx_info, rx_cleanup_work);
1185 	struct bnad_rx_ctrl *rx_ctrl;
1186 	struct bnad *bnad = NULL;
1187 	unsigned long flags;
1188 	u32 i;
1189 
1190 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1191 		rx_ctrl = &rx_info->rx_ctrl[i];
1192 
1193 		if (!rx_ctrl->ccb)
1194 			continue;
1195 
1196 		bnad = rx_ctrl->ccb->bnad;
1197 
1198 		/*
1199 		 * Wait till the poll handler has exited
1200 		 * and nothing can be scheduled anymore
1201 		 */
1202 		napi_disable(&rx_ctrl->napi);
1203 
1204 		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1205 		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1206 		if (rx_ctrl->ccb->rcb[1])
1207 			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1208 	}
1209 
1210 	spin_lock_irqsave(&bnad->bna_lock, flags);
1211 	bna_rx_cleanup_complete(rx_info->rx);
1212 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1213 }
1214 
1215 static void
1216 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1217 {
1218 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1219 	struct bna_ccb *ccb;
1220 	struct bnad_rx_ctrl *rx_ctrl;
1221 	int i;
1222 
1223 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1224 		rx_ctrl = &rx_info->rx_ctrl[i];
1225 		ccb = rx_ctrl->ccb;
1226 		if (!ccb)
1227 			continue;
1228 
1229 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1230 
1231 		if (ccb->rcb[1])
1232 			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1233 	}
1234 
1235 	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1236 }
1237 
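/*
 * Rx post callback: enable NAPI, initialize the Rx buffer allocator and
 * post the initial set of Rx buffers on each RCB.
 */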
1238 static void
1239 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1240 {
1241 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1242 	struct bna_ccb *ccb;
1243 	struct bna_rcb *rcb;
1244 	struct bnad_rx_ctrl *rx_ctrl;
1245 	int i, j;
1246 
1247 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1248 		rx_ctrl = &rx_info->rx_ctrl[i];
1249 		ccb = rx_ctrl->ccb;
1250 		if (!ccb)
1251 			continue;
1252 
1253 		napi_enable(&rx_ctrl->napi);
1254 
1255 		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1256 			rcb = ccb->rcb[j];
1257 			if (!rcb)
1258 				continue;
1259 
1260 			bnad_rxq_alloc_init(bnad, rcb);
1261 			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1262 			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1263 			bnad_rxq_post(bnad, rcb);
1264 		}
1265 	}
1266 }
1267 
1268 static void
1269 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1270 {
1271 	struct bnad *bnad = (struct bnad *)arg;
1272 
1273 	complete(&bnad->bnad_completions.rx_comp);
1274 }
1275 
1276 static void
1277 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1278 {
1279 	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1280 	complete(&bnad->bnad_completions.mcast_comp);
1281 }
1282 
1283 void
1284 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1285 		       struct bna_stats *stats)
1286 {
1287 	if (status == BNA_CB_SUCCESS)
1288 		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1289 
1290 	if (!netif_running(bnad->netdev) ||
1291 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1292 		return;
1293 
1294 	mod_timer(&bnad->stats_timer,
1295 		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1296 }
1297 
1298 static void
1299 bnad_cb_enet_mtu_set(struct bnad *bnad)
1300 {
1301 	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1302 	complete(&bnad->bnad_completions.mtu_comp);
1303 }
1304 
1305 void
1306 bnad_cb_completion(void *arg, enum bfa_status status)
1307 {
1308 	struct bnad_iocmd_comp *iocmd_comp =
1309 			(struct bnad_iocmd_comp *)arg;
1310 
1311 	iocmd_comp->comp_status = (u32) status;
1312 	complete(&iocmd_comp->comp);
1313 }
1314 
1315 /* Resource allocation, free functions */
1316 
1317 static void
1318 bnad_mem_free(struct bnad *bnad,
1319 	      struct bna_mem_info *mem_info)
1320 {
1321 	int i;
1322 	dma_addr_t dma_pa;
1323 
1324 	if (mem_info->mdl == NULL)
1325 		return;
1326 
1327 	for (i = 0; i < mem_info->num; i++) {
1328 		if (mem_info->mdl[i].kva != NULL) {
1329 			if (mem_info->mem_type == BNA_MEM_T_DMA) {
1330 				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1331 						dma_pa);
1332 				dma_free_coherent(&bnad->pcidev->dev,
1333 						  mem_info->mdl[i].len,
1334 						  mem_info->mdl[i].kva, dma_pa);
1335 			} else
1336 				kfree(mem_info->mdl[i].kva);
1337 		}
1338 	}
1339 	kfree(mem_info->mdl);
1340 	mem_info->mdl = NULL;
1341 }
1342 
1343 static int
1344 bnad_mem_alloc(struct bnad *bnad,
1345 	       struct bna_mem_info *mem_info)
1346 {
1347 	int i;
1348 	dma_addr_t dma_pa;
1349 
1350 	if ((mem_info->num == 0) || (mem_info->len == 0)) {
1351 		mem_info->mdl = NULL;
1352 		return 0;
1353 	}
1354 
1355 	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1356 				GFP_KERNEL);
1357 	if (mem_info->mdl == NULL)
1358 		return -ENOMEM;
1359 
1360 	if (mem_info->mem_type == BNA_MEM_T_DMA) {
1361 		for (i = 0; i < mem_info->num; i++) {
1362 			mem_info->mdl[i].len = mem_info->len;
1363 			mem_info->mdl[i].kva =
1364 				dma_alloc_coherent(&bnad->pcidev->dev,
1365 						   mem_info->len, &dma_pa,
1366 						   GFP_KERNEL);
1367 			if (mem_info->mdl[i].kva == NULL)
1368 				goto err_return;
1369 
1370 			BNA_SET_DMA_ADDR(dma_pa,
1371 					 &(mem_info->mdl[i].dma));
1372 		}
1373 	} else {
1374 		for (i = 0; i < mem_info->num; i++) {
1375 			mem_info->mdl[i].len = mem_info->len;
1376 			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1377 							GFP_KERNEL);
1378 			if (mem_info->mdl[i].kva == NULL)
1379 				goto err_return;
1380 		}
1381 	}
1382 
1383 	return 0;
1384 
1385 err_return:
1386 	bnad_mem_free(bnad, mem_info);
1387 	return -ENOMEM;
1388 }
1389 
1390 /* Free IRQ for Mailbox */
1391 static void
1392 bnad_mbox_irq_free(struct bnad *bnad)
1393 {
1394 	int irq;
1395 	unsigned long flags;
1396 
1397 	spin_lock_irqsave(&bnad->bna_lock, flags);
1398 	bnad_disable_mbox_irq(bnad);
1399 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1400 
1401 	irq = BNAD_GET_MBOX_IRQ(bnad);
1402 	free_irq(irq, bnad);
1403 }
1404 
1405 /*
1406  * Allocates the IRQ for the mailbox, but keeps it disabled.
1407  * It will be enabled once we get the mbox enable callback
1408  * from bna.
1409  */
1410 static int
1411 bnad_mbox_irq_alloc(struct bnad *bnad)
1412 {
1413 	int		err = 0;
1414 	unsigned long	irq_flags, flags;
1415 	u32	irq;
1416 	irq_handler_t	irq_handler;
1417 
1418 	spin_lock_irqsave(&bnad->bna_lock, flags);
1419 	if (bnad->cfg_flags & BNAD_CF_MSIX) {
1420 		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1421 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1422 		irq_flags = 0;
1423 	} else {
1424 		irq_handler = (irq_handler_t)bnad_isr;
1425 		irq = bnad->pcidev->irq;
1426 		irq_flags = IRQF_SHARED;
1427 	}
1428 
1429 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1430 	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1431 
1432 	/*
1433 	 * Set the Mbox IRQ disable flag, so that the IRQ handler
1434 	 * called from request_irq() for SHARED IRQs does not execute
1435 	 */
1436 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1437 
1438 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1439 
1440 	err = request_irq(irq, irq_handler, irq_flags,
1441 			  bnad->mbox_irq_name, bnad);
1442 
1443 	return err;
1444 }
1445 
1446 static void
1447 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1448 {
1449 	kfree(intr_info->idl);
1450 	intr_info->idl = NULL;
1451 }
1452 
1453 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1454 static int
1455 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1456 		    u32 txrx_id, struct bna_intr_info *intr_info)
1457 {
1458 	int i, vector_start = 0;
1459 	u32 cfg_flags;
1460 	unsigned long flags;
1461 
1462 	spin_lock_irqsave(&bnad->bna_lock, flags);
1463 	cfg_flags = bnad->cfg_flags;
1464 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1465 
1466 	if (cfg_flags & BNAD_CF_MSIX) {
1467 		intr_info->intr_type = BNA_INTR_T_MSIX;
1468 		intr_info->idl = kcalloc(intr_info->num,
1469 					sizeof(struct bna_intr_descr),
1470 					GFP_KERNEL);
1471 		if (!intr_info->idl)
1472 			return -ENOMEM;
1473 
1474 		switch (src) {
1475 		case BNAD_INTR_TX:
1476 			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1477 			break;
1478 
1479 		case BNAD_INTR_RX:
1480 			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1481 					(bnad->num_tx * bnad->num_txq_per_tx) +
1482 					txrx_id;
1483 			break;
1484 
1485 		default:
1486 			BUG();
1487 		}
1488 
1489 		for (i = 0; i < intr_info->num; i++)
1490 			intr_info->idl[i].vector = vector_start + i;
1491 	} else {
1492 		intr_info->intr_type = BNA_INTR_T_INTX;
1493 		intr_info->num = 1;
1494 		intr_info->idl = kcalloc(intr_info->num,
1495 					sizeof(struct bna_intr_descr),
1496 					GFP_KERNEL);
1497 		if (!intr_info->idl)
1498 			return -ENOMEM;
1499 
1500 		switch (src) {
1501 		case BNAD_INTR_TX:
1502 			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1503 			break;
1504 
1505 		case BNAD_INTR_RX:
1506 			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1507 			break;
1508 		}
1509 	}
1510 	return 0;
1511 }
1512 
1513 /* NOTE: Should be called for MSIX only
1514  * Unregisters Tx MSIX vector(s) from the kernel
1515  */
1516 static void
1517 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1518 			int num_txqs)
1519 {
1520 	int i;
1521 	int vector_num;
1522 
1523 	for (i = 0; i < num_txqs; i++) {
1524 		if (tx_info->tcb[i] == NULL)
1525 			continue;
1526 
1527 		vector_num = tx_info->tcb[i]->intr_vector;
1528 		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1529 	}
1530 }
1531 
1532 /* NOTE: Should be called for MSIX only
1533  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1534  */
1535 static int
1536 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1537 			u32 tx_id, int num_txqs)
1538 {
1539 	int i;
1540 	int err;
1541 	int vector_num;
1542 
1543 	for (i = 0; i < num_txqs; i++) {
1544 		vector_num = tx_info->tcb[i]->intr_vector;
1545 		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1546 				tx_id + tx_info->tcb[i]->id);
1547 		err = request_irq(bnad->msix_table[vector_num].vector,
1548 				  (irq_handler_t)bnad_msix_tx, 0,
1549 				  tx_info->tcb[i]->name,
1550 				  tx_info->tcb[i]);
1551 		if (err)
1552 			goto err_return;
1553 	}
1554 
1555 	return 0;
1556 
1557 err_return:
1558 	if (i > 0)
1559 		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1560 	return -1;
1561 }
1562 
1563 /* NOTE: Should be called for MSIX only
1564  * Unregisters Rx MSIX vector(s) from the kernel
1565  */
1566 static void
1567 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1568 			int num_rxps)
1569 {
1570 	int i;
1571 	int vector_num;
1572 
1573 	for (i = 0; i < num_rxps; i++) {
1574 		if (rx_info->rx_ctrl[i].ccb == NULL)
1575 			continue;
1576 
1577 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1578 		free_irq(bnad->msix_table[vector_num].vector,
1579 			 rx_info->rx_ctrl[i].ccb);
1580 	}
1581 }
1582 
1583 /* NOTE: Should be called for MSIX only
1584  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1585  */
1586 static int
1587 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1588 			u32 rx_id, int num_rxps)
1589 {
1590 	int i;
1591 	int err;
1592 	int vector_num;
1593 
1594 	for (i = 0; i < num_rxps; i++) {
1595 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1596 		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1597 			bnad->netdev->name,
1598 			rx_id + rx_info->rx_ctrl[i].ccb->id);
1599 		err = request_irq(bnad->msix_table[vector_num].vector,
1600 				  (irq_handler_t)bnad_msix_rx, 0,
1601 				  rx_info->rx_ctrl[i].ccb->name,
1602 				  rx_info->rx_ctrl[i].ccb);
1603 		if (err)
1604 			goto err_return;
1605 	}
1606 
1607 	return 0;
1608 
1609 err_return:
1610 	if (i > 0)
1611 		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1612 	return -1;
1613 }
1614 
1615 /* Free Tx object Resources */
1616 static void
1617 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1618 {
1619 	int i;
1620 
1621 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1622 		if (res_info[i].res_type == BNA_RES_T_MEM)
1623 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1624 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1625 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1626 	}
1627 }
1628 
1629 /* Allocates memory and interrupt resources for Tx object */
1630 static int
1631 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1632 		  u32 tx_id)
1633 {
1634 	int i, err = 0;
1635 
1636 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1637 		if (res_info[i].res_type == BNA_RES_T_MEM)
1638 			err = bnad_mem_alloc(bnad,
1639 					&res_info[i].res_u.mem_info);
1640 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1641 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1642 					&res_info[i].res_u.intr_info);
1643 		if (err)
1644 			goto err_return;
1645 	}
1646 	return 0;
1647 
1648 err_return:
1649 	bnad_tx_res_free(bnad, res_info);
1650 	return err;
1651 }
1652 
1653 /* Free Rx object Resources */
1654 static void
1655 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1656 {
1657 	int i;
1658 
1659 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1660 		if (res_info[i].res_type == BNA_RES_T_MEM)
1661 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1662 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1663 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1664 	}
1665 }
1666 
1667 /* Allocates memory and interrupt resources for Rx object */
1668 static int
1669 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1670 		  uint rx_id)
1671 {
1672 	int i, err = 0;
1673 
1674 	/* All memory needs to be allocated before setup_ccbs */
1675 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1676 		if (res_info[i].res_type == BNA_RES_T_MEM)
1677 			err = bnad_mem_alloc(bnad,
1678 					&res_info[i].res_u.mem_info);
1679 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1680 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1681 					&res_info[i].res_u.intr_info);
1682 		if (err)
1683 			goto err_return;
1684 	}
1685 	return 0;
1686 
1687 err_return:
1688 	bnad_rx_res_free(bnad, res_info);
1689 	return err;
1690 }
1691 
1692 /* Timer callbacks */
1693 /* a) IOC timer */
1694 static void
1695 bnad_ioc_timeout(unsigned long data)
1696 {
1697 	struct bnad *bnad = (struct bnad *)data;
1698 	unsigned long flags;
1699 
1700 	spin_lock_irqsave(&bnad->bna_lock, flags);
1701 	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1702 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1703 }
1704 
1705 static void
1706 bnad_ioc_hb_check(unsigned long data)
1707 {
1708 	struct bnad *bnad = (struct bnad *)data;
1709 	unsigned long flags;
1710 
1711 	spin_lock_irqsave(&bnad->bna_lock, flags);
1712 	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1713 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1714 }
1715 
1716 static void
1717 bnad_iocpf_timeout(unsigned long data)
1718 {
1719 	struct bnad *bnad = (struct bnad *)data;
1720 	unsigned long flags;
1721 
1722 	spin_lock_irqsave(&bnad->bna_lock, flags);
1723 	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1724 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1725 }
1726 
1727 static void
1728 bnad_iocpf_sem_timeout(unsigned long data)
1729 {
1730 	struct bnad *bnad = (struct bnad *)data;
1731 	unsigned long flags;
1732 
1733 	spin_lock_irqsave(&bnad->bna_lock, flags);
1734 	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1735 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1736 }
1737 
1738 /*
1739  * All timer routines use bnad->bna_lock to protect against
1740  * the following race, which may occur in case of no locking:
1741  *	Time	CPU m	CPU n
1742  *	0       1 = test_bit
1743  *	1			clear_bit
1744  *	2			del_timer_sync
1745  *	3	mod_timer
1746  */
1747 
1748 /* b) Dynamic Interrupt Moderation Timer */
1749 static void
1750 bnad_dim_timeout(unsigned long data)
1751 {
1752 	struct bnad *bnad = (struct bnad *)data;
1753 	struct bnad_rx_info *rx_info;
1754 	struct bnad_rx_ctrl *rx_ctrl;
1755 	int i, j;
1756 	unsigned long flags;
1757 
1758 	if (!netif_carrier_ok(bnad->netdev))
1759 		return;
1760 
1761 	spin_lock_irqsave(&bnad->bna_lock, flags);
1762 	for (i = 0; i < bnad->num_rx; i++) {
1763 		rx_info = &bnad->rx_info[i];
1764 		if (!rx_info->rx)
1765 			continue;
1766 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1767 			rx_ctrl = &rx_info->rx_ctrl[j];
1768 			if (!rx_ctrl->ccb)
1769 				continue;
1770 			bna_rx_dim_update(rx_ctrl->ccb);
1771 		}
1772 	}
1773 
1774 	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1775 	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1776 		mod_timer(&bnad->dim_timer,
1777 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1778 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1779 }
1780 
1781 /* c)  Statistics Timer */
1782 static void
1783 bnad_stats_timeout(unsigned long data)
1784 {
1785 	struct bnad *bnad = (struct bnad *)data;
1786 	unsigned long flags;
1787 
1788 	if (!netif_running(bnad->netdev) ||
1789 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1790 		return;
1791 
1792 	spin_lock_irqsave(&bnad->bna_lock, flags);
1793 	bna_hw_stats_get(&bnad->bna);
1794 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1795 }
1796 
1797 /*
1798  * Set up timer for DIM
1799  * Called with bnad->bna_lock held
1800  */
1801 void
1802 bnad_dim_timer_start(struct bnad *bnad)
1803 {
1804 	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1805 	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1806 		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1807 			    (unsigned long)bnad);
1808 		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1809 		mod_timer(&bnad->dim_timer,
1810 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1811 	}
1812 }
1813 
1814 /*
1815  * Set up timer for statistics
1816  * Called with mutex_lock(&bnad->conf_mutex) held
1817  */
1818 static void
1819 bnad_stats_timer_start(struct bnad *bnad)
1820 {
1821 	unsigned long flags;
1822 
1823 	spin_lock_irqsave(&bnad->bna_lock, flags);
1824 	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1825 		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1826 			    (unsigned long)bnad);
1827 		mod_timer(&bnad->stats_timer,
1828 			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1829 	}
1830 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1831 }
1832 
1833 /*
1834  * Stops the stats timer
1835  * Called with mutex_lock(&bnad->conf_mutex) held
1836  */
1837 static void
1838 bnad_stats_timer_stop(struct bnad *bnad)
1839 {
1840 	int to_del = 0;
1841 	unsigned long flags;
1842 
1843 	spin_lock_irqsave(&bnad->bna_lock, flags);
1844 	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1845 		to_del = 1;
1846 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1847 	if (to_del)
1848 		del_timer_sync(&bnad->stats_timer);
1849 }
1850 
1851 /* Utilities */
1852 
1853 static void
1854 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1855 {
1856 	int i = 1; /* Index 0 has broadcast address */
1857 	struct netdev_hw_addr *mc_addr;
1858 
1859 	netdev_for_each_mc_addr(mc_addr, netdev) {
1860 		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1861 		i++;
1862 	}
1863 }
1864 
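/*
 * NAPI poll handler: process up to 'budget' Rx completions; when the
 * budget is not exhausted, complete NAPI and re-enable the Rx interrupt.
 */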
1865 static int
1866 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1867 {
1868 	struct bnad_rx_ctrl *rx_ctrl =
1869 		container_of(napi, struct bnad_rx_ctrl, napi);
1870 	struct bnad *bnad = rx_ctrl->bnad;
1871 	int rcvd = 0;
1872 
1873 	rx_ctrl->rx_poll_ctr++;
1874 
1875 	if (!netif_carrier_ok(bnad->netdev))
1876 		goto poll_exit;
1877 
1878 	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1879 	if (rcvd >= budget)
1880 		return rcvd;
1881 
1882 poll_exit:
1883 	napi_complete(napi);
1884 
1885 	rx_ctrl->rx_complete++;
1886 
1887 	if (rx_ctrl->ccb)
1888 		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1889 
1890 	return rcvd;
1891 }
1892 
1893 #define BNAD_NAPI_POLL_QUOTA		64
1894 static void
1895 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1896 {
1897 	struct bnad_rx_ctrl *rx_ctrl;
1898 	int i;
1899 
1900 	/* Initialize NAPI; it is enabled later from bnad_cb_rx_post() */
1901 	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
1902 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1903 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1904 			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1905 	}
1906 }
1907 
1908 static void
1909 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1910 {
1911 	int i;
1912 
1913 	/* NAPI is disabled in bnad_rx_cleanup(); just delete the instances here */
1914 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1915 		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1916 }
1917 
1918 /* Should be called with conf_lock held */
1919 void
1920 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1921 {
1922 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1923 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1924 	unsigned long flags;
1925 
1926 	if (!tx_info->tx)
1927 		return;
1928 
1929 	init_completion(&bnad->bnad_completions.tx_comp);
1930 	spin_lock_irqsave(&bnad->bna_lock, flags);
1931 	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1932 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1933 	wait_for_completion(&bnad->bnad_completions.tx_comp);
1934 
1935 	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1936 		bnad_tx_msix_unregister(bnad, tx_info,
1937 			bnad->num_txq_per_tx);
1938 
1939 	spin_lock_irqsave(&bnad->bna_lock, flags);
1940 	bna_tx_destroy(tx_info->tx);
1941 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1942 
1943 	tx_info->tx = NULL;
1944 	tx_info->tx_id = 0;
1945 
1946 	bnad_tx_res_free(bnad, res_info);
1947 }
1948 
1949 /* Should be called with conf_lock held */
1950 int
1951 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1952 {
1953 	int err;
1954 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1955 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1956 	struct bna_intr_info *intr_info =
1957 			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1958 	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1959 	static const struct bna_tx_event_cbfn tx_cbfn = {
1960 		.tcb_setup_cbfn = bnad_cb_tcb_setup,
1961 		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1962 		.tx_stall_cbfn = bnad_cb_tx_stall,
1963 		.tx_resume_cbfn = bnad_cb_tx_resume,
1964 		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1965 	};
1966 
1967 	struct bna_tx *tx;
1968 	unsigned long flags;
1969 
1970 	tx_info->tx_id = tx_id;
1971 
1972 	/* Initialize the Tx object configuration */
1973 	tx_config->num_txq = bnad->num_txq_per_tx;
1974 	tx_config->txq_depth = bnad->txq_depth;
1975 	tx_config->tx_type = BNA_TX_T_REGULAR;
1976 	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1977 
1978 	/* Get BNA's resource requirement for one tx object */
1979 	spin_lock_irqsave(&bnad->bna_lock, flags);
1980 	bna_tx_res_req(bnad->num_txq_per_tx,
1981 		bnad->txq_depth, res_info);
1982 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1983 
1984 	/* Fill Unmap Q memory requirements */
1985 	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1986 			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1987 			bnad->txq_depth));
1988 
1989 	/* Allocate resources */
1990 	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1991 	if (err)
1992 		return err;
1993 
1994 	/* Ask BNA to create one Tx object, supplying required resources */
1995 	spin_lock_irqsave(&bnad->bna_lock, flags);
1996 	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1997 			tx_info);
1998 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1999 	if (!tx) {
2000 		err = -ENOMEM;
2001 		goto err_return;
2002 	}
2003 	tx_info->tx = tx;
2004 
2005 	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2006 			(work_func_t)bnad_tx_cleanup);
2007 
2008 	/* Register ISR for the Tx object */
2009 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2010 		err = bnad_tx_msix_register(bnad, tx_info,
2011 			tx_id, bnad->num_txq_per_tx);
2012 		if (err)
2013 			goto cleanup_tx;
2014 	}
2015 
2016 	spin_lock_irqsave(&bnad->bna_lock, flags);
2017 	bna_tx_enable(tx);
2018 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2019 
2020 	return 0;
2021 
2022 cleanup_tx:
2023 	spin_lock_irqsave(&bnad->bna_lock, flags);
2024 	bna_tx_destroy(tx_info->tx);
2025 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2026 	tx_info->tx = NULL;
2027 	tx_info->tx_id = 0;
2028 err_return:
2029 	bnad_tx_res_free(bnad, res_info);
2030 	return err;
2031 }
2032 
/* Set up the Rx config for bna_rx_create(); bnad decides the configuration */
2035 static void
2036 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2037 {
2038 	memset(rx_config, 0, sizeof(*rx_config));
2039 	rx_config->rx_type = BNA_RX_T_REGULAR;
2040 	rx_config->num_paths = bnad->num_rxp_per_rx;
2041 	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2042 
2043 	if (bnad->num_rxp_per_rx > 1) {
2044 		rx_config->rss_status = BNA_STATUS_T_ENABLED;
2045 		rx_config->rss_config.hash_type =
2046 				(BFI_ENET_RSS_IPV6 |
2047 				 BFI_ENET_RSS_IPV6_TCP |
2048 				 BFI_ENET_RSS_IPV4 |
2049 				 BFI_ENET_RSS_IPV4_TCP);
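		/* The hash result is masked down to an Rx path index; this
		 * assumes num_rxp_per_rx is a power of two.
		 */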
2050 		rx_config->rss_config.hash_mask =
2051 				bnad->num_rxp_per_rx - 1;
2052 		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2053 			sizeof(rx_config->rss_config.toeplitz_hash_key));
2054 	} else {
2055 		rx_config->rss_status = BNA_STATUS_T_DISABLED;
2056 		memset(&rx_config->rss_config, 0,
2057 		       sizeof(rx_config->rss_config));
2058 	}
2059 
2060 	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2061 	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2062 
	/* BNA_RXP_SINGLE - one data-buffer queue
	 * BNA_RXP_SLR - one small-buffer and one large-buffer queue
	 * BNA_RXP_HDS - one header-buffer and one data-buffer queue
	 */
2067 	/* TODO: configurable param for queue type */
2068 	rx_config->rxp_type = BNA_RXP_SLR;
2069 
2070 	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2071 	    rx_config->frame_size > 4096) {
		/* Even though size_routing_enable is set in SLR mode,
		 * small packets may still get routed to the same rxq.
		 * Set buf_size to 2048 instead of PAGE_SIZE.
		 */
2076 		rx_config->q0_buf_size = 2048;
		/* should be a multiple of 2 */
2078 		rx_config->q0_num_vecs = 4;
2079 		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2080 		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2081 	} else {
2082 		rx_config->q0_buf_size = rx_config->frame_size;
2083 		rx_config->q0_num_vecs = 1;
2084 		rx_config->q0_depth = bnad->rxq_depth;
2085 	}
2086 
	/* initialize q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2088 	if (rx_config->rxp_type == BNA_RXP_SLR) {
2089 		rx_config->q1_depth = bnad->rxq_depth;
2090 		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2091 	}
2092 
2093 	rx_config->vlan_strip_status =
2094 		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2095 		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2096 }
2097 
2098 static void
2099 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2100 {
2101 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2102 	int i;
2103 
2104 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
2105 		rx_info->rx_ctrl[i].bnad = bnad;
2106 }
2107 
2108 /* Called with mutex_lock(&bnad->conf_mutex) held */
2109 static u32
2110 bnad_reinit_rx(struct bnad *bnad)
2111 {
2112 	struct net_device *netdev = bnad->netdev;
2113 	u32 err = 0, current_err = 0;
2114 	u32 rx_id = 0, count = 0;
2115 	unsigned long flags;
2116 
2117 	/* destroy and create new rx objects */
2118 	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2119 		if (!bnad->rx_info[rx_id].rx)
2120 			continue;
2121 		bnad_destroy_rx(bnad, rx_id);
2122 	}
2123 
2124 	spin_lock_irqsave(&bnad->bna_lock, flags);
2125 	bna_enet_mtu_set(&bnad->bna.enet,
2126 			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2127 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2128 
2129 	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2130 		count++;
2131 		current_err = bnad_setup_rx(bnad, rx_id);
2132 		if (current_err && !err) {
2133 			err = current_err;
2134 			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2135 		}
2136 	}
2137 
2138 	/* restore rx configuration */
2139 	if (bnad->rx_info[0].rx && !err) {
2140 		bnad_restore_vlans(bnad, 0);
2141 		bnad_enable_default_bcast(bnad);
2142 		spin_lock_irqsave(&bnad->bna_lock, flags);
2143 		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2144 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2145 		bnad_set_rx_mode(netdev);
2146 	}
2147 
2148 	return count;
2149 }
2150 
2151 /* Called with bnad_conf_lock() held */
2152 void
2153 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2154 {
2155 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2156 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2157 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2158 	unsigned long flags;
2159 	int to_del = 0;
2160 
2161 	if (!rx_info->rx)
2162 		return;
2163 
2164 	if (0 == rx_id) {
2165 		spin_lock_irqsave(&bnad->bna_lock, flags);
2166 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2167 		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2168 			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2169 			to_del = 1;
2170 		}
2171 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2172 		if (to_del)
2173 			del_timer_sync(&bnad->dim_timer);
2174 	}
2175 
2176 	init_completion(&bnad->bnad_completions.rx_comp);
2177 	spin_lock_irqsave(&bnad->bna_lock, flags);
2178 	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2179 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2180 	wait_for_completion(&bnad->bnad_completions.rx_comp);
2181 
2182 	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2183 		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2184 
2185 	bnad_napi_delete(bnad, rx_id);
2186 
2187 	spin_lock_irqsave(&bnad->bna_lock, flags);
2188 	bna_rx_destroy(rx_info->rx);
2189 
2190 	rx_info->rx = NULL;
2191 	rx_info->rx_id = 0;
2192 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2193 
2194 	bnad_rx_res_free(bnad, res_info);
2195 }
2196 
2197 /* Called with mutex_lock(&bnad->conf_mutex) held */
2198 int
2199 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2200 {
2201 	int err;
2202 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2203 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2204 	struct bna_intr_info *intr_info =
2205 			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2206 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2207 	static const struct bna_rx_event_cbfn rx_cbfn = {
2208 		.rcb_setup_cbfn = NULL,
2209 		.rcb_destroy_cbfn = NULL,
2210 		.ccb_setup_cbfn = bnad_cb_ccb_setup,
2211 		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2212 		.rx_stall_cbfn = bnad_cb_rx_stall,
2213 		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2214 		.rx_post_cbfn = bnad_cb_rx_post,
2215 	};
2216 	struct bna_rx *rx;
2217 	unsigned long flags;
2218 
2219 	rx_info->rx_id = rx_id;
2220 
2221 	/* Initialize the Rx object configuration */
2222 	bnad_init_rx_config(bnad, rx_config);
2223 
2224 	/* Get BNA's resource requirement for one Rx object */
2225 	spin_lock_irqsave(&bnad->bna_lock, flags);
2226 	bna_rx_res_req(rx_config, res_info);
2227 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2228 
2229 	/* Fill Unmap Q memory requirements */
2230 	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2231 				 rx_config->num_paths,
2232 			(rx_config->q0_depth *
2233 			 sizeof(struct bnad_rx_unmap)) +
2234 			 sizeof(struct bnad_rx_unmap_q));
2235 
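	/* For SLR/HDS, the small-/header-buffer queue (q1) gets its own
	 * unmap queue, sized the same way from q1_depth.
	 */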
2236 	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2237 		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2238 					 rx_config->num_paths,
2239 				(rx_config->q1_depth *
2240 				 sizeof(struct bnad_rx_unmap) +
2241 				 sizeof(struct bnad_rx_unmap_q)));
2242 	}
2243 	/* Allocate resource */
2244 	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2245 	if (err)
2246 		return err;
2247 
2248 	bnad_rx_ctrl_init(bnad, rx_id);
2249 
2250 	/* Ask BNA to create one Rx object, supplying required resources */
2251 	spin_lock_irqsave(&bnad->bna_lock, flags);
2252 	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2253 			rx_info);
2254 	if (!rx) {
2255 		err = -ENOMEM;
2256 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2257 		goto err_return;
2258 	}
2259 	rx_info->rx = rx;
2260 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2261 
2262 	INIT_WORK(&rx_info->rx_cleanup_work,
2263 			(work_func_t)(bnad_rx_cleanup));
2264 
	/*
	 * Initialize NAPI with its state set to NAPI_STATE_SCHED so that
	 * the IRQ handler cannot schedule NAPI at this point.
	 */
2269 	bnad_napi_add(bnad, rx_id);
2270 
2271 	/* Register ISR for the Rx object */
2272 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2273 		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2274 						rx_config->num_paths);
2275 		if (err)
2276 			goto err_return;
2277 	}
2278 
2279 	spin_lock_irqsave(&bnad->bna_lock, flags);
2280 	if (0 == rx_id) {
2281 		/* Set up Dynamic Interrupt Moderation Vector */
2282 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2283 			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2284 
2285 		/* Enable VLAN filtering only on the default Rx */
2286 		bna_rx_vlanfilter_enable(rx);
2287 
2288 		/* Start the DIM timer */
2289 		bnad_dim_timer_start(bnad);
2290 	}
2291 
2292 	bna_rx_enable(rx);
2293 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2294 
2295 	return 0;
2296 
2297 err_return:
2298 	bnad_destroy_rx(bnad, rx_id);
2299 	return err;
2300 }
2301 
2302 /* Called with conf_lock & bnad->bna_lock held */
2303 void
2304 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2305 {
2306 	struct bnad_tx_info *tx_info;
2307 
2308 	tx_info = &bnad->tx_info[0];
2309 	if (!tx_info->tx)
2310 		return;
2311 
2312 	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2313 }
2314 
2315 /* Called with conf_lock & bnad->bna_lock held */
2316 void
2317 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2318 {
2319 	struct bnad_rx_info *rx_info;
2320 	int	i;
2321 
2322 	for (i = 0; i < bnad->num_rx; i++) {
2323 		rx_info = &bnad->rx_info[i];
2324 		if (!rx_info->rx)
2325 			continue;
2326 		bna_rx_coalescing_timeo_set(rx_info->rx,
2327 				bnad->rx_coalescing_timeo);
2328 	}
2329 }
2330 
2331 /*
2332  * Called with bnad->bna_lock held
2333  */
2334 int
2335 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2336 {
2337 	int ret;
2338 
2339 	if (!is_valid_ether_addr(mac_addr))
2340 		return -EADDRNOTAVAIL;
2341 
2342 	/* If datapath is down, pretend everything went through */
2343 	if (!bnad->rx_info[0].rx)
2344 		return 0;
2345 
2346 	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2347 	if (ret != BNA_CB_SUCCESS)
2348 		return -EADDRNOTAVAIL;
2349 
2350 	return 0;
2351 }
2352 
2353 /* Should be called with conf_lock held */
2354 int
2355 bnad_enable_default_bcast(struct bnad *bnad)
2356 {
2357 	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2358 	int ret;
2359 	unsigned long flags;
2360 
2361 	init_completion(&bnad->bnad_completions.mcast_comp);
2362 
2363 	spin_lock_irqsave(&bnad->bna_lock, flags);
2364 	ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2365 			       bnad_cb_rx_mcast_add);
2366 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2367 
2368 	if (ret == BNA_CB_SUCCESS)
2369 		wait_for_completion(&bnad->bnad_completions.mcast_comp);
2370 	else
2371 		return -ENODEV;
2372 
2373 	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2374 		return -ENODEV;
2375 
2376 	return 0;
2377 }
2378 
2379 /* Called with mutex_lock(&bnad->conf_mutex) held */
2380 void
2381 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2382 {
2383 	u16 vid;
2384 	unsigned long flags;
2385 
2386 	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2387 		spin_lock_irqsave(&bnad->bna_lock, flags);
2388 		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2389 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2390 	}
2391 }
2392 
2393 /* Statistics utilities */
2394 void
2395 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2396 {
2397 	int i, j;
2398 
	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			struct bna_ccb *ccb = bnad->rx_info[i].rx_ctrl[j].ccb;

			if (!ccb)
				continue;

			stats->rx_packets += ccb->rcb[0]->rxq->rx_packets;
			stats->rx_bytes += ccb->rcb[0]->rxq->rx_bytes;

			if (ccb->rcb[1] && ccb->rcb[1]->rxq) {
				stats->rx_packets +=
					ccb->rcb[1]->rxq->rx_packets;
				stats->rx_bytes +=
					ccb->rcb[1]->rxq->rx_bytes;
			}
		}
	}
2419 	for (i = 0; i < bnad->num_tx; i++) {
2420 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
2421 			if (bnad->tx_info[i].tcb[j]) {
2422 				stats->tx_packets +=
2423 				bnad->tx_info[i].tcb[j]->txq->tx_packets;
2424 				stats->tx_bytes +=
2425 					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2426 			}
2427 		}
2428 	}
2429 }
2430 
2431 /*
2432  * Must be called with the bna_lock held.
2433  */
2434 void
2435 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2436 {
2437 	struct bfi_enet_stats_mac *mac_stats;
2438 	u32 bmap;
2439 	int i;
2440 
2441 	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2442 	stats->rx_errors =
2443 		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2444 		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2445 		mac_stats->rx_undersize;
2446 	stats->tx_errors = mac_stats->tx_fcs_error +
2447 					mac_stats->tx_undersize;
2448 	stats->rx_dropped = mac_stats->rx_drop;
2449 	stats->tx_dropped = mac_stats->tx_drop;
2450 	stats->multicast = mac_stats->rx_multicast;
2451 	stats->collisions = mac_stats->tx_total_collision;
2452 
2453 	stats->rx_length_errors = mac_stats->rx_frame_length_error;
2454 
2455 	/* receive ring buffer overflow  ?? */
2456 
2457 	stats->rx_crc_errors = mac_stats->rx_fcs_error;
2458 	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* receiver FIFO overrun */
2460 	bmap = bna_rx_rid_mask(&bnad->bna);
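	/* Walk the Rx function ID bitmask; only the first active RxF's
	 * frame_drops are accounted for (note the break below).
	 */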
2461 	for (i = 0; bmap; i++) {
2462 		if (bmap & 1) {
2463 			stats->rx_fifo_errors +=
2464 				bnad->stats.bna_stats->
2465 					hw_stats.rxf_stats[i].frame_drops;
2466 			break;
2467 		}
2468 		bmap >>= 1;
2469 	}
2470 }
2471 
2472 static void
2473 bnad_mbox_irq_sync(struct bnad *bnad)
2474 {
2475 	u32 irq;
2476 	unsigned long flags;
2477 
2478 	spin_lock_irqsave(&bnad->bna_lock, flags);
2479 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2480 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2481 	else
2482 		irq = bnad->pcidev->irq;
2483 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2484 
2485 	synchronize_irq(irq);
2486 }
2487 
/* Utility used by bnad_start_xmit to prepare an skb for TSO */
2489 static int
2490 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2491 {
2492 	int err;
2493 
2494 	err = skb_cow_head(skb, 0);
2495 	if (err < 0) {
2496 		BNAD_UPDATE_CTR(bnad, tso_err);
2497 		return err;
2498 	}
2499 
2500 	/*
2501 	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2502 	 * excluding the length field.
2503 	 */
2504 	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2505 		struct iphdr *iph = ip_hdr(skb);
2506 
2507 		/* Do we really need these? */
2508 		iph->tot_len = 0;
2509 		iph->check = 0;
2510 
2511 		tcp_hdr(skb)->check =
2512 			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2513 					   IPPROTO_TCP, 0);
2514 		BNAD_UPDATE_CTR(bnad, tso4);
2515 	} else {
2516 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2517 
2518 		ipv6h->payload_len = 0;
2519 		tcp_hdr(skb)->check =
2520 			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2521 					 IPPROTO_TCP, 0);
2522 		BNAD_UPDATE_CTR(bnad, tso6);
2523 	}
2524 
2525 	return 0;
2526 }
2527 
2528 /*
2529  * Initialize Q numbers depending on Rx Paths
2530  * Called with bnad->bna_lock held, because of cfg_flags
2531  * access.
2532  */
2533 static void
2534 bnad_q_num_init(struct bnad *bnad)
2535 {
2536 	int rxps;
2537 
2538 	rxps = min((uint)num_online_cpus(),
2539 			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
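	/* e.g. on an 8-CPU system in MSI-X mode this caps the number of
	 * Rx paths at min(8, BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)
	 */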
2540 
2541 	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2542 		rxps = 1;	/* INTx */
2543 
2544 	bnad->num_rx = 1;
2545 	bnad->num_tx = 1;
2546 	bnad->num_rxp_per_rx = rxps;
2547 	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2548 }
2549 
/*
 * Adjusts the Q numbers, given a number of MSI-X vectors.
 * Preference is given to RSS over Tx priority queues, so only one
 * Tx queue is used; the "temp" parameter is unused.
 * Called with bnad->bna_lock held because of cfg_flags access.
 */
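/*
 * Example: with one TxQ and BNAD_MAILBOX_MSIX_VECTORS mailbox vectors,
 * each remaining MSI-X vector becomes one Rx path; when fewer than
 * (1 + bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) vectors are
 * available, a single Rx path is used instead.
 */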
2556 static void
2557 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2558 {
2559 	bnad->num_txq_per_tx = 1;
2560 	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2561 	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2562 	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
2563 		bnad->num_rxp_per_rx = msix_vectors -
2564 			(bnad->num_tx * bnad->num_txq_per_tx) -
2565 			BNAD_MAILBOX_MSIX_VECTORS;
2566 	} else
2567 		bnad->num_rxp_per_rx = 1;
2568 }
2569 
2570 /* Enable / disable ioceth */
2571 static int
2572 bnad_ioceth_disable(struct bnad *bnad)
2573 {
2574 	unsigned long flags;
2575 	int err = 0;
2576 
2577 	spin_lock_irqsave(&bnad->bna_lock, flags);
2578 	init_completion(&bnad->bnad_completions.ioc_comp);
2579 	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2580 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2581 
2582 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2583 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2584 
2585 	err = bnad->bnad_completions.ioc_comp_status;
2586 	return err;
2587 }
2588 
2589 static int
2590 bnad_ioceth_enable(struct bnad *bnad)
2591 {
2592 	int err = 0;
2593 	unsigned long flags;
2594 
2595 	spin_lock_irqsave(&bnad->bna_lock, flags);
2596 	init_completion(&bnad->bnad_completions.ioc_comp);
2597 	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2598 	bna_ioceth_enable(&bnad->bna.ioceth);
2599 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2600 
2601 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2602 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2603 
2604 	err = bnad->bnad_completions.ioc_comp_status;
2605 
2606 	return err;
2607 }
2608 
2609 /* Free BNA resources */
2610 static void
2611 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2612 		u32 res_val_max)
2613 {
2614 	int i;
2615 
2616 	for (i = 0; i < res_val_max; i++)
2617 		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2618 }
2619 
2620 /* Allocates memory and interrupt resources for BNA */
2621 static int
2622 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2623 		u32 res_val_max)
2624 {
2625 	int i, err;
2626 
2627 	for (i = 0; i < res_val_max; i++) {
2628 		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2629 		if (err)
2630 			goto err_return;
2631 	}
2632 	return 0;
2633 
2634 err_return:
2635 	bnad_res_free(bnad, res_info, res_val_max);
2636 	return err;
2637 }
2638 
2639 /* Interrupt enable / disable */
2640 static void
2641 bnad_enable_msix(struct bnad *bnad)
2642 {
2643 	int i, ret;
2644 	unsigned long flags;
2645 
2646 	spin_lock_irqsave(&bnad->bna_lock, flags);
2647 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2648 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2649 		return;
2650 	}
2651 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2652 
2653 	if (bnad->msix_table)
2654 		return;
2655 
2656 	bnad->msix_table =
2657 		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2658 
2659 	if (!bnad->msix_table)
2660 		goto intx_mode;
2661 
2662 	for (i = 0; i < bnad->msix_num; i++)
2663 		bnad->msix_table[i].entry = i;
2664 
2665 	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2666 				    1, bnad->msix_num);
2667 	if (ret < 0) {
2668 		goto intx_mode;
2669 	} else if (ret < bnad->msix_num) {
2670 		dev_warn(&bnad->pcidev->dev,
2671 			 "%d MSI-X vectors allocated < %d requested\n",
2672 			 ret, bnad->msix_num);
2673 
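		/* Shrink the queue configuration to fit the number of
		 * vectors that were actually granted.
		 */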
2674 		spin_lock_irqsave(&bnad->bna_lock, flags);
2675 		/* ret = #of vectors that we got */
2676 		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2677 			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2678 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2679 
2680 		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2681 			 BNAD_MAILBOX_MSIX_VECTORS;
2682 
2683 		if (bnad->msix_num > ret) {
2684 			pci_disable_msix(bnad->pcidev);
2685 			goto intx_mode;
2686 		}
2687 	}
2688 
2689 	pci_intx(bnad->pcidev, 0);
2690 
2691 	return;
2692 
2693 intx_mode:
2694 	dev_warn(&bnad->pcidev->dev,
2695 		 "MSI-X enable failed - operating in INTx mode\n");
2696 
2697 	kfree(bnad->msix_table);
2698 	bnad->msix_table = NULL;
2699 	bnad->msix_num = 0;
2700 	spin_lock_irqsave(&bnad->bna_lock, flags);
2701 	bnad->cfg_flags &= ~BNAD_CF_MSIX;
2702 	bnad_q_num_init(bnad);
2703 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2704 }
2705 
2706 static void
2707 bnad_disable_msix(struct bnad *bnad)
2708 {
2709 	u32 cfg_flags;
2710 	unsigned long flags;
2711 
2712 	spin_lock_irqsave(&bnad->bna_lock, flags);
2713 	cfg_flags = bnad->cfg_flags;
2714 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2715 		bnad->cfg_flags &= ~BNAD_CF_MSIX;
2716 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2717 
2718 	if (cfg_flags & BNAD_CF_MSIX) {
2719 		pci_disable_msix(bnad->pcidev);
2720 		kfree(bnad->msix_table);
2721 		bnad->msix_table = NULL;
2722 	}
2723 }
2724 
2725 /* Netdev entry points */
2726 static int
2727 bnad_open(struct net_device *netdev)
2728 {
2729 	int err;
2730 	struct bnad *bnad = netdev_priv(netdev);
2731 	struct bna_pause_config pause_config;
2732 	unsigned long flags;
2733 
2734 	mutex_lock(&bnad->conf_mutex);
2735 
2736 	/* Tx */
2737 	err = bnad_setup_tx(bnad, 0);
2738 	if (err)
2739 		goto err_return;
2740 
2741 	/* Rx */
2742 	err = bnad_setup_rx(bnad, 0);
2743 	if (err)
2744 		goto cleanup_tx;
2745 
2746 	/* Port */
2747 	pause_config.tx_pause = 0;
2748 	pause_config.rx_pause = 0;
2749 
2750 	spin_lock_irqsave(&bnad->bna_lock, flags);
2751 	bna_enet_mtu_set(&bnad->bna.enet,
2752 			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2753 	bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2754 	bna_enet_enable(&bnad->bna.enet);
2755 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2756 
2757 	/* Enable broadcast */
2758 	bnad_enable_default_bcast(bnad);
2759 
2760 	/* Restore VLANs, if any */
2761 	bnad_restore_vlans(bnad, 0);
2762 
2763 	/* Set the UCAST address */
2764 	spin_lock_irqsave(&bnad->bna_lock, flags);
2765 	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2766 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2767 
2768 	/* Start the stats timer */
2769 	bnad_stats_timer_start(bnad);
2770 
2771 	mutex_unlock(&bnad->conf_mutex);
2772 
2773 	return 0;
2774 
2775 cleanup_tx:
2776 	bnad_destroy_tx(bnad, 0);
2777 
2778 err_return:
2779 	mutex_unlock(&bnad->conf_mutex);
2780 	return err;
2781 }
2782 
2783 static int
2784 bnad_stop(struct net_device *netdev)
2785 {
2786 	struct bnad *bnad = netdev_priv(netdev);
2787 	unsigned long flags;
2788 
2789 	mutex_lock(&bnad->conf_mutex);
2790 
2791 	/* Stop the stats timer */
2792 	bnad_stats_timer_stop(bnad);
2793 
2794 	init_completion(&bnad->bnad_completions.enet_comp);
2795 
2796 	spin_lock_irqsave(&bnad->bna_lock, flags);
2797 	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2798 			bnad_cb_enet_disabled);
2799 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2800 
2801 	wait_for_completion(&bnad->bnad_completions.enet_comp);
2802 
2803 	bnad_destroy_tx(bnad, 0);
2804 	bnad_destroy_rx(bnad, 0);
2805 
2806 	/* Synchronize mailbox IRQ */
2807 	bnad_mbox_irq_sync(bnad);
2808 
2809 	mutex_unlock(&bnad->conf_mutex);
2810 
2811 	return 0;
2812 }
2813 
2814 /* TX */
2815 /* Returns 0 for success */
2816 static int
2817 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2818 		    struct sk_buff *skb, struct bna_txq_entry *txqent)
2819 {
2820 	u16 flags = 0;
2821 	u32 gso_size;
2822 	u16 vlan_tag = 0;
2823 
2824 	if (skb_vlan_tag_present(skb)) {
2825 		vlan_tag = (u16)skb_vlan_tag_get(skb);
2826 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2827 	}
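	/* When CEE is running, the 802.1p priority bits are taken from the
	 * TCB priority while the lower VID/DEI bits of any existing tag are
	 * kept.
	 */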
2828 	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2829 		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2830 				| (vlan_tag & 0x1fff);
2831 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2832 	}
2833 	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2834 
2835 	if (skb_is_gso(skb)) {
2836 		gso_size = skb_shinfo(skb)->gso_size;
2837 		if (unlikely(gso_size > bnad->netdev->mtu)) {
2838 			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2839 			return -EINVAL;
2840 		}
2841 		if (unlikely((gso_size + skb_transport_offset(skb) +
2842 			      tcp_hdrlen(skb)) >= skb->len)) {
2843 			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2844 			txqent->hdr.wi.lso_mss = 0;
2845 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2846 		} else {
2847 			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2848 			txqent->hdr.wi.lso_mss = htons(gso_size);
2849 		}
2850 
2851 		if (bnad_tso_prepare(bnad, skb)) {
2852 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2853 			return -EINVAL;
2854 		}
2855 
2856 		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2857 		txqent->hdr.wi.l4_hdr_size_n_offset =
2858 			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2859 			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2860 	} else  {
2861 		txqent->hdr.wi.opcode =	htons(BNA_TXQ_WI_SEND);
2862 		txqent->hdr.wi.lso_mss = 0;
2863 
2864 		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2865 			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2866 			return -EINVAL;
2867 		}
2868 
2869 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2870 			__be16 net_proto = vlan_get_protocol(skb);
2871 			u8 proto = 0;
2872 
2873 			if (net_proto == htons(ETH_P_IP))
2874 				proto = ip_hdr(skb)->protocol;
2875 #ifdef NETIF_F_IPV6_CSUM
2876 			else if (net_proto == htons(ETH_P_IPV6)) {
2877 				/* nexthdr may not be TCP immediately. */
2878 				proto = ipv6_hdr(skb)->nexthdr;
2879 			}
2880 #endif
2881 			if (proto == IPPROTO_TCP) {
2882 				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2883 				txqent->hdr.wi.l4_hdr_size_n_offset =
2884 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2885 					      (0, skb_transport_offset(skb)));
2886 
2887 				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2888 
2889 				if (unlikely(skb_headlen(skb) <
2890 					    skb_transport_offset(skb) +
2891 				    tcp_hdrlen(skb))) {
2892 					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2893 					return -EINVAL;
2894 				}
2895 			} else if (proto == IPPROTO_UDP) {
2896 				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2897 				txqent->hdr.wi.l4_hdr_size_n_offset =
2898 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2899 					      (0, skb_transport_offset(skb)));
2900 
2901 				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2902 				if (unlikely(skb_headlen(skb) <
2903 					    skb_transport_offset(skb) +
2904 				    sizeof(struct udphdr))) {
2905 					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2906 					return -EINVAL;
2907 				}
2908 			} else {
2909 
2910 				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2911 				return -EINVAL;
2912 			}
2913 		} else
2914 			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2915 	}
2916 
2917 	txqent->hdr.wi.flags = htons(flags);
2918 	txqent->hdr.wi.frame_length = htonl(skb->len);
2919 
2920 	return 0;
2921 }
2922 
/*
 * bnad_start_xmit : Netdev entry point for transmit
 *		     Called with the netdev Tx lock held
 */
2927 static netdev_tx_t
2928 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2929 {
2930 	struct bnad *bnad = netdev_priv(netdev);
2931 	u32 txq_id = 0;
2932 	struct bna_tcb *tcb = NULL;
2933 	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2934 	u32		prod, q_depth, vect_id;
2935 	u32		wis, vectors, len;
2936 	int		i;
2937 	dma_addr_t		dma_addr;
2938 	struct bna_txq_entry *txqent;
2939 
2940 	len = skb_headlen(skb);
2941 
2942 	/* Sanity checks for the skb */
2943 
2944 	if (unlikely(skb->len <= ETH_HLEN)) {
2945 		dev_kfree_skb_any(skb);
2946 		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2947 		return NETDEV_TX_OK;
2948 	}
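	/* Drop skbs whose linear data exceeds what a single Tx vector can
	 * carry; note that the headlen_zero counter is shared with the
	 * zero-length headlen case below.
	 */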
2949 	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2950 		dev_kfree_skb_any(skb);
2951 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2952 		return NETDEV_TX_OK;
2953 	}
2954 	if (unlikely(len == 0)) {
2955 		dev_kfree_skb_any(skb);
2956 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2957 		return NETDEV_TX_OK;
2958 	}
2959 
2960 	tcb = bnad->tx_info[0].tcb[txq_id];
2961 
2962 	/*
2963 	 * Takes care of the Tx that is scheduled between clearing the flag
2964 	 * and the netif_tx_stop_all_queues() call.
2965 	 */
2966 	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2967 		dev_kfree_skb_any(skb);
2968 		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2969 		return NETDEV_TX_OK;
2970 	}
2971 
2972 	q_depth = tcb->q_depth;
2973 	prod = tcb->producer_index;
2974 	unmap_q = tcb->unmap_q;
2975 
2976 	vectors = 1 + skb_shinfo(skb)->nr_frags;
2977 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
2978 
2979 	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2980 		dev_kfree_skb_any(skb);
2981 		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2982 		return NETDEV_TX_OK;
2983 	}
2984 
2985 	/* Check for available TxQ resources */
2986 	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2987 		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2988 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2989 			u32 sent;
2990 			sent = bnad_txcmpl_process(bnad, tcb);
2991 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2992 				bna_ib_ack(tcb->i_dbell, sent);
2993 			smp_mb__before_atomic();
2994 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2995 		} else {
2996 			netif_stop_queue(netdev);
2997 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2998 		}
2999 
3000 		smp_mb();
		/*
		 * Check again to handle the race between netif_stop_queue()
		 * here and netif_wake_queue() in the interrupt handler,
		 * which does not run under the netif Tx lock.
		 */
3006 		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3007 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3008 			return NETDEV_TX_BUSY;
3009 		} else {
3010 			netif_wake_queue(netdev);
3011 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3012 		}
3013 	}
3014 
3015 	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3016 	head_unmap = &unmap_q[prod];
3017 
3018 	/* Program the opcode, flags, frame_len, num_vectors in WI */
3019 	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3020 		dev_kfree_skb_any(skb);
3021 		return NETDEV_TX_OK;
3022 	}
3023 	txqent->hdr.wi.reserved = 0;
3024 	txqent->hdr.wi.num_vectors = vectors;
3025 
3026 	head_unmap->skb = skb;
3027 	head_unmap->nvecs = 0;
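	/* head_unmap->nvecs counts every vector mapped for this skb, so a
	 * later error path can undo the whole frame via bnad_tx_buff_unmap().
	 */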
3028 
3029 	/* Program the vectors */
3030 	unmap = head_unmap;
3031 	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3032 				  len, DMA_TO_DEVICE);
3033 	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3034 		dev_kfree_skb_any(skb);
3035 		BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3036 		return NETDEV_TX_OK;
3037 	}
3038 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3039 	txqent->vector[0].length = htons(len);
3040 	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3041 	head_unmap->nvecs++;
3042 
3043 	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3044 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3045 		u32		size = skb_frag_size(frag);
3046 
3047 		if (unlikely(size == 0)) {
3048 			/* Undo the changes starting at tcb->producer_index */
3049 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3050 				tcb->producer_index);
3051 			dev_kfree_skb_any(skb);
3052 			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3053 			return NETDEV_TX_OK;
3054 		}
3055 
3056 		len += size;
3057 
3058 		vect_id++;
3059 		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3060 			vect_id = 0;
3061 			BNA_QE_INDX_INC(prod, q_depth);
3062 			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3063 			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3064 			unmap = &unmap_q[prod];
3065 		}
3066 
3067 		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3068 					    0, size, DMA_TO_DEVICE);
3069 		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3070 			/* Undo the changes starting at tcb->producer_index */
3071 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3072 					   tcb->producer_index);
3073 			dev_kfree_skb_any(skb);
3074 			BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3075 			return NETDEV_TX_OK;
3076 		}
3077 
3078 		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3079 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3080 		txqent->vector[vect_id].length = htons(size);
3081 		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3082 				   dma_addr);
3083 		head_unmap->nvecs++;
3084 	}
3085 
3086 	if (unlikely(len != skb->len)) {
3087 		/* Undo the changes starting at tcb->producer_index */
3088 		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3089 		dev_kfree_skb_any(skb);
3090 		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3091 		return NETDEV_TX_OK;
3092 	}
3093 
3094 	BNA_QE_INDX_INC(prod, q_depth);
3095 	tcb->producer_index = prod;
3096 
3097 	smp_mb();
3098 
3099 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3100 		return NETDEV_TX_OK;
3101 
3102 	skb_tx_timestamp(skb);
3103 
3104 	bna_txq_prod_indx_doorbell(tcb);
3105 	smp_mb();
3106 
3107 	return NETDEV_TX_OK;
3108 }
3109 
/*
 * Uses bna_lock to synchronize reading of the stats structures, which
 * are written by BNA under the same lock.
 */
3114 static struct rtnl_link_stats64 *
3115 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3116 {
3117 	struct bnad *bnad = netdev_priv(netdev);
3118 	unsigned long flags;
3119 
3120 	spin_lock_irqsave(&bnad->bna_lock, flags);
3121 
3122 	bnad_netdev_qstats_fill(bnad, stats);
3123 	bnad_netdev_hwstats_fill(bnad, stats);
3124 
3125 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3126 
3127 	return stats;
3128 }
3129 
3130 static void
3131 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3132 {
3133 	struct net_device *netdev = bnad->netdev;
3134 	int uc_count = netdev_uc_count(netdev);
3135 	enum bna_cb_status ret;
3136 	u8 *mac_list;
3137 	struct netdev_hw_addr *ha;
3138 	int entry;
3139 
3140 	if (netdev_uc_empty(bnad->netdev)) {
3141 		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3142 		return;
3143 	}
3144 
3145 	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3146 		goto mode_default;
3147 
3148 	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3149 	if (mac_list == NULL)
3150 		goto mode_default;
3151 
3152 	entry = 0;
3153 	netdev_for_each_uc_addr(ha, netdev) {
3154 		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3155 		entry++;
3156 	}
3157 
3158 	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3159 	kfree(mac_list);
3160 
3161 	if (ret != BNA_CB_SUCCESS)
3162 		goto mode_default;
3163 
3164 	return;
3165 
3166 	/* ucast packets not in UCAM are routed to default function */
3167 mode_default:
3168 	bnad->cfg_flags |= BNAD_CF_DEFAULT;
3169 	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3170 }
3171 
3172 static void
3173 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3174 {
3175 	struct net_device *netdev = bnad->netdev;
3176 	int mc_count = netdev_mc_count(netdev);
3177 	enum bna_cb_status ret;
3178 	u8 *mac_list;
3179 
3180 	if (netdev->flags & IFF_ALLMULTI)
3181 		goto mode_allmulti;
3182 
3183 	if (netdev_mc_empty(netdev))
3184 		return;
3185 
3186 	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3187 		goto mode_allmulti;
3188 
3189 	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3190 
3191 	if (mac_list == NULL)
3192 		goto mode_allmulti;
3193 
3194 	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3195 
3196 	/* copy rest of the MCAST addresses */
3197 	bnad_netdev_mc_list_get(netdev, mac_list);
3198 	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3199 	kfree(mac_list);
3200 
3201 	if (ret != BNA_CB_SUCCESS)
3202 		goto mode_allmulti;
3203 
3204 	return;
3205 
3206 mode_allmulti:
3207 	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3208 	bna_rx_mcast_delall(bnad->rx_info[0].rx);
3209 }
3210 
3211 void
3212 bnad_set_rx_mode(struct net_device *netdev)
3213 {
3214 	struct bnad *bnad = netdev_priv(netdev);
3215 	enum bna_rxmode new_mode, mode_mask;
3216 	unsigned long flags;
3217 
3218 	spin_lock_irqsave(&bnad->bna_lock, flags);
3219 
3220 	if (bnad->rx_info[0].rx == NULL) {
3221 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3222 		return;
3223 	}
3224 
3225 	/* clear bnad flags to update it with new settings */
3226 	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3227 			BNAD_CF_ALLMULTI);
3228 
3229 	new_mode = 0;
3230 	if (netdev->flags & IFF_PROMISC) {
3231 		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3232 		bnad->cfg_flags |= BNAD_CF_PROMISC;
3233 	} else {
3234 		bnad_set_rx_mcast_fltr(bnad);
3235 
3236 		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3237 			new_mode |= BNA_RXMODE_ALLMULTI;
3238 
3239 		bnad_set_rx_ucast_fltr(bnad);
3240 
3241 		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3242 			new_mode |= BNA_RXMODE_DEFAULT;
3243 	}
3244 
3245 	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3246 			BNA_RXMODE_ALLMULTI;
3247 	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3248 
3249 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3250 }
3251 
/*
 * bna_lock is used to sync writes to netdev->dev_addr.
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
3257 static int
3258 bnad_set_mac_address(struct net_device *netdev, void *addr)
3259 {
3260 	int err;
3261 	struct bnad *bnad = netdev_priv(netdev);
3262 	struct sockaddr *sa = (struct sockaddr *)addr;
3263 	unsigned long flags;
3264 
3265 	spin_lock_irqsave(&bnad->bna_lock, flags);
3266 
3267 	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3268 	if (!err)
3269 		ether_addr_copy(netdev->dev_addr, sa->sa_data);
3270 
3271 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3272 
3273 	return err;
3274 }
3275 
3276 static int
3277 bnad_mtu_set(struct bnad *bnad, int frame_size)
3278 {
3279 	unsigned long flags;
3280 
3281 	init_completion(&bnad->bnad_completions.mtu_comp);
3282 
3283 	spin_lock_irqsave(&bnad->bna_lock, flags);
3284 	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3285 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3286 
3287 	wait_for_completion(&bnad->bnad_completions.mtu_comp);
3288 
3289 	return bnad->bnad_completions.mtu_comp_status;
3290 }
3291 
3292 static int
3293 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3294 {
3295 	int err, mtu;
3296 	struct bnad *bnad = netdev_priv(netdev);
3297 	u32 rx_count = 0, frame, new_frame;
3298 
3299 	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3300 		return -EINVAL;
3301 
3302 	mutex_lock(&bnad->conf_mutex);
3303 
3304 	mtu = netdev->mtu;
3305 	netdev->mtu = new_mtu;
3306 
3307 	frame = BNAD_FRAME_SIZE(mtu);
3308 	new_frame = BNAD_FRAME_SIZE(new_mtu);
3309 
3310 	/* check if multi-buffer needs to be enabled */
3311 	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3312 	    netif_running(bnad->netdev)) {
3313 		/* only when transition is over 4K */
3314 		if ((frame <= 4096 && new_frame > 4096) ||
3315 		    (frame > 4096 && new_frame <= 4096))
3316 			rx_count = bnad_reinit_rx(bnad);
3317 	}
3318 
	/* rx_count > 0 - new Rx objects were created by bnad_reinit_rx();
	 * Linux expects err = 0 to be returned on success.
	 */
3322 	err = bnad_mtu_set(bnad, new_frame);
3323 	if (err)
3324 		err = -EBUSY;
3325 
3326 	mutex_unlock(&bnad->conf_mutex);
3327 	return err;
3328 }
3329 
3330 static int
3331 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3332 {
3333 	struct bnad *bnad = netdev_priv(netdev);
3334 	unsigned long flags;
3335 
3336 	if (!bnad->rx_info[0].rx)
3337 		return 0;
3338 
3339 	mutex_lock(&bnad->conf_mutex);
3340 
3341 	spin_lock_irqsave(&bnad->bna_lock, flags);
3342 	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3343 	set_bit(vid, bnad->active_vlans);
3344 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3345 
3346 	mutex_unlock(&bnad->conf_mutex);
3347 
3348 	return 0;
3349 }
3350 
3351 static int
3352 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3353 {
3354 	struct bnad *bnad = netdev_priv(netdev);
3355 	unsigned long flags;
3356 
3357 	if (!bnad->rx_info[0].rx)
3358 		return 0;
3359 
3360 	mutex_lock(&bnad->conf_mutex);
3361 
3362 	spin_lock_irqsave(&bnad->bna_lock, flags);
3363 	clear_bit(vid, bnad->active_vlans);
3364 	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3365 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3366 
3367 	mutex_unlock(&bnad->conf_mutex);
3368 
3369 	return 0;
3370 }
3371 
3372 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3373 {
3374 	struct bnad *bnad = netdev_priv(dev);
3375 	netdev_features_t changed = features ^ dev->features;
3376 
3377 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3378 		unsigned long flags;
3379 
3380 		spin_lock_irqsave(&bnad->bna_lock, flags);
3381 
3382 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
3383 			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3384 		else
3385 			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3386 
3387 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3388 	}
3389 
3390 	return 0;
3391 }
3392 
3393 #ifdef CONFIG_NET_POLL_CONTROLLER
3394 static void
3395 bnad_netpoll(struct net_device *netdev)
3396 {
3397 	struct bnad *bnad = netdev_priv(netdev);
3398 	struct bnad_rx_info *rx_info;
3399 	struct bnad_rx_ctrl *rx_ctrl;
3400 	u32 curr_mask;
3401 	int i, j;
3402 
3403 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3404 		bna_intx_disable(&bnad->bna, curr_mask);
3405 		bnad_isr(bnad->pcidev->irq, netdev);
3406 		bna_intx_enable(&bnad->bna, curr_mask);
3407 	} else {
3408 		/*
3409 		 * Tx processing may happen in sending context, so no need
3410 		 * to explicitly process completions here
3411 		 */
3412 
3413 		/* Rx processing */
3414 		for (i = 0; i < bnad->num_rx; i++) {
3415 			rx_info = &bnad->rx_info[i];
3416 			if (!rx_info->rx)
3417 				continue;
3418 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3419 				rx_ctrl = &rx_info->rx_ctrl[j];
3420 				if (rx_ctrl->ccb)
3421 					bnad_netif_rx_schedule_poll(bnad,
3422 							    rx_ctrl->ccb);
3423 			}
3424 		}
3425 	}
3426 }
3427 #endif
3428 
3429 static const struct net_device_ops bnad_netdev_ops = {
3430 	.ndo_open		= bnad_open,
3431 	.ndo_stop		= bnad_stop,
3432 	.ndo_start_xmit		= bnad_start_xmit,
3433 	.ndo_get_stats64		= bnad_get_stats64,
3434 	.ndo_set_rx_mode	= bnad_set_rx_mode,
3435 	.ndo_validate_addr      = eth_validate_addr,
3436 	.ndo_set_mac_address    = bnad_set_mac_address,
3437 	.ndo_change_mtu		= bnad_change_mtu,
3438 	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3439 	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3440 	.ndo_set_features	= bnad_set_features,
3441 #ifdef CONFIG_NET_POLL_CONTROLLER
3442 	.ndo_poll_controller    = bnad_netpoll
3443 #endif
3444 };
3445 
3446 static void
3447 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3448 {
3449 	struct net_device *netdev = bnad->netdev;
3450 
3451 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3452 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3453 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3454 		NETIF_F_HW_VLAN_CTAG_RX;
3455 
3456 	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3457 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3458 		NETIF_F_TSO | NETIF_F_TSO6;
3459 
3460 	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3461 
3462 	if (using_dac)
3463 		netdev->features |= NETIF_F_HIGHDMA;
3464 
3465 	netdev->mem_start = bnad->mmio_start;
3466 	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3467 
3468 	netdev->netdev_ops = &bnad_netdev_ops;
3469 	bnad_set_ethtool_ops(netdev);
3470 }
3471 
/*
 * 1. Initialize the bnad structure
 * 2. Set up the netdev pointer in pci_dev
 * 3. Initialize the number of TxQs, CQs and MSI-X vectors
 * 4. Initialize the work queue
 */
3478 static int
3479 bnad_init(struct bnad *bnad,
3480 	  struct pci_dev *pdev, struct net_device *netdev)
3481 {
3482 	unsigned long flags;
3483 
3484 	SET_NETDEV_DEV(netdev, &pdev->dev);
3485 	pci_set_drvdata(pdev, netdev);
3486 
3487 	bnad->netdev = netdev;
3488 	bnad->pcidev = pdev;
3489 	bnad->mmio_start = pci_resource_start(pdev, 0);
3490 	bnad->mmio_len = pci_resource_len(pdev, 0);
3491 	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3492 	if (!bnad->bar0) {
3493 		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3494 		return -ENOMEM;
3495 	}
3496 	dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3497 		 (unsigned long long) bnad->mmio_len);
3498 
3499 	spin_lock_irqsave(&bnad->bna_lock, flags);
3500 	if (!bnad_msix_disable)
3501 		bnad->cfg_flags = BNAD_CF_MSIX;
3502 
3503 	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3504 
3505 	bnad_q_num_init(bnad);
3506 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3507 
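	/* One MSI-X vector per TxQ, one per Rx path, plus the mailbox
	 * vector(s).
	 */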
3508 	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3509 		(bnad->num_rx * bnad->num_rxp_per_rx) +
3510 			 BNAD_MAILBOX_MSIX_VECTORS;
3511 
3512 	bnad->txq_depth = BNAD_TXQ_DEPTH;
3513 	bnad->rxq_depth = BNAD_RXQ_DEPTH;
3514 
3515 	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3516 	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3517 
3518 	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3519 	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3520 	if (!bnad->work_q) {
3521 		iounmap(bnad->bar0);
3522 		return -ENOMEM;
3523 	}
3524 
3525 	return 0;
3526 }
3527 
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
3533 static void
3534 bnad_uninit(struct bnad *bnad)
3535 {
3536 	if (bnad->work_q) {
3537 		flush_workqueue(bnad->work_q);
3538 		destroy_workqueue(bnad->work_q);
3539 		bnad->work_q = NULL;
3540 	}
3541 
3542 	if (bnad->bar0)
3543 		iounmap(bnad->bar0);
3544 }
3545 
/*
 * Initialize locks
 *	a) Per-ioceth mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) Spinlock used to protect the bna state machine
 */
3552 static void
3553 bnad_lock_init(struct bnad *bnad)
3554 {
3555 	spin_lock_init(&bnad->bna_lock);
3556 	mutex_init(&bnad->conf_mutex);
3557 }
3558 
3559 static void
3560 bnad_lock_uninit(struct bnad *bnad)
3561 {
3562 	mutex_destroy(&bnad->conf_mutex);
3563 }
3564 
3565 /* PCI Initialization */
3566 static int
3567 bnad_pci_init(struct bnad *bnad,
3568 	      struct pci_dev *pdev, bool *using_dac)
3569 {
3570 	int err;
3571 
3572 	err = pci_enable_device(pdev);
3573 	if (err)
3574 		return err;
3575 	err = pci_request_regions(pdev, BNAD_NAME);
3576 	if (err)
3577 		goto disable_device;
3578 	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3579 		*using_dac = true;
3580 	} else {
3581 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3582 		if (err)
3583 			goto release_regions;
3584 		*using_dac = false;
3585 	}
3586 	pci_set_master(pdev);
3587 	return 0;
3588 
3589 release_regions:
3590 	pci_release_regions(pdev);
3591 disable_device:
3592 	pci_disable_device(pdev);
3593 
3594 	return err;
3595 }
3596 
3597 static void
3598 bnad_pci_uninit(struct pci_dev *pdev)
3599 {
3600 	pci_release_regions(pdev);
3601 	pci_disable_device(pdev);
3602 }
3603 
3604 static int
3605 bnad_pci_probe(struct pci_dev *pdev,
3606 		const struct pci_device_id *pcidev_id)
3607 {
3608 	bool	using_dac;
3609 	int	err;
3610 	struct bnad *bnad;
3611 	struct bna *bna;
3612 	struct net_device *netdev;
3613 	struct bfa_pcidev pcidev_info;
3614 	unsigned long flags;
3615 
3616 	mutex_lock(&bnad_fwimg_mutex);
3617 	if (!cna_get_firmware_buf(pdev)) {
3618 		mutex_unlock(&bnad_fwimg_mutex);
3619 		dev_err(&pdev->dev, "failed to load firmware image!\n");
3620 		return -ENODEV;
3621 	}
3622 	mutex_unlock(&bnad_fwimg_mutex);
3623 
	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * bnad = netdev_priv(netdev)
	 */
3628 	netdev = alloc_etherdev(sizeof(struct bnad));
3629 	if (!netdev) {
3630 		err = -ENOMEM;
3631 		return err;
3632 	}
3633 	bnad = netdev_priv(netdev);
3634 	bnad_lock_init(bnad);
3635 	bnad->id = atomic_inc_return(&bna_id) - 1;
3636 
3637 	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = true for 64-bit DMA
	 *			     false for 32-bit DMA
	 */
3643 	using_dac = false;
3644 	err = bnad_pci_init(bnad, pdev, &using_dac);
3645 	if (err)
3646 		goto unlock_mutex;
3647 
3648 	/*
3649 	 * Initialize bnad structure
3650 	 * Setup relation between pci_dev & netdev
3651 	 */
3652 	err = bnad_init(bnad, pdev, netdev);
3653 	if (err)
3654 		goto pci_uninit;
3655 
3656 	/* Initialize netdev structure, set up ethtool ops */
3657 	bnad_netdev_init(bnad, using_dac);
3658 
3659 	/* Set link to down state */
3660 	netif_carrier_off(netdev);
3661 
	/* Set up the debugfs node for this bnad */
3663 	if (bna_debugfs_enable)
3664 		bnad_debugfs_init(bnad);
3665 
	/* Get the resource requirement from bna */
3667 	spin_lock_irqsave(&bnad->bna_lock, flags);
3668 	bna_res_req(&bnad->res_info[0]);
3669 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3670 
3671 	/* Allocate resources from bna */
3672 	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3673 	if (err)
3674 		goto drv_uninit;
3675 
3676 	bna = &bnad->bna;
3677 
3678 	/* Setup pcidev_info for bna_init() */
3679 	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3680 	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3681 	pcidev_info.device_id = bnad->pcidev->device;
3682 	pcidev_info.pci_bar_kva = bnad->bar0;
3683 
3684 	spin_lock_irqsave(&bnad->bna_lock, flags);
3685 	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3686 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3687 
3688 	bnad->stats.bna_stats = &bna->stats;
3689 
3690 	bnad_enable_msix(bnad);
3691 	err = bnad_mbox_irq_alloc(bnad);
3692 	if (err)
3693 		goto res_free;
3694 
3695 	/* Set up timers */
3696 	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3697 		    (unsigned long)bnad);
3698 	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3699 		    (unsigned long)bnad);
3700 	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3701 		    (unsigned long)bnad);
3702 	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3703 		    (unsigned long)bnad);
3704 
	/*
	 * Start the chip.
	 * If the callback comes back with an error, we bail out;
	 * this is a catastrophic error.
	 */
3710 	err = bnad_ioceth_enable(bnad);
3711 	if (err) {
3712 		dev_err(&pdev->dev, "initialization failed err=%d\n", err);
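		/* Bail out without registering the netdev; the probe still
		 * returns 0 for this device.
		 */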
3713 		goto probe_success;
3714 	}
3715 
3716 	spin_lock_irqsave(&bnad->bna_lock, flags);
3717 	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3718 		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3719 		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3720 			bna_attr(bna)->num_rxp - 1);
3721 		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3722 			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3723 			err = -EIO;
3724 	}
3725 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3726 	if (err)
3727 		goto disable_ioceth;
3728 
3729 	spin_lock_irqsave(&bnad->bna_lock, flags);
3730 	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3731 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3732 
3733 	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3734 	if (err) {
3735 		err = -EIO;
3736 		goto disable_ioceth;
3737 	}
3738 
3739 	spin_lock_irqsave(&bnad->bna_lock, flags);
3740 	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3741 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3742 
	/* Get the burnt-in MAC address */
3744 	spin_lock_irqsave(&bnad->bna_lock, flags);
3745 	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3746 	bnad_set_netdev_perm_addr(bnad);
3747 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3748 
3749 	mutex_unlock(&bnad->conf_mutex);
3750 
	/* Finally, register with the net_device layer */
3752 	err = register_netdev(netdev);
3753 	if (err) {
3754 		dev_err(&pdev->dev, "registering net device failed\n");
3755 		goto probe_uninit;
3756 	}
3757 	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3758 
3759 	return 0;
3760 
3761 probe_success:
3762 	mutex_unlock(&bnad->conf_mutex);
3763 	return 0;
3764 
3765 probe_uninit:
3766 	mutex_lock(&bnad->conf_mutex);
3767 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3768 disable_ioceth:
3769 	bnad_ioceth_disable(bnad);
3770 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3771 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3772 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3773 	spin_lock_irqsave(&bnad->bna_lock, flags);
3774 	bna_uninit(bna);
3775 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3776 	bnad_mbox_irq_free(bnad);
3777 	bnad_disable_msix(bnad);
3778 res_free:
3779 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3780 drv_uninit:
3781 	/* Remove the debugfs node for this bnad */
3782 	kfree(bnad->regdata);
3783 	bnad_debugfs_uninit(bnad);
3784 	bnad_uninit(bnad);
3785 pci_uninit:
3786 	bnad_pci_uninit(pdev);
3787 unlock_mutex:
3788 	mutex_unlock(&bnad->conf_mutex);
3789 	bnad_lock_uninit(bnad);
3790 	free_netdev(netdev);
3791 	return err;
3792 }
3793 
3794 static void
3795 bnad_pci_remove(struct pci_dev *pdev)
3796 {
3797 	struct net_device *netdev = pci_get_drvdata(pdev);
3798 	struct bnad *bnad;
3799 	struct bna *bna;
3800 	unsigned long flags;
3801 
3802 	if (!netdev)
3803 		return;
3804 
3805 	bnad = netdev_priv(netdev);
3806 	bna = &bnad->bna;
3807 
3808 	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3809 		unregister_netdev(netdev);
3810 
3811 	mutex_lock(&bnad->conf_mutex);
3812 	bnad_ioceth_disable(bnad);
3813 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3814 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3815 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3816 	spin_lock_irqsave(&bnad->bna_lock, flags);
3817 	bna_uninit(bna);
3818 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3819 
3820 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3821 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3822 	bnad_mbox_irq_free(bnad);
3823 	bnad_disable_msix(bnad);
3824 	bnad_pci_uninit(pdev);
3825 	mutex_unlock(&bnad->conf_mutex);
3826 	bnad_lock_uninit(bnad);
3827 	/* Remove the debugfs node for this bnad */
3828 	kfree(bnad->regdata);
3829 	bnad_debugfs_uninit(bnad);
3830 	bnad_uninit(bnad);
3831 	free_netdev(netdev);
3832 }
3833 
3834 static const struct pci_device_id bnad_pci_id_table[] = {
3835 	{
3836 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3837 			PCI_DEVICE_ID_BROCADE_CT),
3838 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3839 		.class_mask =  0xffff00
3840 	},
3841 	{
3842 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3843 			BFA_PCI_DEVICE_ID_CT2),
3844 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3845 		.class_mask =  0xffff00
3846 	},
3847 	{0,  },
3848 };
3849 
3850 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3851 
3852 static struct pci_driver bnad_pci_driver = {
3853 	.name = BNAD_NAME,
3854 	.id_table = bnad_pci_id_table,
3855 	.probe = bnad_pci_probe,
3856 	.remove = bnad_pci_remove,
3857 };
3858 
3859 static int __init
3860 bnad_module_init(void)
3861 {
3862 	int err;
3863 
3864 	pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3865 		BNAD_VERSION);
3866 
3867 	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3868 
3869 	err = pci_register_driver(&bnad_pci_driver);
3870 	if (err < 0) {
3871 		pr_err("bna: PCI driver registration failed err=%d\n", err);
3872 		return err;
3873 	}
3874 
3875 	return 0;
3876 }
3877 
3878 static void __exit
3879 bnad_module_exit(void)
3880 {
3881 	pci_unregister_driver(&bnad_pci_driver);
3882 	release_firmware(bfi_fw);
3883 }
3884 
3885 module_init(bnad_module_init);
3886 module_exit(bnad_module_exit);
3887 
3888 MODULE_AUTHOR("Brocade");
3889 MODULE_LICENSE("GPL");
3890 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3891 MODULE_VERSION(BNAD_VERSION);
3892 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3893 MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3894