1 /*
2  * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/slab.h>
40 #include <linux/prefetch.h>
41 #include <net/arp.h>
42 #include "common.h"
43 #include "regs.h"
44 #include "sge_defs.h"
45 #include "t3_cpl.h"
46 #include "firmware_exports.h"
47 #include "cxgb3_offload.h"
48 
49 #define USE_GTS 0
50 
51 #define SGE_RX_SM_BUF_SIZE 1536
52 
53 #define SGE_RX_COPY_THRES  256
54 #define SGE_RX_PULL_LEN    128
55 
56 #define SGE_PG_RSVD SMP_CACHE_BYTES
57 /*
58  * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
59  * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
60  * directly.
61  */
62 #define FL0_PG_CHUNK_SIZE  2048
63 #define FL0_PG_ORDER 0
64 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
65 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
66 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
67 #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
68 
69 #define SGE_RX_DROP_THRES 16
70 #define RX_RECLAIM_PERIOD (HZ/4)
71 
72 /*
73  * Max number of Rx buffers we replenish at a time.
74  */
75 #define MAX_RX_REFILL 16U
76 /*
77  * Period of the Tx buffer reclaim timer.  This timer does not need to run
78  * frequently as Tx buffers are usually reclaimed by new Tx packets.
79  */
80 #define TX_RECLAIM_PERIOD (HZ / 4)
81 #define TX_RECLAIM_TIMER_CHUNK 64U
82 #define TX_RECLAIM_CHUNK 16U
83 
84 /* WR size in bytes */
85 #define WR_LEN (WR_FLITS * 8)
86 
87 /*
88  * Types of Tx queues in each queue set.  Order here matters, do not change.
89  */
90 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
91 
92 /* Values for sge_txq.flags */
93 enum {
94 	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
95 	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
96 };
97 
98 struct tx_desc {
99 	__be64 flit[TX_DESC_FLITS];
100 };
101 
102 struct rx_desc {
103 	__be32 addr_lo;
104 	__be32 len_gen;
105 	__be32 gen2;
106 	__be32 addr_hi;
107 };
108 
109 struct tx_sw_desc {		/* SW state per Tx descriptor */
110 	struct sk_buff *skb;
111 	u8 eop;       /* set if last descriptor for packet */
112 	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
113 	u8 fragidx;   /* first page fragment associated with descriptor */
114 	s8 sflit;     /* start flit of first SGL entry in descriptor */
115 };
116 
117 struct rx_sw_desc {                /* SW state per Rx descriptor */
118 	union {
119 		struct sk_buff *skb;
120 		struct fl_pg_chunk pg_chunk;
121 	};
122 	DEFINE_DMA_UNMAP_ADDR(dma_addr);
123 };
124 
125 struct rsp_desc {		/* response queue descriptor */
126 	struct rss_header rss_hdr;
127 	__be32 flags;
128 	__be32 len_cq;
129 	u8 imm_data[47];
130 	u8 intr_gen;
131 };
132 
133 /*
134  * Holds unmapping information for Tx packets that need deferred unmapping.
135  * This structure lives at skb->head and must be allocated by callers.
136  */
137 struct deferred_unmap_info {
138 	struct pci_dev *pdev;
139 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
140 };
141 
142 /*
143  * Maps a number of flits to the number of Tx descriptors that can hold them.
144  * The formula is
145  *
146  * desc = 1 + (flits - 2) / (WR_FLITS - 1).
147  *
148  * HW allows up to 4 descriptors to be combined into a WR.
149  */
150 static u8 flit_desc_map[] = {
151 	0,
152 #if SGE_NUM_GENBITS == 1
153 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
154 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
155 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
156 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
157 #elif SGE_NUM_GENBITS == 2
158 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
159 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
160 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
161 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
162 #else
163 # error "SGE_NUM_GENBITS must be 1 or 2"
164 #endif
165 };
166 
167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
168 {
169 	return container_of(q, struct sge_qset, fl[qidx]);
170 }
171 
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
173 {
174 	return container_of(q, struct sge_qset, rspq);
175 }
176 
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
178 {
179 	return container_of(q, struct sge_qset, txq[qidx]);
180 }
181 
182 /**
183  *	refill_rspq - replenish an SGE response queue
184  *	@adapter: the adapter
185  *	@q: the response queue to replenish
186  *	@credits: how many new responses to make available
187  *
188  *	Replenishes a response queue by making the supplied number of responses
189  *	available to HW.
190  */
191 static inline void refill_rspq(struct adapter *adapter,
192 			       const struct sge_rspq *q, unsigned int credits)
193 {
194 	rmb();
195 	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
196 		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
197 }
198 
199 /**
200  *	need_skb_unmap - does the platform need unmapping of sk_buffs?
201  *
202  *	Returns true if the platform needs sk_buff unmapping.  The compiler
203  *	optimizes away unnecessary code if this returns true.
204  */
205 static inline int need_skb_unmap(void)
206 {
207 #ifdef CONFIG_NEED_DMA_MAP_STATE
208 	return 1;
209 #else
210 	return 0;
211 #endif
212 }
213 
214 /**
215  *	unmap_skb - unmap a packet main body and its page fragments
216  *	@skb: the packet
217  *	@q: the Tx queue containing Tx descriptors for the packet
218  *	@cidx: index of Tx descriptor
219  *	@pdev: the PCI device
220  *
221  *	Unmap the main body of an sk_buff and its page fragments, if any.
222  *	Because of the fairly complicated structure of our SGLs and the desire
223  *	to conserve space for metadata, the information necessary to unmap an
224  *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
225  *	descriptors (the physical addresses of the various data buffers), and
226  *	the SW descriptor state (assorted indices).  The send functions
227  *	initialize the indices for the first packet descriptor so we can unmap
228  *	the buffers held in the first Tx descriptor here, and we have enough
229  *	information at this point to set the state for the next Tx descriptor.
230  *
231  *	Note that it is possible to clean up the first descriptor of a packet
232  *	before the send routines have written the next descriptors, but this
233  *	race does not cause any problem.  We just end up writing the unmapping
234  *	info for the descriptor first.
235  */
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
237 			     unsigned int cidx, struct pci_dev *pdev)
238 {
239 	const struct sg_ent *sgp;
240 	struct tx_sw_desc *d = &q->sdesc[cidx];
241 	int nfrags, frag_idx, curflit, j = d->addr_idx;
242 
243 	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
244 	frag_idx = d->fragidx;
245 
246 	if (frag_idx == 0 && skb_headlen(skb)) {
247 		dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]),
248 				 skb_headlen(skb), DMA_TO_DEVICE);
249 		j = 1;
250 	}
251 
252 	curflit = d->sflit + 1 + j;
253 	nfrags = skb_shinfo(skb)->nr_frags;
254 
255 	while (frag_idx < nfrags && curflit < WR_FLITS) {
256 		dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]),
257 			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
258 			       DMA_TO_DEVICE);
259 		j ^= 1;
260 		if (j == 0) {
261 			sgp++;
262 			curflit++;
263 		}
264 		curflit++;
265 		frag_idx++;
266 	}
267 
268 	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
269 		d = cidx + 1 == q->size ? q->sdesc : d + 1;
270 		d->fragidx = frag_idx;
271 		d->addr_idx = j;
272 		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
273 	}
274 }
275 
276 /**
277  *	free_tx_desc - reclaims Tx descriptors and their buffers
278  *	@adapter: the adapter
279  *	@q: the Tx queue to reclaim descriptors from
280  *	@n: the number of descriptors to reclaim
281  *
282  *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
283  *	Tx buffers.  Called with the Tx queue lock held.
284  */
285 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
286 			 unsigned int n)
287 {
288 	struct tx_sw_desc *d;
289 	struct pci_dev *pdev = adapter->pdev;
290 	unsigned int cidx = q->cidx;
291 
292 	const int need_unmap = need_skb_unmap() &&
293 			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;
294 
295 	d = &q->sdesc[cidx];
296 	while (n--) {
297 		if (d->skb) {	/* an SGL is present */
298 			if (need_unmap)
299 				unmap_skb(d->skb, q, cidx, pdev);
300 			if (d->eop) {
301 				dev_consume_skb_any(d->skb);
302 				d->skb = NULL;
303 			}
304 		}
305 		++d;
306 		if (++cidx == q->size) {
307 			cidx = 0;
308 			d = q->sdesc;
309 		}
310 	}
311 	q->cidx = cidx;
312 }
313 
314 /**
315  *	reclaim_completed_tx - reclaims completed Tx descriptors
316  *	@adapter: the adapter
317  *	@q: the Tx queue to reclaim completed descriptors from
318  *	@chunk: maximum number of descriptors to reclaim
319  *
320  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
321  *	and frees the associated buffers if possible.  Called with the Tx
322  *	queue's lock held.
323  */
324 static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
325 						struct sge_txq *q,
326 						unsigned int chunk)
327 {
328 	unsigned int reclaim = q->processed - q->cleaned;
329 
330 	reclaim = min(chunk, reclaim);
331 	if (reclaim) {
332 		free_tx_desc(adapter, q, reclaim);
333 		q->cleaned += reclaim;
334 		q->in_use -= reclaim;
335 	}
336 	return q->processed - q->cleaned;
337 }
338 
339 /**
340  *	should_restart_tx - are there enough resources to restart a Tx queue?
341  *	@q: the Tx queue
342  *
343  *	Checks if there are enough descriptors to restart a suspended Tx queue.
344  */
345 static inline int should_restart_tx(const struct sge_txq *q)
346 {
347 	unsigned int r = q->processed - q->cleaned;
348 
349 	return q->in_use - r < (q->size >> 1);
350 }
351 
352 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
353 			  struct rx_sw_desc *d)
354 {
355 	if (q->use_pages && d->pg_chunk.page) {
356 		(*d->pg_chunk.p_cnt)--;
357 		if (!*d->pg_chunk.p_cnt)
358 			dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
359 				       q->alloc_size, DMA_FROM_DEVICE);
360 
361 		put_page(d->pg_chunk.page);
362 		d->pg_chunk.page = NULL;
363 	} else {
364 		dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr),
365 				 q->buf_size, DMA_FROM_DEVICE);
366 		kfree_skb(d->skb);
367 		d->skb = NULL;
368 	}
369 }
370 
371 /**
372  *	free_rx_bufs - free the Rx buffers on an SGE free list
373  *	@pdev: the PCI device associated with the adapter
374  *	@q: the SGE free list to clean up
375  *
376  *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
377  *	this queue should be stopped before calling this function.
378  */
379 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
380 {
381 	unsigned int cidx = q->cidx;
382 
383 	while (q->credits--) {
384 		struct rx_sw_desc *d = &q->sdesc[cidx];
385 
386 
387 		clear_rx_desc(pdev, q, d);
388 		if (++cidx == q->size)
389 			cidx = 0;
390 	}
391 
392 	if (q->pg_chunk.page) {
393 		__free_pages(q->pg_chunk.page, q->order);
394 		q->pg_chunk.page = NULL;
395 	}
396 }
397 
398 /**
399  *	add_one_rx_buf - add a packet buffer to a free-buffer list
400  *	@va:  buffer start VA
401  *	@len: the buffer length
402  *	@d: the HW Rx descriptor to write
403  *	@sd: the SW Rx descriptor to write
404  *	@gen: the generation bit value
405  *	@pdev: the PCI device associated with the adapter
406  *
407  *	Add a buffer of the given length to the supplied HW and SW Rx
408  *	descriptors.
409  */
410 static inline int add_one_rx_buf(void *va, unsigned int len,
411 				 struct rx_desc *d, struct rx_sw_desc *sd,
412 				 unsigned int gen, struct pci_dev *pdev)
413 {
414 	dma_addr_t mapping;
415 
416 	mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
417 	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
418 		return -ENOMEM;
419 
420 	dma_unmap_addr_set(sd, dma_addr, mapping);
421 
422 	d->addr_lo = cpu_to_be32(mapping);
423 	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
424 	dma_wmb();
425 	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
426 	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
427 	return 0;
428 }
429 
430 static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
431 				   unsigned int gen)
432 {
433 	d->addr_lo = cpu_to_be32(mapping);
434 	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
435 	dma_wmb();
436 	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
437 	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
438 	return 0;
439 }
440 
441 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
442 			  struct rx_sw_desc *sd, gfp_t gfp,
443 			  unsigned int order)
444 {
445 	if (!q->pg_chunk.page) {
446 		dma_addr_t mapping;
447 
448 		q->pg_chunk.page = alloc_pages(gfp, order);
449 		if (unlikely(!q->pg_chunk.page))
450 			return -ENOMEM;
451 		q->pg_chunk.va = page_address(q->pg_chunk.page);
452 		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
453 				    SGE_PG_RSVD;
454 		q->pg_chunk.offset = 0;
455 		mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
456 				       0, q->alloc_size, DMA_FROM_DEVICE);
457 		if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
458 			__free_pages(q->pg_chunk.page, order);
459 			q->pg_chunk.page = NULL;
460 			return -EIO;
461 		}
462 		q->pg_chunk.mapping = mapping;
463 	}
464 	sd->pg_chunk = q->pg_chunk;
465 
466 	prefetch(sd->pg_chunk.p_cnt);
467 
468 	q->pg_chunk.offset += q->buf_size;
469 	if (q->pg_chunk.offset == (PAGE_SIZE << order))
470 		q->pg_chunk.page = NULL;
471 	else {
472 		q->pg_chunk.va += q->buf_size;
473 		get_page(q->pg_chunk.page);
474 	}
475 
476 	if (sd->pg_chunk.offset == 0)
477 		*sd->pg_chunk.p_cnt = 1;
478 	else
479 		*sd->pg_chunk.p_cnt += 1;
480 
481 	return 0;
482 }
483 
484 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
485 {
486 	if (q->pend_cred >= q->credits / 4) {
487 		q->pend_cred = 0;
488 		wmb();
489 		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
490 	}
491 }
492 
493 /**
494  *	refill_fl - refill an SGE free-buffer list
495  *	@adap: the adapter
496  *	@q: the free-list to refill
497  *	@n: the number of new buffers to allocate
498  *	@gfp: the gfp flags for allocating new buffers
499  *
500  *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
501  *	allocated with the supplied gfp flags.  The caller must assure that
502  *	@n does not exceed the queue's capacity.
503  */
504 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
505 {
506 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
507 	struct rx_desc *d = &q->desc[q->pidx];
508 	unsigned int count = 0;
509 
510 	while (n--) {
511 		dma_addr_t mapping;
512 		int err;
513 
514 		if (q->use_pages) {
515 			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
516 						    q->order))) {
517 nomem:				q->alloc_failed++;
518 				break;
519 			}
520 			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
521 			dma_unmap_addr_set(sd, dma_addr, mapping);
522 
523 			add_one_rx_chunk(mapping, d, q->gen);
524 			dma_sync_single_for_device(&adap->pdev->dev, mapping,
525 						   q->buf_size - SGE_PG_RSVD,
526 						   DMA_FROM_DEVICE);
527 		} else {
528 			void *buf_start;
529 
530 			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
531 			if (!skb)
532 				goto nomem;
533 
534 			sd->skb = skb;
535 			buf_start = skb->data;
536 			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
537 					     q->gen, adap->pdev);
538 			if (unlikely(err)) {
539 				clear_rx_desc(adap->pdev, q, sd);
540 				break;
541 			}
542 		}
543 
544 		d++;
545 		sd++;
546 		if (++q->pidx == q->size) {
547 			q->pidx = 0;
548 			q->gen ^= 1;
549 			sd = q->sdesc;
550 			d = q->desc;
551 		}
552 		count++;
553 	}
554 
555 	q->credits += count;
556 	q->pend_cred += count;
557 	ring_fl_db(adap, q);
558 
559 	return count;
560 }
561 
562 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
563 {
564 	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
565 		  GFP_ATOMIC | __GFP_COMP);
566 }
567 
568 /**
569  *	recycle_rx_buf - recycle a receive buffer
570  *	@adap: the adapter
571  *	@q: the SGE free list
572  *	@idx: index of buffer to recycle
573  *
574  *	Recycles the specified buffer on the given free list by adding it at
575  *	the next available slot on the list.
576  */
577 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
578 			   unsigned int idx)
579 {
580 	struct rx_desc *from = &q->desc[idx];
581 	struct rx_desc *to = &q->desc[q->pidx];
582 
583 	q->sdesc[q->pidx] = q->sdesc[idx];
584 	to->addr_lo = from->addr_lo;	/* already big endian */
585 	to->addr_hi = from->addr_hi;	/* likewise */
586 	dma_wmb();
587 	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
588 	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
589 
590 	if (++q->pidx == q->size) {
591 		q->pidx = 0;
592 		q->gen ^= 1;
593 	}
594 
595 	q->credits++;
596 	q->pend_cred++;
597 	ring_fl_db(adap, q);
598 }
599 
600 /**
601  *	alloc_ring - allocate resources for an SGE descriptor ring
602  *	@pdev: the PCI device
603  *	@nelem: the number of descriptors
604  *	@elem_size: the size of each descriptor
605  *	@sw_size: the size of the SW state associated with each ring element
606  *	@phys: the physical address of the allocated ring
607  *	@metadata: address of the array holding the SW state for the ring
608  *
609  *	Allocates resources for an SGE descriptor ring, such as Tx queues,
610  *	free buffer lists, or response queues.  Each SGE ring requires
611  *	space for its HW descriptors plus, optionally, space for the SW state
612  *	associated with each HW entry (the metadata).  The function returns
613  *	three values: the virtual address for the HW ring (the return value
614  *	of the function), the physical address of the HW ring, and the address
615  *	of the SW ring.
616  */
617 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
618 			size_t sw_size, dma_addr_t * phys, void *metadata)
619 {
620 	size_t len = nelem * elem_size;
621 	void *s = NULL;
622 	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
623 
624 	if (!p)
625 		return NULL;
626 	if (sw_size && metadata) {
627 		s = kcalloc(nelem, sw_size, GFP_KERNEL);
628 
629 		if (!s) {
630 			dma_free_coherent(&pdev->dev, len, p, *phys);
631 			return NULL;
632 		}
633 		*(void **)metadata = s;
634 	}
635 	return p;
636 }
637 
638 /**
639  *	t3_reset_qset - reset a sge qset
640  *	@q: the queue set
641  *
642  *	Reset the qset structure.
643  *	the NAPI structure is preserved in the event of
644  *	the qset's reincarnation, for example during EEH recovery.
645  */
646 static void t3_reset_qset(struct sge_qset *q)
647 {
648 	if (q->adap &&
649 	    !(q->adap->flags & NAPI_INIT)) {
650 		memset(q, 0, sizeof(*q));
651 		return;
652 	}
653 
654 	q->adap = NULL;
655 	memset(&q->rspq, 0, sizeof(q->rspq));
656 	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
657 	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
658 	q->txq_stopped = 0;
659 	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
660 	q->rx_reclaim_timer.function = NULL;
661 	q->nomem = 0;
662 	napi_free_frags(&q->napi);
663 }
664 
665 
666 /**
667  *	t3_free_qset - free the resources of an SGE queue set
668  *	@adapter: the adapter owning the queue set
669  *	@q: the queue set
670  *
671  *	Release the HW and SW resources associated with an SGE queue set, such
672  *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
673  *	queue set must be quiesced prior to calling this.
674  */
675 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
676 {
677 	int i;
678 	struct pci_dev *pdev = adapter->pdev;
679 
680 	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
681 		if (q->fl[i].desc) {
682 			spin_lock_irq(&adapter->sge.reg_lock);
683 			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
684 			spin_unlock_irq(&adapter->sge.reg_lock);
685 			free_rx_bufs(pdev, &q->fl[i]);
686 			kfree(q->fl[i].sdesc);
687 			dma_free_coherent(&pdev->dev,
688 					  q->fl[i].size *
689 					  sizeof(struct rx_desc), q->fl[i].desc,
690 					  q->fl[i].phys_addr);
691 		}
692 
693 	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
694 		if (q->txq[i].desc) {
695 			spin_lock_irq(&adapter->sge.reg_lock);
696 			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
697 			spin_unlock_irq(&adapter->sge.reg_lock);
698 			if (q->txq[i].sdesc) {
699 				free_tx_desc(adapter, &q->txq[i],
700 					     q->txq[i].in_use);
701 				kfree(q->txq[i].sdesc);
702 			}
703 			dma_free_coherent(&pdev->dev,
704 					  q->txq[i].size *
705 					  sizeof(struct tx_desc),
706 					  q->txq[i].desc, q->txq[i].phys_addr);
707 			__skb_queue_purge(&q->txq[i].sendq);
708 		}
709 
710 	if (q->rspq.desc) {
711 		spin_lock_irq(&adapter->sge.reg_lock);
712 		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
713 		spin_unlock_irq(&adapter->sge.reg_lock);
714 		dma_free_coherent(&pdev->dev,
715 				  q->rspq.size * sizeof(struct rsp_desc),
716 				  q->rspq.desc, q->rspq.phys_addr);
717 	}
718 
719 	t3_reset_qset(q);
720 }
721 
722 /**
723  *	init_qset_cntxt - initialize an SGE queue set context info
724  *	@qs: the queue set
725  *	@id: the queue set id
726  *
727  *	Initializes the TIDs and context ids for the queues of a queue set.
728  */
729 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
730 {
731 	qs->rspq.cntxt_id = id;
732 	qs->fl[0].cntxt_id = 2 * id;
733 	qs->fl[1].cntxt_id = 2 * id + 1;
734 	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
735 	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
736 	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
737 	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
738 	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
739 }
740 
741 /**
742  *	sgl_len - calculates the size of an SGL of the given capacity
743  *	@n: the number of SGL entries
744  *
745  *	Calculates the number of flits needed for a scatter/gather list that
746  *	can hold the given number of entries.
747  */
748 static inline unsigned int sgl_len(unsigned int n)
749 {
750 	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
751 	return (3 * n) / 2 + (n & 1);
752 }
753 
754 /**
755  *	flits_to_desc - returns the num of Tx descriptors for the given flits
756  *	@n: the number of flits
757  *
758  *	Calculates the number of Tx descriptors needed for the supplied number
759  *	of flits.
760  */
761 static inline unsigned int flits_to_desc(unsigned int n)
762 {
763 	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
764 	return flit_desc_map[n];
765 }
766 
767 /**
768  *	get_packet - return the next ingress packet buffer from a free list
769  *	@adap: the adapter that received the packet
770  *	@fl: the SGE free list holding the packet
771  *	@len: the packet length including any SGE padding
772  *	@drop_thres: # of remaining buffers before we start dropping packets
773  *
774  *	Get the next packet from a free list and complete setup of the
775  *	sk_buff.  If the packet is small we make a copy and recycle the
776  *	original buffer, otherwise we use the original buffer itself.  If a
777  *	positive drop threshold is supplied packets are dropped and their
778  *	buffers recycled if (a) the number of remaining buffers is under the
779  *	threshold and the packet is too big to copy, or (b) the packet should
780  *	be copied but there is no memory for the copy.
781  */
782 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
783 				  unsigned int len, unsigned int drop_thres)
784 {
785 	struct sk_buff *skb = NULL;
786 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
787 
788 	prefetch(sd->skb->data);
789 	fl->credits--;
790 
791 	if (len <= SGE_RX_COPY_THRES) {
792 		skb = alloc_skb(len, GFP_ATOMIC);
793 		if (likely(skb != NULL)) {
794 			__skb_put(skb, len);
795 			dma_sync_single_for_cpu(&adap->pdev->dev,
796 						dma_unmap_addr(sd, dma_addr),
797 						len, DMA_FROM_DEVICE);
798 			memcpy(skb->data, sd->skb->data, len);
799 			dma_sync_single_for_device(&adap->pdev->dev,
800 						   dma_unmap_addr(sd, dma_addr),
801 						   len, DMA_FROM_DEVICE);
802 		} else if (!drop_thres)
803 			goto use_orig_buf;
804 recycle:
805 		recycle_rx_buf(adap, fl, fl->cidx);
806 		return skb;
807 	}
808 
809 	if (unlikely(fl->credits < drop_thres) &&
810 	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
811 		      GFP_ATOMIC | __GFP_COMP) == 0)
812 		goto recycle;
813 
814 use_orig_buf:
815 	dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
816 			 fl->buf_size, DMA_FROM_DEVICE);
817 	skb = sd->skb;
818 	skb_put(skb, len);
819 	__refill_fl(adap, fl);
820 	return skb;
821 }
822 
823 /**
824  *	get_packet_pg - return the next ingress packet buffer from a free list
825  *	@adap: the adapter that received the packet
826  *	@fl: the SGE free list holding the packet
827  *	@q: the queue
828  *	@len: the packet length including any SGE padding
829  *	@drop_thres: # of remaining buffers before we start dropping packets
830  *
831  *	Get the next packet from a free list populated with page chunks.
832  *	If the packet is small we make a copy and recycle the original buffer,
833  *	otherwise we attach the original buffer as a page fragment to a fresh
834  *	sk_buff.  If a positive drop threshold is supplied packets are dropped
835  *	and their buffers recycled if (a) the number of remaining buffers is
836  *	under the threshold and the packet is too big to copy, or (b) there's
837  *	no system memory.
838  *
839  * 	Note: this function is similar to @get_packet but deals with Rx buffers
840  * 	that are page chunks rather than sk_buffs.
841  */
842 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
843 				     struct sge_rspq *q, unsigned int len,
844 				     unsigned int drop_thres)
845 {
846 	struct sk_buff *newskb, *skb;
847 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
848 
849 	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
850 
851 	newskb = skb = q->pg_skb;
852 	if (!skb && (len <= SGE_RX_COPY_THRES)) {
853 		newskb = alloc_skb(len, GFP_ATOMIC);
854 		if (likely(newskb != NULL)) {
855 			__skb_put(newskb, len);
856 			dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
857 						len, DMA_FROM_DEVICE);
858 			memcpy(newskb->data, sd->pg_chunk.va, len);
859 			dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
860 						   len, DMA_FROM_DEVICE);
861 		} else if (!drop_thres)
862 			return NULL;
863 recycle:
864 		fl->credits--;
865 		recycle_rx_buf(adap, fl, fl->cidx);
866 		q->rx_recycle_buf++;
867 		return newskb;
868 	}
869 
870 	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
871 		goto recycle;
872 
873 	prefetch(sd->pg_chunk.p_cnt);
874 
875 	if (!skb)
876 		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
877 
878 	if (unlikely(!newskb)) {
879 		if (!drop_thres)
880 			return NULL;
881 		goto recycle;
882 	}
883 
884 	dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
885 				DMA_FROM_DEVICE);
886 	(*sd->pg_chunk.p_cnt)--;
887 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
888 		dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
889 			       fl->alloc_size, DMA_FROM_DEVICE);
890 	if (!skb) {
891 		__skb_put(newskb, SGE_RX_PULL_LEN);
892 		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
893 		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
894 				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
895 				   len - SGE_RX_PULL_LEN);
896 		newskb->len = len;
897 		newskb->data_len = len - SGE_RX_PULL_LEN;
898 		newskb->truesize += newskb->data_len;
899 	} else {
900 		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
901 				   sd->pg_chunk.page,
902 				   sd->pg_chunk.offset, len);
903 		newskb->len += len;
904 		newskb->data_len += len;
905 		newskb->truesize += len;
906 	}
907 
908 	fl->credits--;
909 	/*
910 	 * We do not refill FLs here, we let the caller do it to overlap a
911 	 * prefetch.
912 	 */
913 	return newskb;
914 }
915 
916 /**
917  *	get_imm_packet - return the next ingress packet buffer from a response
918  *	@resp: the response descriptor containing the packet data
919  *
920  *	Return a packet containing the immediate data of the given response.
921  */
922 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
923 {
924 	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
925 
926 	if (skb) {
927 		__skb_put(skb, IMMED_PKT_SIZE);
928 		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
929 	}
930 	return skb;
931 }
932 
933 /**
934  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
935  *	@skb: the packet
936  *
937  * 	Returns the number of Tx descriptors needed for the given Ethernet
938  * 	packet.  Ethernet packets require addition of WR and CPL headers.
939  */
940 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
941 {
942 	unsigned int flits;
943 
944 	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
945 		return 1;
946 
947 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
948 	if (skb_shinfo(skb)->gso_size)
949 		flits++;
950 	return flits_to_desc(flits);
951 }
952 
953 /*	map_skb - map a packet main body and its page fragments
954  *	@pdev: the PCI device
955  *	@skb: the packet
956  *	@addr: placeholder to save the mapped addresses
957  *
958  *	map the main body of an sk_buff and its page fragments, if any.
959  */
960 static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
961 		   dma_addr_t *addr)
962 {
963 	const skb_frag_t *fp, *end;
964 	const struct skb_shared_info *si;
965 
966 	if (skb_headlen(skb)) {
967 		*addr = dma_map_single(&pdev->dev, skb->data,
968 				       skb_headlen(skb), DMA_TO_DEVICE);
969 		if (dma_mapping_error(&pdev->dev, *addr))
970 			goto out_err;
971 		addr++;
972 	}
973 
974 	si = skb_shinfo(skb);
975 	end = &si->frags[si->nr_frags];
976 
977 	for (fp = si->frags; fp < end; fp++) {
978 		*addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
979 					 DMA_TO_DEVICE);
980 		if (dma_mapping_error(&pdev->dev, *addr))
981 			goto unwind;
982 		addr++;
983 	}
984 	return 0;
985 
986 unwind:
987 	while (fp-- > si->frags)
988 		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
989 			       DMA_TO_DEVICE);
990 
991 	dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb),
992 			 DMA_TO_DEVICE);
993 out_err:
994 	return -ENOMEM;
995 }
996 
997 /**
998  *	write_sgl - populate a scatter/gather list for a packet
999  *	@skb: the packet
1000  *	@sgp: the SGL to populate
1001  *	@start: start address of skb main body data to include in the SGL
1002  *	@len: length of skb main body data to include in the SGL
1003  *	@addr: the list of the mapped addresses
1004  *
1005  *	Copies the scatter/gather list for the buffers that make up a packet
1006  *	and returns the SGL size in 8-byte words.  The caller must size the SGL
1007  *	appropriately.
1008  */
1009 static inline unsigned int write_sgl(const struct sk_buff *skb,
1010 				     struct sg_ent *sgp, unsigned char *start,
1011 				     unsigned int len, const dma_addr_t *addr)
1012 {
1013 	unsigned int i, j = 0, k = 0, nfrags;
1014 
1015 	if (len) {
1016 		sgp->len[0] = cpu_to_be32(len);
1017 		sgp->addr[j++] = cpu_to_be64(addr[k++]);
1018 	}
1019 
1020 	nfrags = skb_shinfo(skb)->nr_frags;
1021 	for (i = 0; i < nfrags; i++) {
1022 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1023 
1024 		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
1025 		sgp->addr[j] = cpu_to_be64(addr[k++]);
1026 		j ^= 1;
1027 		if (j == 0)
1028 			++sgp;
1029 	}
1030 	if (j)
1031 		sgp->len[j] = 0;
1032 	return ((nfrags + (len != 0)) * 3) / 2 + j;
1033 }
1034 
1035 /**
1036  *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1037  *	@adap: the adapter
1038  *	@q: the Tx queue
1039  *
1040  *	Ring the doorbel if a Tx queue is asleep.  There is a natural race,
1041  *	where the HW is going to sleep just after we checked, however,
1042  *	then the interrupt handler will detect the outstanding TX packet
1043  *	and ring the doorbell for us.
1044  *
1045  *	When GTS is disabled we unconditionally ring the doorbell.
1046  */
1047 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1048 {
1049 #if USE_GTS
1050 	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1051 	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1052 		set_bit(TXQ_LAST_PKT_DB, &q->flags);
1053 		t3_write_reg(adap, A_SG_KDOORBELL,
1054 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1055 	}
1056 #else
1057 	wmb();			/* write descriptors before telling HW */
1058 	t3_write_reg(adap, A_SG_KDOORBELL,
1059 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1060 #endif
1061 }
1062 
1063 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1064 {
1065 #if SGE_NUM_GENBITS == 2
1066 	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1067 #endif
1068 }
1069 
1070 /**
1071  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1072  *	@ndesc: number of Tx descriptors spanned by the SGL
1073  *	@skb: the packet corresponding to the WR
1074  *	@d: first Tx descriptor to be written
1075  *	@pidx: index of above descriptors
1076  *	@q: the SGE Tx queue
1077  *	@sgl: the SGL
1078  *	@flits: number of flits to the start of the SGL in the first descriptor
1079  *	@sgl_flits: the SGL size in flits
1080  *	@gen: the Tx descriptor generation
1081  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1082  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1083  *
1084  *	Write a work request header and an associated SGL.  If the SGL is
1085  *	small enough to fit into one Tx descriptor it has already been written
1086  *	and we just need to write the WR header.  Otherwise we distribute the
1087  *	SGL across the number of descriptors it spans.
1088  */
1089 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1090 			     struct tx_desc *d, unsigned int pidx,
1091 			     const struct sge_txq *q,
1092 			     const struct sg_ent *sgl,
1093 			     unsigned int flits, unsigned int sgl_flits,
1094 			     unsigned int gen, __be32 wr_hi,
1095 			     __be32 wr_lo)
1096 {
1097 	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1098 	struct tx_sw_desc *sd = &q->sdesc[pidx];
1099 
1100 	sd->skb = skb;
1101 	if (need_skb_unmap()) {
1102 		sd->fragidx = 0;
1103 		sd->addr_idx = 0;
1104 		sd->sflit = flits;
1105 	}
1106 
1107 	if (likely(ndesc == 1)) {
1108 		sd->eop = 1;
1109 		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1110 				   V_WR_SGLSFLT(flits)) | wr_hi;
1111 		dma_wmb();
1112 		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1113 				   V_WR_GEN(gen)) | wr_lo;
1114 		wr_gen2(d, gen);
1115 	} else {
1116 		unsigned int ogen = gen;
1117 		const u64 *fp = (const u64 *)sgl;
1118 		struct work_request_hdr *wp = wrp;
1119 
1120 		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1121 				   V_WR_SGLSFLT(flits)) | wr_hi;
1122 
1123 		while (sgl_flits) {
1124 			unsigned int avail = WR_FLITS - flits;
1125 
1126 			if (avail > sgl_flits)
1127 				avail = sgl_flits;
1128 			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1129 			sgl_flits -= avail;
1130 			ndesc--;
1131 			if (!sgl_flits)
1132 				break;
1133 
1134 			fp += avail;
1135 			d++;
1136 			sd->eop = 0;
1137 			sd++;
1138 			if (++pidx == q->size) {
1139 				pidx = 0;
1140 				gen ^= 1;
1141 				d = q->desc;
1142 				sd = q->sdesc;
1143 			}
1144 
1145 			sd->skb = skb;
1146 			wrp = (struct work_request_hdr *)d;
1147 			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1148 					   V_WR_SGLSFLT(1)) | wr_hi;
1149 			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1150 							sgl_flits + 1)) |
1151 					   V_WR_GEN(gen)) | wr_lo;
1152 			wr_gen2(d, gen);
1153 			flits = 1;
1154 		}
1155 		sd->eop = 1;
1156 		wrp->wr_hi |= htonl(F_WR_EOP);
1157 		dma_wmb();
1158 		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1159 		wr_gen2((struct tx_desc *)wp, ogen);
1160 		WARN_ON(ndesc != 0);
1161 	}
1162 }
1163 
1164 /**
1165  *	write_tx_pkt_wr - write a TX_PKT work request
1166  *	@adap: the adapter
1167  *	@skb: the packet to send
1168  *	@pi: the egress interface
1169  *	@pidx: index of the first Tx descriptor to write
1170  *	@gen: the generation value to use
1171  *	@q: the Tx queue
1172  *	@ndesc: number of descriptors the packet will occupy
1173  *	@compl: the value of the COMPL bit to use
1174  *	@addr: address
1175  *
1176  *	Generate a TX_PKT work request to send the supplied packet.
1177  */
1178 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1179 			    const struct port_info *pi,
1180 			    unsigned int pidx, unsigned int gen,
1181 			    struct sge_txq *q, unsigned int ndesc,
1182 			    unsigned int compl, const dma_addr_t *addr)
1183 {
1184 	unsigned int flits, sgl_flits, cntrl, tso_info;
1185 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1186 	struct tx_desc *d = &q->desc[pidx];
1187 	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1188 
1189 	cpl->len = htonl(skb->len);
1190 	cntrl = V_TXPKT_INTF(pi->port_id);
1191 
1192 	if (skb_vlan_tag_present(skb))
1193 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1194 
1195 	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1196 	if (tso_info) {
1197 		int eth_type;
1198 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1199 
1200 		d->flit[2] = 0;
1201 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1202 		hdr->cntrl = htonl(cntrl);
1203 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
1204 		    CPL_ETH_II : CPL_ETH_II_VLAN;
1205 		tso_info |= V_LSO_ETH_TYPE(eth_type) |
1206 		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1207 		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1208 		hdr->lso_info = htonl(tso_info);
1209 		flits = 3;
1210 	} else {
1211 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1212 		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
1213 		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1214 		cpl->cntrl = htonl(cntrl);
1215 
1216 		if (skb->len <= WR_LEN - sizeof(*cpl)) {
1217 			q->sdesc[pidx].skb = NULL;
1218 			if (!skb->data_len)
1219 				skb_copy_from_linear_data(skb, &d->flit[2],
1220 							  skb->len);
1221 			else
1222 				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1223 
1224 			flits = (skb->len + 7) / 8 + 2;
1225 			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1226 					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1227 					      | F_WR_SOP | F_WR_EOP | compl);
1228 			dma_wmb();
1229 			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1230 					      V_WR_TID(q->token));
1231 			wr_gen2(d, gen);
1232 			dev_consume_skb_any(skb);
1233 			return;
1234 		}
1235 
1236 		flits = 2;
1237 	}
1238 
1239 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1240 	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1241 
1242 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1243 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1244 			 htonl(V_WR_TID(q->token)));
1245 }
1246 
1247 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1248 				    struct sge_qset *qs, struct sge_txq *q)
1249 {
1250 	netif_tx_stop_queue(txq);
1251 	set_bit(TXQ_ETH, &qs->txq_stopped);
1252 	q->stops++;
1253 }
1254 
1255 /**
1256  *	t3_eth_xmit - add a packet to the Ethernet Tx queue
1257  *	@skb: the packet
1258  *	@dev: the egress net device
1259  *
1260  *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
1261  */
1262 netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1263 {
1264 	int qidx;
1265 	unsigned int ndesc, pidx, credits, gen, compl;
1266 	const struct port_info *pi = netdev_priv(dev);
1267 	struct adapter *adap = pi->adapter;
1268 	struct netdev_queue *txq;
1269 	struct sge_qset *qs;
1270 	struct sge_txq *q;
1271 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1272 
1273 	/*
1274 	 * The chip min packet length is 9 octets but play safe and reject
1275 	 * anything shorter than an Ethernet header.
1276 	 */
1277 	if (unlikely(skb->len < ETH_HLEN)) {
1278 		dev_kfree_skb_any(skb);
1279 		return NETDEV_TX_OK;
1280 	}
1281 
1282 	qidx = skb_get_queue_mapping(skb);
1283 	qs = &pi->qs[qidx];
1284 	q = &qs->txq[TXQ_ETH];
1285 	txq = netdev_get_tx_queue(dev, qidx);
1286 
1287 	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1288 
1289 	credits = q->size - q->in_use;
1290 	ndesc = calc_tx_descs(skb);
1291 
1292 	if (unlikely(credits < ndesc)) {
1293 		t3_stop_tx_queue(txq, qs, q);
1294 		dev_err(&adap->pdev->dev,
1295 			"%s: Tx ring %u full while queue awake!\n",
1296 			dev->name, q->cntxt_id & 7);
1297 		return NETDEV_TX_BUSY;
1298 	}
1299 
1300 	/* Check if ethernet packet can't be sent as immediate data */
1301 	if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
1302 		if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1303 			dev_kfree_skb(skb);
1304 			return NETDEV_TX_OK;
1305 		}
1306 	}
1307 
1308 	q->in_use += ndesc;
1309 	if (unlikely(credits - ndesc < q->stop_thres)) {
1310 		t3_stop_tx_queue(txq, qs, q);
1311 
1312 		if (should_restart_tx(q) &&
1313 		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1314 			q->restarts++;
1315 			netif_tx_start_queue(txq);
1316 		}
1317 	}
1318 
1319 	gen = q->gen;
1320 	q->unacked += ndesc;
1321 	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1322 	q->unacked &= 7;
1323 	pidx = q->pidx;
1324 	q->pidx += ndesc;
1325 	if (q->pidx >= q->size) {
1326 		q->pidx -= q->size;
1327 		q->gen ^= 1;
1328 	}
1329 
1330 	/* update port statistics */
1331 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1332 		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1333 	if (skb_shinfo(skb)->gso_size)
1334 		qs->port_stats[SGE_PSTAT_TSO]++;
1335 	if (skb_vlan_tag_present(skb))
1336 		qs->port_stats[SGE_PSTAT_VLANINS]++;
1337 
1338 	/*
1339 	 * We do not use Tx completion interrupts to free DMAd Tx packets.
1340 	 * This is good for performance but means that we rely on new Tx
1341 	 * packets arriving to run the destructors of completed packets,
1342 	 * which open up space in their sockets' send queues.  Sometimes
1343 	 * we do not get such new packets causing Tx to stall.  A single
1344 	 * UDP transmitter is a good example of this situation.  We have
1345 	 * a clean up timer that periodically reclaims completed packets
1346 	 * but it doesn't run often enough (nor do we want it to) to prevent
1347 	 * lengthy stalls.  A solution to this problem is to run the
1348 	 * destructor early, after the packet is queued but before it's DMAd.
1349 	 * A cons is that we lie to socket memory accounting, but the amount
1350 	 * of extra memory is reasonable (limited by the number of Tx
1351 	 * descriptors), the packets do actually get freed quickly by new
1352 	 * packets almost always, and for protocols like TCP that wait for
1353 	 * acks to really free up the data the extra memory is even less.
1354 	 * On the positive side we run the destructors on the sending CPU
1355 	 * rather than on a potentially different completing CPU, usually a
1356 	 * good thing.  We also run them without holding our Tx queue lock,
1357 	 * unlike what reclaim_completed_tx() would otherwise do.
1358 	 *
1359 	 * Run the destructor before telling the DMA engine about the packet
1360 	 * to make sure it doesn't complete and get freed prematurely.
1361 	 */
1362 	if (likely(!skb_shared(skb)))
1363 		skb_orphan(skb);
1364 
1365 	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1366 	check_ring_tx_db(adap, q);
1367 	return NETDEV_TX_OK;
1368 }
1369 
1370 /**
1371  *	write_imm - write a packet into a Tx descriptor as immediate data
1372  *	@d: the Tx descriptor to write
1373  *	@skb: the packet
1374  *	@len: the length of packet data to write as immediate data
1375  *	@gen: the generation bit value to write
1376  *
1377  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1378  *	contains a work request at its beginning.  We must write the packet
1379  *	carefully so the SGE doesn't read it accidentally before it's written
1380  *	in its entirety.
1381  */
1382 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1383 			     unsigned int len, unsigned int gen)
1384 {
1385 	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1386 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1387 
1388 	if (likely(!skb->data_len))
1389 		memcpy(&to[1], &from[1], len - sizeof(*from));
1390 	else
1391 		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1392 
1393 	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1394 					V_WR_BCNTLFLT(len & 7));
1395 	dma_wmb();
1396 	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1397 					V_WR_LEN((len + 7) / 8));
1398 	wr_gen2(d, gen);
1399 	kfree_skb(skb);
1400 }
1401 
1402 /**
1403  *	check_desc_avail - check descriptor availability on a send queue
1404  *	@adap: the adapter
1405  *	@q: the send queue
1406  *	@skb: the packet needing the descriptors
1407  *	@ndesc: the number of Tx descriptors needed
1408  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1409  *
1410  *	Checks if the requested number of Tx descriptors is available on an
1411  *	SGE send queue.  If the queue is already suspended or not enough
1412  *	descriptors are available the packet is queued for later transmission.
1413  *	Must be called with the Tx queue locked.
1414  *
1415  *	Returns 0 if enough descriptors are available, 1 if there aren't
1416  *	enough descriptors and the packet has been queued, and 2 if the caller
1417  *	needs to retry because there weren't enough descriptors at the
1418  *	beginning of the call but some freed up in the mean time.
1419  */
1420 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1421 				   struct sk_buff *skb, unsigned int ndesc,
1422 				   unsigned int qid)
1423 {
1424 	if (unlikely(!skb_queue_empty(&q->sendq))) {
1425 	      addq_exit:__skb_queue_tail(&q->sendq, skb);
1426 		return 1;
1427 	}
1428 	if (unlikely(q->size - q->in_use < ndesc)) {
1429 		struct sge_qset *qs = txq_to_qset(q, qid);
1430 
1431 		set_bit(qid, &qs->txq_stopped);
1432 		smp_mb__after_atomic();
1433 
1434 		if (should_restart_tx(q) &&
1435 		    test_and_clear_bit(qid, &qs->txq_stopped))
1436 			return 2;
1437 
1438 		q->stops++;
1439 		goto addq_exit;
1440 	}
1441 	return 0;
1442 }
1443 
1444 /**
1445  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1446  *	@q: the SGE control Tx queue
1447  *
1448  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1449  *	that send only immediate data (presently just the control queues) and
1450  *	thus do not have any sk_buffs to release.
1451  */
1452 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1453 {
1454 	unsigned int reclaim = q->processed - q->cleaned;
1455 
1456 	q->in_use -= reclaim;
1457 	q->cleaned += reclaim;
1458 }
1459 
1460 static inline int immediate(const struct sk_buff *skb)
1461 {
1462 	return skb->len <= WR_LEN;
1463 }
1464 
1465 /**
1466  *	ctrl_xmit - send a packet through an SGE control Tx queue
1467  *	@adap: the adapter
1468  *	@q: the control queue
1469  *	@skb: the packet
1470  *
1471  *	Send a packet through an SGE control Tx queue.  Packets sent through
1472  *	a control queue must fit entirely as immediate data in a single Tx
1473  *	descriptor and have no page fragments.
1474  */
1475 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1476 		     struct sk_buff *skb)
1477 {
1478 	int ret;
1479 	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1480 
1481 	if (unlikely(!immediate(skb))) {
1482 		WARN_ON(1);
1483 		dev_kfree_skb(skb);
1484 		return NET_XMIT_SUCCESS;
1485 	}
1486 
1487 	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1488 	wrp->wr_lo = htonl(V_WR_TID(q->token));
1489 
1490 	spin_lock(&q->lock);
1491       again:reclaim_completed_tx_imm(q);
1492 
1493 	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1494 	if (unlikely(ret)) {
1495 		if (ret == 1) {
1496 			spin_unlock(&q->lock);
1497 			return NET_XMIT_CN;
1498 		}
1499 		goto again;
1500 	}
1501 
1502 	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1503 
1504 	q->in_use++;
1505 	if (++q->pidx >= q->size) {
1506 		q->pidx = 0;
1507 		q->gen ^= 1;
1508 	}
1509 	spin_unlock(&q->lock);
1510 	wmb();
1511 	t3_write_reg(adap, A_SG_KDOORBELL,
1512 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1513 	return NET_XMIT_SUCCESS;
1514 }
1515 
1516 /**
1517  *	restart_ctrlq - restart a suspended control queue
1518  *	@w: pointer to the work associated with this handler
1519  *
1520  *	Resumes transmission on a suspended Tx control queue.
1521  */
1522 static void restart_ctrlq(struct work_struct *w)
1523 {
1524 	struct sk_buff *skb;
1525 	struct sge_qset *qs = container_of(w, struct sge_qset,
1526 					   txq[TXQ_CTRL].qresume_task);
1527 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1528 
1529 	spin_lock(&q->lock);
1530       again:reclaim_completed_tx_imm(q);
1531 
1532 	while (q->in_use < q->size &&
1533 	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
1534 
1535 		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1536 
1537 		if (++q->pidx >= q->size) {
1538 			q->pidx = 0;
1539 			q->gen ^= 1;
1540 		}
1541 		q->in_use++;
1542 	}
1543 
1544 	if (!skb_queue_empty(&q->sendq)) {
1545 		set_bit(TXQ_CTRL, &qs->txq_stopped);
1546 		smp_mb__after_atomic();
1547 
1548 		if (should_restart_tx(q) &&
1549 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1550 			goto again;
1551 		q->stops++;
1552 	}
1553 
1554 	spin_unlock(&q->lock);
1555 	wmb();
1556 	t3_write_reg(qs->adap, A_SG_KDOORBELL,
1557 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1558 }
1559 
1560 /*
1561  * Send a management message through control queue 0
1562  */
1563 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1564 {
1565 	int ret;
1566 	local_bh_disable();
1567 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1568 	local_bh_enable();
1569 
1570 	return ret;
1571 }
1572 
1573 /**
1574  *	deferred_unmap_destructor - unmap a packet when it is freed
1575  *	@skb: the packet
1576  *
1577  *	This is the packet destructor used for Tx packets that need to remain
1578  *	mapped until they are freed rather than until their Tx descriptors are
1579  *	freed.
1580  */
1581 static void deferred_unmap_destructor(struct sk_buff *skb)
1582 {
1583 	int i;
1584 	const dma_addr_t *p;
1585 	const struct skb_shared_info *si;
1586 	const struct deferred_unmap_info *dui;
1587 
1588 	dui = (struct deferred_unmap_info *)skb->head;
1589 	p = dui->addr;
1590 
1591 	if (skb_tail_pointer(skb) - skb_transport_header(skb))
1592 		dma_unmap_single(&dui->pdev->dev, *p++,
1593 				 skb_tail_pointer(skb) - skb_transport_header(skb),
1594 				 DMA_TO_DEVICE);
1595 
1596 	si = skb_shinfo(skb);
1597 	for (i = 0; i < si->nr_frags; i++)
1598 		dma_unmap_page(&dui->pdev->dev, *p++,
1599 			       skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
1600 }
1601 
1602 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1603 				     const struct sg_ent *sgl, int sgl_flits)
1604 {
1605 	dma_addr_t *p;
1606 	struct deferred_unmap_info *dui;
1607 
1608 	dui = (struct deferred_unmap_info *)skb->head;
1609 	dui->pdev = pdev;
1610 	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1611 		*p++ = be64_to_cpu(sgl->addr[0]);
1612 		*p++ = be64_to_cpu(sgl->addr[1]);
1613 	}
1614 	if (sgl_flits)
1615 		*p = be64_to_cpu(sgl->addr[0]);
1616 }
1617 
1618 /**
1619  *	write_ofld_wr - write an offload work request
1620  *	@adap: the adapter
1621  *	@skb: the packet to send
1622  *	@q: the Tx queue
1623  *	@pidx: index of the first Tx descriptor to write
1624  *	@gen: the generation value to use
1625  *	@ndesc: number of descriptors the packet will occupy
1626  *	@addr: the address
1627  *
1628  *	Write an offload work request to send the supplied packet.  The packet
1629  *	data already carry the work request with most fields populated.
1630  */
1631 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1632 			  struct sge_txq *q, unsigned int pidx,
1633 			  unsigned int gen, unsigned int ndesc,
1634 			  const dma_addr_t *addr)
1635 {
1636 	unsigned int sgl_flits, flits;
1637 	struct work_request_hdr *from;
1638 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1639 	struct tx_desc *d = &q->desc[pidx];
1640 
1641 	if (immediate(skb)) {
1642 		q->sdesc[pidx].skb = NULL;
1643 		write_imm(d, skb, skb->len, gen);
1644 		return;
1645 	}
1646 
1647 	/* Only TX_DATA builds SGLs */
1648 
1649 	from = (struct work_request_hdr *)skb->data;
1650 	memcpy(&d->flit[1], &from[1],
1651 	       skb_transport_offset(skb) - sizeof(*from));
1652 
1653 	flits = skb_transport_offset(skb) / 8;
1654 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1655 	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1656 			      skb_tail_pointer(skb) - skb_transport_header(skb),
1657 			      addr);
1658 	if (need_skb_unmap()) {
1659 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1660 		skb->destructor = deferred_unmap_destructor;
1661 	}
1662 
1663 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1664 			 gen, from->wr_hi, from->wr_lo);
1665 }
1666 
1667 /**
1668  *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1669  *	@skb: the packet
1670  *
1671  * 	Returns the number of Tx descriptors needed for the given offload
1672  * 	packet.  These packets are already fully constructed.
1673  */
1674 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1675 {
1676 	unsigned int flits, cnt;
1677 
1678 	if (skb->len <= WR_LEN)
1679 		return 1;	/* packet fits as immediate data */
1680 
1681 	flits = skb_transport_offset(skb) / 8;	/* headers */
1682 	cnt = skb_shinfo(skb)->nr_frags;
1683 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1684 		cnt++;
1685 	return flits_to_desc(flits + sgl_len(cnt));
1686 }
1687 
1688 /**
1689  *	ofld_xmit - send a packet through an offload queue
1690  *	@adap: the adapter
1691  *	@q: the Tx offload queue
1692  *	@skb: the packet
1693  *
1694  *	Send an offload packet through an SGE offload queue.
1695  */
1696 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1697 		     struct sk_buff *skb)
1698 {
1699 	int ret;
1700 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1701 
1702 	spin_lock(&q->lock);
1703 again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1704 
1705 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1706 	if (unlikely(ret)) {
1707 		if (ret == 1) {
1708 			skb->priority = ndesc;	/* save for restart */
1709 			spin_unlock(&q->lock);
1710 			return NET_XMIT_CN;
1711 		}
1712 		goto again;
1713 	}
1714 
1715 	if (!immediate(skb) &&
1716 	    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1717 		spin_unlock(&q->lock);
1718 		return NET_XMIT_SUCCESS;
1719 	}
1720 
1721 	gen = q->gen;
1722 	q->in_use += ndesc;
1723 	pidx = q->pidx;
1724 	q->pidx += ndesc;
1725 	if (q->pidx >= q->size) {
1726 		q->pidx -= q->size;
1727 		q->gen ^= 1;
1728 	}
1729 	spin_unlock(&q->lock);
1730 
1731 	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1732 	check_ring_tx_db(adap, q);
1733 	return NET_XMIT_SUCCESS;
1734 }
1735 
1736 /**
1737  *	restart_offloadq - restart a suspended offload queue
1738  *	@w: pointer to the work associated with this handler
1739  *
1740  *	Resumes transmission on a suspended Tx offload queue.
1741  */
1742 static void restart_offloadq(struct work_struct *w)
1743 {
1744 	struct sk_buff *skb;
1745 	struct sge_qset *qs = container_of(w, struct sge_qset,
1746 					   txq[TXQ_OFLD].qresume_task);
1747 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
1748 	const struct port_info *pi = netdev_priv(qs->netdev);
1749 	struct adapter *adap = pi->adapter;
1750 	unsigned int written = 0;
1751 
1752 	spin_lock(&q->lock);
1753 again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1754 
1755 	while ((skb = skb_peek(&q->sendq)) != NULL) {
1756 		unsigned int gen, pidx;
1757 		unsigned int ndesc = skb->priority;
1758 
1759 		if (unlikely(q->size - q->in_use < ndesc)) {
1760 			set_bit(TXQ_OFLD, &qs->txq_stopped);
1761 			smp_mb__after_atomic();
1762 
1763 			if (should_restart_tx(q) &&
1764 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1765 				goto again;
1766 			q->stops++;
1767 			break;
1768 		}
1769 
1770 		if (!immediate(skb) &&
1771 		    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1772 			break;
1773 
1774 		gen = q->gen;
1775 		q->in_use += ndesc;
1776 		pidx = q->pidx;
1777 		q->pidx += ndesc;
1778 		written += ndesc;
1779 		if (q->pidx >= q->size) {
1780 			q->pidx -= q->size;
1781 			q->gen ^= 1;
1782 		}
1783 		__skb_unlink(skb, &q->sendq);
1784 		spin_unlock(&q->lock);
1785 
1786 		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1787 			      (dma_addr_t *)skb->head);
1788 		spin_lock(&q->lock);
1789 	}
1790 	spin_unlock(&q->lock);
1791 
1792 #if USE_GTS
1793 	set_bit(TXQ_RUNNING, &q->flags);
1794 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
1795 #endif
1796 	wmb();
1797 	if (likely(written))
1798 		t3_write_reg(adap, A_SG_KDOORBELL,
1799 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1800 }
1801 
1802 /**
1803  *	queue_set - return the queue set a packet should use
1804  *	@skb: the packet
1805  *
1806  *	Maps a packet to the SGE queue set it should use.  The desired queue
1807  *	set is carried in bits 1-3 in the packet's priority.
1808  */
1809 static inline int queue_set(const struct sk_buff *skb)
1810 {
1811 	return skb->priority >> 1;
1812 }
1813 
1814 /**
1815  *	is_ctrl_pkt - return whether an offload packet is a control packet
1816  *	@skb: the packet
1817  *
1818  *	Determines whether an offload packet should use an OFLD or a CTRL
1819  *	Tx queue.  This is indicated by bit 0 in the packet's priority.
1820  */
1821 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1822 {
1823 	return skb->priority & 1;
1824 }
1825 
1826 /**
1827  *	t3_offload_tx - send an offload packet
1828  *	@tdev: the offload device to send to
1829  *	@skb: the packet
1830  *
1831  *	Sends an offload packet.  We use the packet priority to select the
1832  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1833  *	should be sent as regular or control, bits 1-3 select the queue set.
1834  */
1835 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1836 {
1837 	struct adapter *adap = tdev2adap(tdev);
1838 	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1839 
1840 	if (unlikely(is_ctrl_pkt(skb)))
1841 		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1842 
1843 	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1844 }
1845 
1846 /**
1847  *	offload_enqueue - add an offload packet to an SGE offload receive queue
1848  *	@q: the SGE response queue
1849  *	@skb: the packet
1850  *
1851  *	Add a new offload packet to an SGE response queue's offload packet
1852  *	queue.  If the packet is the first on the queue it schedules the RX
1853  *	softirq to process the queue.
1854  */
1855 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1856 {
1857 	int was_empty = skb_queue_empty(&q->rx_queue);
1858 
1859 	__skb_queue_tail(&q->rx_queue, skb);
1860 
1861 	if (was_empty) {
1862 		struct sge_qset *qs = rspq_to_qset(q);
1863 
1864 		napi_schedule(&qs->napi);
1865 	}
1866 }
1867 
1868 /**
1869  *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1870  *	@tdev: the offload device that will be receiving the packets
1871  *	@q: the SGE response queue that assembled the bundle
1872  *	@skbs: the partial bundle
1873  *	@n: the number of packets in the bundle
1874  *
1875  *	Delivers a (partial) bundle of Rx offload packets to an offload device.
1876  */
1877 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1878 					  struct sge_rspq *q,
1879 					  struct sk_buff *skbs[], int n)
1880 {
1881 	if (n) {
1882 		q->offload_bundles++;
1883 		tdev->recv(tdev, skbs, n);
1884 	}
1885 }
1886 
1887 /**
1888  *	ofld_poll - NAPI handler for offload packets in interrupt mode
1889  *	@napi: the network device doing the polling
1890  *	@budget: polling budget
1891  *
1892  *	The NAPI handler for offload packets when a response queue is serviced
1893  *	by the hard interrupt handler, i.e., when it's operating in non-polling
1894  *	mode.  Creates small packet batches and sends them through the offload
1895  *	receive handler.  Batches need to be of modest size as we do prefetches
1896  *	on the packets in each.
1897  */
1898 static int ofld_poll(struct napi_struct *napi, int budget)
1899 {
1900 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1901 	struct sge_rspq *q = &qs->rspq;
1902 	struct adapter *adapter = qs->adap;
1903 	int work_done = 0;
1904 
1905 	while (work_done < budget) {
1906 		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1907 		struct sk_buff_head queue;
1908 		int ngathered;
1909 
1910 		spin_lock_irq(&q->lock);
1911 		__skb_queue_head_init(&queue);
1912 		skb_queue_splice_init(&q->rx_queue, &queue);
1913 		if (skb_queue_empty(&queue)) {
1914 			napi_complete_done(napi, work_done);
1915 			spin_unlock_irq(&q->lock);
1916 			return work_done;
1917 		}
1918 		spin_unlock_irq(&q->lock);
1919 
1920 		ngathered = 0;
1921 		skb_queue_walk_safe(&queue, skb, tmp) {
1922 			if (work_done >= budget)
1923 				break;
1924 			work_done++;
1925 
1926 			__skb_unlink(skb, &queue);
1927 			prefetch(skb->data);
1928 			skbs[ngathered] = skb;
1929 			if (++ngathered == RX_BUNDLE_SIZE) {
1930 				q->offload_bundles++;
1931 				adapter->tdev.recv(&adapter->tdev, skbs,
1932 						   ngathered);
1933 				ngathered = 0;
1934 			}
1935 		}
1936 		if (!skb_queue_empty(&queue)) {
1937 			/* splice remaining packets back onto Rx queue */
1938 			spin_lock_irq(&q->lock);
1939 			skb_queue_splice(&queue, &q->rx_queue);
1940 			spin_unlock_irq(&q->lock);
1941 		}
1942 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1943 	}
1944 
1945 	return work_done;
1946 }
1947 
1948 /**
1949  *	rx_offload - process a received offload packet
1950  *	@tdev: the offload device receiving the packet
1951  *	@rq: the response queue that received the packet
1952  *	@skb: the packet
1953  *	@rx_gather: a gather list of packets if we are building a bundle
1954  *	@gather_idx: index of the next available slot in the bundle
1955  *
1956  *	Process an ingress offload pakcet and add it to the offload ingress
1957  *	queue. 	Returns the index of the next available slot in the bundle.
1958  */
1959 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1960 			     struct sk_buff *skb, struct sk_buff *rx_gather[],
1961 			     unsigned int gather_idx)
1962 {
1963 	skb_reset_mac_header(skb);
1964 	skb_reset_network_header(skb);
1965 	skb_reset_transport_header(skb);
1966 
1967 	if (rq->polling) {
1968 		rx_gather[gather_idx++] = skb;
1969 		if (gather_idx == RX_BUNDLE_SIZE) {
1970 			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1971 			gather_idx = 0;
1972 			rq->offload_bundles++;
1973 		}
1974 	} else
1975 		offload_enqueue(rq, skb);
1976 
1977 	return gather_idx;
1978 }
1979 
1980 /**
1981  *	restart_tx - check whether to restart suspended Tx queues
1982  *	@qs: the queue set to resume
1983  *
1984  *	Restarts suspended Tx queues of an SGE queue set if they have enough
1985  *	free resources to resume operation.
1986  */
1987 static void restart_tx(struct sge_qset *qs)
1988 {
1989 	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1990 	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
1991 	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1992 		qs->txq[TXQ_ETH].restarts++;
1993 		if (netif_running(qs->netdev))
1994 			netif_tx_wake_queue(qs->tx_q);
1995 	}
1996 
1997 	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1998 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1999 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2000 		qs->txq[TXQ_OFLD].restarts++;
2001 
2002 		/* The work can be quite lengthy so we use driver's own queue */
2003 		queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
2004 	}
2005 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2006 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2007 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2008 		qs->txq[TXQ_CTRL].restarts++;
2009 
2010 		/* The work can be quite lengthy so we use driver's own queue */
2011 		queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
2012 	}
2013 }
2014 
2015 /**
2016  *	cxgb3_arp_process - process an ARP request probing a private IP address
2017  *	@pi: the port info
2018  *	@skb: the skbuff containing the ARP request
2019  *
2020  *	Check if the ARP request is probing the private IP address
2021  *	dedicated to iSCSI, generate an ARP reply if so.
2022  */
2023 static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2024 {
2025 	struct net_device *dev = skb->dev;
2026 	struct arphdr *arp;
2027 	unsigned char *arp_ptr;
2028 	unsigned char *sha;
2029 	__be32 sip, tip;
2030 
2031 	if (!dev)
2032 		return;
2033 
2034 	skb_reset_network_header(skb);
2035 	arp = arp_hdr(skb);
2036 
2037 	if (arp->ar_op != htons(ARPOP_REQUEST))
2038 		return;
2039 
2040 	arp_ptr = (unsigned char *)(arp + 1);
2041 	sha = arp_ptr;
2042 	arp_ptr += dev->addr_len;
2043 	memcpy(&sip, arp_ptr, sizeof(sip));
2044 	arp_ptr += sizeof(sip);
2045 	arp_ptr += dev->addr_len;
2046 	memcpy(&tip, arp_ptr, sizeof(tip));
2047 
2048 	if (tip != pi->iscsi_ipv4addr)
2049 		return;
2050 
2051 	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2052 		 pi->iscsic.mac_addr, sha);
2053 
2054 }
2055 
2056 static inline int is_arp(struct sk_buff *skb)
2057 {
2058 	return skb->protocol == htons(ETH_P_ARP);
2059 }
2060 
2061 static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2062 					struct sk_buff *skb)
2063 {
2064 	if (is_arp(skb)) {
2065 		cxgb3_arp_process(pi, skb);
2066 		return;
2067 	}
2068 
2069 	if (pi->iscsic.recv)
2070 		pi->iscsic.recv(pi, skb);
2071 
2072 }
2073 
2074 /**
2075  *	rx_eth - process an ingress ethernet packet
2076  *	@adap: the adapter
2077  *	@rq: the response queue that received the packet
2078  *	@skb: the packet
2079  *	@pad: padding
2080  *	@lro: large receive offload
2081  *
2082  *	Process an ingress ethernet pakcet and deliver it to the stack.
2083  *	The padding is 2 if the packet was delivered in an Rx buffer and 0
2084  *	if it was immediate data in a response.
2085  */
2086 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2087 		   struct sk_buff *skb, int pad, int lro)
2088 {
2089 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2090 	struct sge_qset *qs = rspq_to_qset(rq);
2091 	struct port_info *pi;
2092 
2093 	skb_pull(skb, sizeof(*p) + pad);
2094 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2095 	pi = netdev_priv(skb->dev);
2096 	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2097 	    p->csum == htons(0xffff) && !p->fragment) {
2098 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2099 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2100 	} else
2101 		skb_checksum_none_assert(skb);
2102 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2103 
2104 	if (p->vlan_valid) {
2105 		qs->port_stats[SGE_PSTAT_VLANEX]++;
2106 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2107 	}
2108 	if (rq->polling) {
2109 		if (lro)
2110 			napi_gro_receive(&qs->napi, skb);
2111 		else {
2112 			if (unlikely(pi->iscsic.flags))
2113 				cxgb3_process_iscsi_prov_pack(pi, skb);
2114 			netif_receive_skb(skb);
2115 		}
2116 	} else
2117 		netif_rx(skb);
2118 }
2119 
2120 static inline int is_eth_tcp(u32 rss)
2121 {
2122 	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2123 }
2124 
2125 /**
2126  *	lro_add_page - add a page chunk to an LRO session
2127  *	@adap: the adapter
2128  *	@qs: the associated queue set
2129  *	@fl: the free list containing the page chunk to add
2130  *	@len: packet length
2131  *	@complete: Indicates the last fragment of a frame
2132  *
2133  *	Add a received packet contained in a page chunk to an existing LRO
2134  *	session.
2135  */
2136 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2137 			 struct sge_fl *fl, int len, int complete)
2138 {
2139 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2140 	struct port_info *pi = netdev_priv(qs->netdev);
2141 	struct sk_buff *skb = NULL;
2142 	struct cpl_rx_pkt *cpl;
2143 	skb_frag_t *rx_frag;
2144 	int nr_frags;
2145 	int offset = 0;
2146 
2147 	if (!qs->nomem) {
2148 		skb = napi_get_frags(&qs->napi);
2149 		qs->nomem = !skb;
2150 	}
2151 
2152 	fl->credits--;
2153 
2154 	dma_sync_single_for_cpu(&adap->pdev->dev,
2155 				dma_unmap_addr(sd, dma_addr),
2156 				fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
2157 
2158 	(*sd->pg_chunk.p_cnt)--;
2159 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2160 		dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
2161 			       fl->alloc_size, DMA_FROM_DEVICE);
2162 
2163 	if (!skb) {
2164 		put_page(sd->pg_chunk.page);
2165 		if (complete)
2166 			qs->nomem = 0;
2167 		return;
2168 	}
2169 
2170 	rx_frag = skb_shinfo(skb)->frags;
2171 	nr_frags = skb_shinfo(skb)->nr_frags;
2172 
2173 	if (!nr_frags) {
2174 		offset = 2 + sizeof(struct cpl_rx_pkt);
2175 		cpl = qs->lro_va = sd->pg_chunk.va + 2;
2176 
2177 		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2178 		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
2179 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2180 			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2181 		} else
2182 			skb->ip_summed = CHECKSUM_NONE;
2183 	} else
2184 		cpl = qs->lro_va;
2185 
2186 	len -= offset;
2187 
2188 	rx_frag += nr_frags;
2189 	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2190 	skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
2191 	skb_frag_size_set(rx_frag, len);
2192 
2193 	skb->len += len;
2194 	skb->data_len += len;
2195 	skb->truesize += len;
2196 	skb_shinfo(skb)->nr_frags++;
2197 
2198 	if (!complete)
2199 		return;
2200 
2201 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2202 
2203 	if (cpl->vlan_valid) {
2204 		qs->port_stats[SGE_PSTAT_VLANEX]++;
2205 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
2206 	}
2207 	napi_gro_frags(&qs->napi);
2208 }
2209 
2210 /**
2211  *	handle_rsp_cntrl_info - handles control information in a response
2212  *	@qs: the queue set corresponding to the response
2213  *	@flags: the response control flags
2214  *
2215  *	Handles the control information of an SGE response, such as GTS
2216  *	indications and completion credits for the queue set's Tx queues.
2217  *	HW coalesces credits, we don't do any extra SW coalescing.
2218  */
2219 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2220 {
2221 	unsigned int credits;
2222 
2223 #if USE_GTS
2224 	if (flags & F_RSPD_TXQ0_GTS)
2225 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2226 #endif
2227 
2228 	credits = G_RSPD_TXQ0_CR(flags);
2229 	if (credits)
2230 		qs->txq[TXQ_ETH].processed += credits;
2231 
2232 	credits = G_RSPD_TXQ2_CR(flags);
2233 	if (credits)
2234 		qs->txq[TXQ_CTRL].processed += credits;
2235 
2236 # if USE_GTS
2237 	if (flags & F_RSPD_TXQ1_GTS)
2238 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2239 # endif
2240 	credits = G_RSPD_TXQ1_CR(flags);
2241 	if (credits)
2242 		qs->txq[TXQ_OFLD].processed += credits;
2243 }
2244 
2245 /**
2246  *	check_ring_db - check if we need to ring any doorbells
2247  *	@adap: the adapter
2248  *	@qs: the queue set whose Tx queues are to be examined
2249  *	@sleeping: indicates which Tx queue sent GTS
2250  *
2251  *	Checks if some of a queue set's Tx queues need to ring their doorbells
2252  *	to resume transmission after idling while they still have unprocessed
2253  *	descriptors.
2254  */
2255 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2256 			  unsigned int sleeping)
2257 {
2258 	if (sleeping & F_RSPD_TXQ0_GTS) {
2259 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
2260 
2261 		if (txq->cleaned + txq->in_use != txq->processed &&
2262 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2263 			set_bit(TXQ_RUNNING, &txq->flags);
2264 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2265 				     V_EGRCNTX(txq->cntxt_id));
2266 		}
2267 	}
2268 
2269 	if (sleeping & F_RSPD_TXQ1_GTS) {
2270 		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2271 
2272 		if (txq->cleaned + txq->in_use != txq->processed &&
2273 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2274 			set_bit(TXQ_RUNNING, &txq->flags);
2275 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2276 				     V_EGRCNTX(txq->cntxt_id));
2277 		}
2278 	}
2279 }
2280 
2281 /**
2282  *	is_new_response - check if a response is newly written
2283  *	@r: the response descriptor
2284  *	@q: the response queue
2285  *
2286  *	Returns true if a response descriptor contains a yet unprocessed
2287  *	response.
2288  */
2289 static inline int is_new_response(const struct rsp_desc *r,
2290 				  const struct sge_rspq *q)
2291 {
2292 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2293 }
2294 
2295 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2296 {
2297 	q->pg_skb = NULL;
2298 	q->rx_recycle_buf = 0;
2299 }
2300 
2301 #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2302 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2303 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2304 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2305 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2306 
2307 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2308 #define NOMEM_INTR_DELAY 2500
2309 
2310 /**
2311  *	process_responses - process responses from an SGE response queue
2312  *	@adap: the adapter
2313  *	@qs: the queue set to which the response queue belongs
2314  *	@budget: how many responses can be processed in this round
2315  *
2316  *	Process responses from an SGE response queue up to the supplied budget.
2317  *	Responses include received packets as well as credits and other events
2318  *	for the queues that belong to the response queue's queue set.
2319  *	A negative budget is effectively unlimited.
2320  *
2321  *	Additionally choose the interrupt holdoff time for the next interrupt
2322  *	on this queue.  If the system is under memory shortage use a fairly
2323  *	long delay to help recovery.
2324  */
2325 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2326 			     int budget)
2327 {
2328 	struct sge_rspq *q = &qs->rspq;
2329 	struct rsp_desc *r = &q->desc[q->cidx];
2330 	int budget_left = budget;
2331 	unsigned int sleeping = 0;
2332 	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2333 	int ngathered = 0;
2334 
2335 	q->next_holdoff = q->holdoff_tmr;
2336 
2337 	while (likely(budget_left && is_new_response(r, q))) {
2338 		int packet_complete, eth, ethpad = 2;
2339 		int lro = !!(qs->netdev->features & NETIF_F_GRO);
2340 		struct sk_buff *skb = NULL;
2341 		u32 len, flags;
2342 		__be32 rss_hi, rss_lo;
2343 
2344 		dma_rmb();
2345 		eth = r->rss_hdr.opcode == CPL_RX_PKT;
2346 		rss_hi = *(const __be32 *)r;
2347 		rss_lo = r->rss_hdr.rss_hash_val;
2348 		flags = ntohl(r->flags);
2349 
2350 		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2351 			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2352 			if (!skb)
2353 				goto no_mem;
2354 
2355 			__skb_put_data(skb, r, AN_PKT_SIZE);
2356 			skb->data[0] = CPL_ASYNC_NOTIF;
2357 			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2358 			q->async_notif++;
2359 		} else if (flags & F_RSPD_IMM_DATA_VALID) {
2360 			skb = get_imm_packet(r);
2361 			if (unlikely(!skb)) {
2362 no_mem:
2363 				q->next_holdoff = NOMEM_INTR_DELAY;
2364 				q->nomem++;
2365 				/* consume one credit since we tried */
2366 				budget_left--;
2367 				break;
2368 			}
2369 			q->imm_data++;
2370 			ethpad = 0;
2371 		} else if ((len = ntohl(r->len_cq)) != 0) {
2372 			struct sge_fl *fl;
2373 
2374 			lro &= eth && is_eth_tcp(rss_hi);
2375 
2376 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2377 			if (fl->use_pages) {
2378 				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2379 
2380 				net_prefetch(addr);
2381 				__refill_fl(adap, fl);
2382 				if (lro > 0) {
2383 					lro_add_page(adap, qs, fl,
2384 						     G_RSPD_LEN(len),
2385 						     flags & F_RSPD_EOP);
2386 					goto next_fl;
2387 				}
2388 
2389 				skb = get_packet_pg(adap, fl, q,
2390 						    G_RSPD_LEN(len),
2391 						    eth ?
2392 						    SGE_RX_DROP_THRES : 0);
2393 				q->pg_skb = skb;
2394 			} else
2395 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
2396 						 eth ? SGE_RX_DROP_THRES : 0);
2397 			if (unlikely(!skb)) {
2398 				if (!eth)
2399 					goto no_mem;
2400 				q->rx_drops++;
2401 			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2402 				__skb_pull(skb, 2);
2403 next_fl:
2404 			if (++fl->cidx == fl->size)
2405 				fl->cidx = 0;
2406 		} else
2407 			q->pure_rsps++;
2408 
2409 		if (flags & RSPD_CTRL_MASK) {
2410 			sleeping |= flags & RSPD_GTS_MASK;
2411 			handle_rsp_cntrl_info(qs, flags);
2412 		}
2413 
2414 		r++;
2415 		if (unlikely(++q->cidx == q->size)) {
2416 			q->cidx = 0;
2417 			q->gen ^= 1;
2418 			r = q->desc;
2419 		}
2420 		prefetch(r);
2421 
2422 		if (++q->credits >= (q->size / 4)) {
2423 			refill_rspq(adap, q, q->credits);
2424 			q->credits = 0;
2425 		}
2426 
2427 		packet_complete = flags &
2428 				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2429 				   F_RSPD_ASYNC_NOTIF);
2430 
2431 		if (skb != NULL && packet_complete) {
2432 			if (eth)
2433 				rx_eth(adap, q, skb, ethpad, lro);
2434 			else {
2435 				q->offload_pkts++;
2436 				/* Preserve the RSS info in csum & priority */
2437 				skb->csum = rss_hi;
2438 				skb->priority = rss_lo;
2439 				ngathered = rx_offload(&adap->tdev, q, skb,
2440 						       offload_skbs,
2441 						       ngathered);
2442 			}
2443 
2444 			if (flags & F_RSPD_EOP)
2445 				clear_rspq_bufstate(q);
2446 		}
2447 		--budget_left;
2448 	}
2449 
2450 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2451 
2452 	if (sleeping)
2453 		check_ring_db(adap, qs, sleeping);
2454 
2455 	smp_mb();		/* commit Tx queue .processed updates */
2456 	if (unlikely(qs->txq_stopped != 0))
2457 		restart_tx(qs);
2458 
2459 	budget -= budget_left;
2460 	return budget;
2461 }
2462 
2463 static inline int is_pure_response(const struct rsp_desc *r)
2464 {
2465 	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2466 
2467 	return (n | r->len_cq) == 0;
2468 }
2469 
2470 /**
2471  *	napi_rx_handler - the NAPI handler for Rx processing
2472  *	@napi: the napi instance
2473  *	@budget: how many packets we can process in this round
2474  *
2475  *	Handler for new data events when using NAPI.
2476  */
2477 static int napi_rx_handler(struct napi_struct *napi, int budget)
2478 {
2479 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2480 	struct adapter *adap = qs->adap;
2481 	int work_done = process_responses(adap, qs, budget);
2482 
2483 	if (likely(work_done < budget)) {
2484 		napi_complete_done(napi, work_done);
2485 
2486 		/*
2487 		 * Because we don't atomically flush the following
2488 		 * write it is possible that in very rare cases it can
2489 		 * reach the device in a way that races with a new
2490 		 * response being written plus an error interrupt
2491 		 * causing the NAPI interrupt handler below to return
2492 		 * unhandled status to the OS.  To protect against
2493 		 * this would require flushing the write and doing
2494 		 * both the write and the flush with interrupts off.
2495 		 * Way too expensive and unjustifiable given the
2496 		 * rarity of the race.
2497 		 *
2498 		 * The race cannot happen at all with MSI-X.
2499 		 */
2500 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2501 			     V_NEWTIMER(qs->rspq.next_holdoff) |
2502 			     V_NEWINDEX(qs->rspq.cidx));
2503 	}
2504 	return work_done;
2505 }
2506 
2507 /*
2508  * Returns true if the device is already scheduled for polling.
2509  */
2510 static inline int napi_is_scheduled(struct napi_struct *napi)
2511 {
2512 	return test_bit(NAPI_STATE_SCHED, &napi->state);
2513 }
2514 
2515 /**
2516  *	process_pure_responses - process pure responses from a response queue
2517  *	@adap: the adapter
2518  *	@qs: the queue set owning the response queue
2519  *	@r: the first pure response to process
2520  *
2521  *	A simpler version of process_responses() that handles only pure (i.e.,
2522  *	non data-carrying) responses.  Such respones are too light-weight to
2523  *	justify calling a softirq under NAPI, so we handle them specially in
2524  *	the interrupt handler.  The function is called with a pointer to a
2525  *	response, which the caller must ensure is a valid pure response.
2526  *
2527  *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2528  */
2529 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2530 				  struct rsp_desc *r)
2531 {
2532 	struct sge_rspq *q = &qs->rspq;
2533 	unsigned int sleeping = 0;
2534 
2535 	do {
2536 		u32 flags = ntohl(r->flags);
2537 
2538 		r++;
2539 		if (unlikely(++q->cidx == q->size)) {
2540 			q->cidx = 0;
2541 			q->gen ^= 1;
2542 			r = q->desc;
2543 		}
2544 		prefetch(r);
2545 
2546 		if (flags & RSPD_CTRL_MASK) {
2547 			sleeping |= flags & RSPD_GTS_MASK;
2548 			handle_rsp_cntrl_info(qs, flags);
2549 		}
2550 
2551 		q->pure_rsps++;
2552 		if (++q->credits >= (q->size / 4)) {
2553 			refill_rspq(adap, q, q->credits);
2554 			q->credits = 0;
2555 		}
2556 		if (!is_new_response(r, q))
2557 			break;
2558 		dma_rmb();
2559 	} while (is_pure_response(r));
2560 
2561 	if (sleeping)
2562 		check_ring_db(adap, qs, sleeping);
2563 
2564 	smp_mb();		/* commit Tx queue .processed updates */
2565 	if (unlikely(qs->txq_stopped != 0))
2566 		restart_tx(qs);
2567 
2568 	return is_new_response(r, q);
2569 }
2570 
2571 /**
2572  *	handle_responses - decide what to do with new responses in NAPI mode
2573  *	@adap: the adapter
2574  *	@q: the response queue
2575  *
2576  *	This is used by the NAPI interrupt handlers to decide what to do with
2577  *	new SGE responses.  If there are no new responses it returns -1.  If
2578  *	there are new responses and they are pure (i.e., non-data carrying)
2579  *	it handles them straight in hard interrupt context as they are very
2580  *	cheap and don't deliver any packets.  Finally, if there are any data
2581  *	signaling responses it schedules the NAPI handler.  Returns 1 if it
2582  *	schedules NAPI, 0 if all new responses were pure.
2583  *
2584  *	The caller must ascertain NAPI is not already running.
2585  */
2586 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2587 {
2588 	struct sge_qset *qs = rspq_to_qset(q);
2589 	struct rsp_desc *r = &q->desc[q->cidx];
2590 
2591 	if (!is_new_response(r, q))
2592 		return -1;
2593 	dma_rmb();
2594 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2595 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2596 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2597 		return 0;
2598 	}
2599 	napi_schedule(&qs->napi);
2600 	return 1;
2601 }
2602 
2603 /*
2604  * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2605  * (i.e., response queue serviced in hard interrupt).
2606  */
2607 static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2608 {
2609 	struct sge_qset *qs = cookie;
2610 	struct adapter *adap = qs->adap;
2611 	struct sge_rspq *q = &qs->rspq;
2612 
2613 	spin_lock(&q->lock);
2614 	if (process_responses(adap, qs, -1) == 0)
2615 		q->unhandled_irqs++;
2616 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2617 		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2618 	spin_unlock(&q->lock);
2619 	return IRQ_HANDLED;
2620 }
2621 
2622 /*
2623  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2624  * (i.e., response queue serviced by NAPI polling).
2625  */
2626 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2627 {
2628 	struct sge_qset *qs = cookie;
2629 	struct sge_rspq *q = &qs->rspq;
2630 
2631 	spin_lock(&q->lock);
2632 
2633 	if (handle_responses(qs->adap, q) < 0)
2634 		q->unhandled_irqs++;
2635 	spin_unlock(&q->lock);
2636 	return IRQ_HANDLED;
2637 }
2638 
2639 /*
2640  * The non-NAPI MSI interrupt handler.  This needs to handle data events from
2641  * SGE response queues as well as error and other async events as they all use
2642  * the same MSI vector.  We use one SGE response queue per port in this mode
2643  * and protect all response queues with queue 0's lock.
2644  */
2645 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2646 {
2647 	int new_packets = 0;
2648 	struct adapter *adap = cookie;
2649 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2650 
2651 	spin_lock(&q->lock);
2652 
2653 	if (process_responses(adap, &adap->sge.qs[0], -1)) {
2654 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2655 			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2656 		new_packets = 1;
2657 	}
2658 
2659 	if (adap->params.nports == 2 &&
2660 	    process_responses(adap, &adap->sge.qs[1], -1)) {
2661 		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2662 
2663 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2664 			     V_NEWTIMER(q1->next_holdoff) |
2665 			     V_NEWINDEX(q1->cidx));
2666 		new_packets = 1;
2667 	}
2668 
2669 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2670 		q->unhandled_irqs++;
2671 
2672 	spin_unlock(&q->lock);
2673 	return IRQ_HANDLED;
2674 }
2675 
2676 static int rspq_check_napi(struct sge_qset *qs)
2677 {
2678 	struct sge_rspq *q = &qs->rspq;
2679 
2680 	if (!napi_is_scheduled(&qs->napi) &&
2681 	    is_new_response(&q->desc[q->cidx], q)) {
2682 		napi_schedule(&qs->napi);
2683 		return 1;
2684 	}
2685 	return 0;
2686 }
2687 
2688 /*
2689  * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2690  * by NAPI polling).  Handles data events from SGE response queues as well as
2691  * error and other async events as they all use the same MSI vector.  We use
2692  * one SGE response queue per port in this mode and protect all response
2693  * queues with queue 0's lock.
2694  */
2695 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2696 {
2697 	int new_packets;
2698 	struct adapter *adap = cookie;
2699 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2700 
2701 	spin_lock(&q->lock);
2702 
2703 	new_packets = rspq_check_napi(&adap->sge.qs[0]);
2704 	if (adap->params.nports == 2)
2705 		new_packets += rspq_check_napi(&adap->sge.qs[1]);
2706 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2707 		q->unhandled_irqs++;
2708 
2709 	spin_unlock(&q->lock);
2710 	return IRQ_HANDLED;
2711 }
2712 
2713 /*
2714  * A helper function that processes responses and issues GTS.
2715  */
2716 static inline int process_responses_gts(struct adapter *adap,
2717 					struct sge_rspq *rq)
2718 {
2719 	int work;
2720 
2721 	work = process_responses(adap, rspq_to_qset(rq), -1);
2722 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2723 		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2724 	return work;
2725 }
2726 
2727 /*
2728  * The legacy INTx interrupt handler.  This needs to handle data events from
2729  * SGE response queues as well as error and other async events as they all use
2730  * the same interrupt pin.  We use one SGE response queue per port in this mode
2731  * and protect all response queues with queue 0's lock.
2732  */
2733 static irqreturn_t t3_intr(int irq, void *cookie)
2734 {
2735 	int work_done, w0, w1;
2736 	struct adapter *adap = cookie;
2737 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2738 	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2739 
2740 	spin_lock(&q0->lock);
2741 
2742 	w0 = is_new_response(&q0->desc[q0->cidx], q0);
2743 	w1 = adap->params.nports == 2 &&
2744 	    is_new_response(&q1->desc[q1->cidx], q1);
2745 
2746 	if (likely(w0 | w1)) {
2747 		t3_write_reg(adap, A_PL_CLI, 0);
2748 		t3_read_reg(adap, A_PL_CLI);	/* flush */
2749 
2750 		if (likely(w0))
2751 			process_responses_gts(adap, q0);
2752 
2753 		if (w1)
2754 			process_responses_gts(adap, q1);
2755 
2756 		work_done = w0 | w1;
2757 	} else
2758 		work_done = t3_slow_intr_handler(adap);
2759 
2760 	spin_unlock(&q0->lock);
2761 	return IRQ_RETVAL(work_done != 0);
2762 }
2763 
2764 /*
2765  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2766  * Handles data events from SGE response queues as well as error and other
2767  * async events as they all use the same interrupt pin.  We use one SGE
2768  * response queue per port in this mode and protect all response queues with
2769  * queue 0's lock.
2770  */
2771 static irqreturn_t t3b_intr(int irq, void *cookie)
2772 {
2773 	u32 map;
2774 	struct adapter *adap = cookie;
2775 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2776 
2777 	t3_write_reg(adap, A_PL_CLI, 0);
2778 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2779 
2780 	if (unlikely(!map))	/* shared interrupt, most likely */
2781 		return IRQ_NONE;
2782 
2783 	spin_lock(&q0->lock);
2784 
2785 	if (unlikely(map & F_ERRINTR))
2786 		t3_slow_intr_handler(adap);
2787 
2788 	if (likely(map & 1))
2789 		process_responses_gts(adap, q0);
2790 
2791 	if (map & 2)
2792 		process_responses_gts(adap, &adap->sge.qs[1].rspq);
2793 
2794 	spin_unlock(&q0->lock);
2795 	return IRQ_HANDLED;
2796 }
2797 
2798 /*
2799  * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2800  * Handles data events from SGE response queues as well as error and other
2801  * async events as they all use the same interrupt pin.  We use one SGE
2802  * response queue per port in this mode and protect all response queues with
2803  * queue 0's lock.
2804  */
2805 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2806 {
2807 	u32 map;
2808 	struct adapter *adap = cookie;
2809 	struct sge_qset *qs0 = &adap->sge.qs[0];
2810 	struct sge_rspq *q0 = &qs0->rspq;
2811 
2812 	t3_write_reg(adap, A_PL_CLI, 0);
2813 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2814 
2815 	if (unlikely(!map))	/* shared interrupt, most likely */
2816 		return IRQ_NONE;
2817 
2818 	spin_lock(&q0->lock);
2819 
2820 	if (unlikely(map & F_ERRINTR))
2821 		t3_slow_intr_handler(adap);
2822 
2823 	if (likely(map & 1))
2824 		napi_schedule(&qs0->napi);
2825 
2826 	if (map & 2)
2827 		napi_schedule(&adap->sge.qs[1].napi);
2828 
2829 	spin_unlock(&q0->lock);
2830 	return IRQ_HANDLED;
2831 }
2832 
2833 /**
2834  *	t3_intr_handler - select the top-level interrupt handler
2835  *	@adap: the adapter
2836  *	@polling: whether using NAPI to service response queues
2837  *
2838  *	Selects the top-level interrupt handler based on the type of interrupts
2839  *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2840  *	response queues.
2841  */
2842 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2843 {
2844 	if (adap->flags & USING_MSIX)
2845 		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2846 	if (adap->flags & USING_MSI)
2847 		return polling ? t3_intr_msi_napi : t3_intr_msi;
2848 	if (adap->params.rev > 0)
2849 		return polling ? t3b_intr_napi : t3b_intr;
2850 	return t3_intr;
2851 }
2852 
2853 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2854 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2855 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2856 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2857 		    F_HIRCQPARITYERROR)
2858 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2859 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2860 		      F_RSPQDISABLED)
2861 
2862 /**
2863  *	t3_sge_err_intr_handler - SGE async event interrupt handler
2864  *	@adapter: the adapter
2865  *
2866  *	Interrupt handler for SGE asynchronous (non-data) events.
2867  */
2868 void t3_sge_err_intr_handler(struct adapter *adapter)
2869 {
2870 	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2871 				 ~F_FLEMPTY;
2872 
2873 	if (status & SGE_PARERR)
2874 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2875 			 status & SGE_PARERR);
2876 	if (status & SGE_FRAMINGERR)
2877 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2878 			 status & SGE_FRAMINGERR);
2879 
2880 	if (status & F_RSPQCREDITOVERFOW)
2881 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
2882 
2883 	if (status & F_RSPQDISABLED) {
2884 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2885 
2886 		CH_ALERT(adapter,
2887 			 "packet delivered to disabled response queue "
2888 			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2889 	}
2890 
2891 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2892 		queue_work(cxgb3_wq, &adapter->db_drop_task);
2893 
2894 	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2895 		queue_work(cxgb3_wq, &adapter->db_full_task);
2896 
2897 	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2898 		queue_work(cxgb3_wq, &adapter->db_empty_task);
2899 
2900 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2901 	if (status &  SGE_FATALERR)
2902 		t3_fatal_err(adapter);
2903 }
2904 
2905 /**
2906  *	sge_timer_tx - perform periodic maintenance of an SGE qset
2907  *	@t: a timer list containing the SGE queue set to maintain
2908  *
2909  *	Runs periodically from a timer to perform maintenance of an SGE queue
2910  *	set.  It performs two tasks:
2911  *
2912  *	Cleans up any completed Tx descriptors that may still be pending.
2913  *	Normal descriptor cleanup happens when new packets are added to a Tx
2914  *	queue so this timer is relatively infrequent and does any cleanup only
2915  *	if the Tx queue has not seen any new packets in a while.  We make a
2916  *	best effort attempt to reclaim descriptors, in that we don't wait
2917  *	around if we cannot get a queue's lock (which most likely is because
2918  *	someone else is queueing new packets and so will also handle the clean
2919  *	up).  Since control queues use immediate data exclusively we don't
2920  *	bother cleaning them up here.
2921  *
2922  */
2923 static void sge_timer_tx(struct timer_list *t)
2924 {
2925 	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2926 	struct port_info *pi = netdev_priv(qs->netdev);
2927 	struct adapter *adap = pi->adapter;
2928 	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2929 	unsigned long next_period;
2930 
2931 	if (__netif_tx_trylock(qs->tx_q)) {
2932                 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2933                                                      TX_RECLAIM_TIMER_CHUNK);
2934 		__netif_tx_unlock(qs->tx_q);
2935 	}
2936 
2937 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2938 		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2939 						     TX_RECLAIM_TIMER_CHUNK);
2940 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
2941 	}
2942 
2943 	next_period = TX_RECLAIM_PERIOD >>
2944                       (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2945                       TX_RECLAIM_TIMER_CHUNK);
2946 	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2947 }
2948 
2949 /**
2950  *	sge_timer_rx - perform periodic maintenance of an SGE qset
2951  *	@t: the timer list containing the SGE queue set to maintain
2952  *
2953  *	a) Replenishes Rx queues that have run out due to memory shortage.
2954  *	Normally new Rx buffers are added when existing ones are consumed but
2955  *	when out of memory a queue can become empty.  We try to add only a few
2956  *	buffers here, the queue will be replenished fully as these new buffers
2957  *	are used up if memory shortage has subsided.
2958  *
2959  *	b) Return coalesced response queue credits in case a response queue is
2960  *	starved.
2961  *
2962  */
2963 static void sge_timer_rx(struct timer_list *t)
2964 {
2965 	spinlock_t *lock;
2966 	struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2967 	struct port_info *pi = netdev_priv(qs->netdev);
2968 	struct adapter *adap = pi->adapter;
2969 	u32 status;
2970 
2971 	lock = adap->params.rev > 0 ?
2972 	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2973 
2974 	if (!spin_trylock_irq(lock))
2975 		goto out;
2976 
2977 	if (napi_is_scheduled(&qs->napi))
2978 		goto unlock;
2979 
2980 	if (adap->params.rev < 4) {
2981 		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2982 
2983 		if (status & (1 << qs->rspq.cntxt_id)) {
2984 			qs->rspq.starved++;
2985 			if (qs->rspq.credits) {
2986 				qs->rspq.credits--;
2987 				refill_rspq(adap, &qs->rspq, 1);
2988 				qs->rspq.restarted++;
2989 				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2990 					     1 << qs->rspq.cntxt_id);
2991 			}
2992 		}
2993 	}
2994 
2995 	if (qs->fl[0].credits < qs->fl[0].size)
2996 		__refill_fl(adap, &qs->fl[0]);
2997 	if (qs->fl[1].credits < qs->fl[1].size)
2998 		__refill_fl(adap, &qs->fl[1]);
2999 
3000 unlock:
3001 	spin_unlock_irq(lock);
3002 out:
3003 	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3004 }
3005 
3006 /**
3007  *	t3_update_qset_coalesce - update coalescing settings for a queue set
3008  *	@qs: the SGE queue set
3009  *	@p: new queue set parameters
3010  *
3011  *	Update the coalescing settings for an SGE queue set.  Nothing is done
3012  *	if the queue set is not initialized yet.
3013  */
3014 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3015 {
3016 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
3017 	qs->rspq.polling = p->polling;
3018 	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
3019 }
3020 
3021 /**
3022  *	t3_sge_alloc_qset - initialize an SGE queue set
3023  *	@adapter: the adapter
3024  *	@id: the queue set id
3025  *	@nports: how many Ethernet ports will be using this queue set
3026  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
3027  *	@p: configuration parameters for this queue set
3028  *	@ntxq: number of Tx queues for the queue set
3029  *	@dev: net device associated with this queue set
3030  *	@netdevq: net device TX queue associated with this queue set
3031  *
3032  *	Allocate resources and initialize an SGE queue set.  A queue set
3033  *	comprises a response queue, two Rx free-buffer queues, and up to 3
3034  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
3035  *	queue, offload queue, and control queue.
3036  */
3037 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3038 		      int irq_vec_idx, const struct qset_params *p,
3039 		      int ntxq, struct net_device *dev,
3040 		      struct netdev_queue *netdevq)
3041 {
3042 	int i, avail, ret = -ENOMEM;
3043 	struct sge_qset *q = &adapter->sge.qs[id];
3044 
3045 	init_qset_cntxt(q, id);
3046 	timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
3047 	timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
3048 
3049 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3050 				   sizeof(struct rx_desc),
3051 				   sizeof(struct rx_sw_desc),
3052 				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
3053 	if (!q->fl[0].desc)
3054 		goto err;
3055 
3056 	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3057 				   sizeof(struct rx_desc),
3058 				   sizeof(struct rx_sw_desc),
3059 				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
3060 	if (!q->fl[1].desc)
3061 		goto err;
3062 
3063 	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3064 				  sizeof(struct rsp_desc), 0,
3065 				  &q->rspq.phys_addr, NULL);
3066 	if (!q->rspq.desc)
3067 		goto err;
3068 
3069 	for (i = 0; i < ntxq; ++i) {
3070 		/*
3071 		 * The control queue always uses immediate data so does not
3072 		 * need to keep track of any sk_buffs.
3073 		 */
3074 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3075 
3076 		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3077 					    sizeof(struct tx_desc), sz,
3078 					    &q->txq[i].phys_addr,
3079 					    &q->txq[i].sdesc);
3080 		if (!q->txq[i].desc)
3081 			goto err;
3082 
3083 		q->txq[i].gen = 1;
3084 		q->txq[i].size = p->txq_size[i];
3085 		spin_lock_init(&q->txq[i].lock);
3086 		skb_queue_head_init(&q->txq[i].sendq);
3087 	}
3088 
3089 	INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
3090 	INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
3091 
3092 	q->fl[0].gen = q->fl[1].gen = 1;
3093 	q->fl[0].size = p->fl_size;
3094 	q->fl[1].size = p->jumbo_size;
3095 
3096 	q->rspq.gen = 1;
3097 	q->rspq.size = p->rspq_size;
3098 	spin_lock_init(&q->rspq.lock);
3099 	skb_queue_head_init(&q->rspq.rx_queue);
3100 
3101 	q->txq[TXQ_ETH].stop_thres = nports *
3102 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3103 
3104 #if FL0_PG_CHUNK_SIZE > 0
3105 	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3106 #else
3107 	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3108 #endif
3109 #if FL1_PG_CHUNK_SIZE > 0
3110 	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3111 #else
3112 	q->fl[1].buf_size = is_offload(adapter) ?
3113 		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3114 		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3115 #endif
3116 
3117 	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3118 	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3119 	q->fl[0].order = FL0_PG_ORDER;
3120 	q->fl[1].order = FL1_PG_ORDER;
3121 	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3122 	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3123 
3124 	spin_lock_irq(&adapter->sge.reg_lock);
3125 
3126 	/* FL threshold comparison uses < */
3127 	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3128 				   q->rspq.phys_addr, q->rspq.size,
3129 				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3130 	if (ret)
3131 		goto err_unlock;
3132 
3133 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3134 		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3135 					  q->fl[i].phys_addr, q->fl[i].size,
3136 					  q->fl[i].buf_size - SGE_PG_RSVD,
3137 					  p->cong_thres, 1, 0);
3138 		if (ret)
3139 			goto err_unlock;
3140 	}
3141 
3142 	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3143 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3144 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3145 				 1, 0);
3146 	if (ret)
3147 		goto err_unlock;
3148 
3149 	if (ntxq > 1) {
3150 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3151 					 USE_GTS, SGE_CNTXT_OFLD, id,
3152 					 q->txq[TXQ_OFLD].phys_addr,
3153 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
3154 		if (ret)
3155 			goto err_unlock;
3156 	}
3157 
3158 	if (ntxq > 2) {
3159 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3160 					 SGE_CNTXT_CTRL, id,
3161 					 q->txq[TXQ_CTRL].phys_addr,
3162 					 q->txq[TXQ_CTRL].size,
3163 					 q->txq[TXQ_CTRL].token, 1, 0);
3164 		if (ret)
3165 			goto err_unlock;
3166 	}
3167 
3168 	spin_unlock_irq(&adapter->sge.reg_lock);
3169 
3170 	q->adap = adapter;
3171 	q->netdev = dev;
3172 	q->tx_q = netdevq;
3173 	t3_update_qset_coalesce(q, p);
3174 
3175 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3176 			  GFP_KERNEL | __GFP_COMP);
3177 	if (!avail) {
3178 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3179 		ret = -ENOMEM;
3180 		goto err;
3181 	}
3182 	if (avail < q->fl[0].size)
3183 		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3184 			avail);
3185 
3186 	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3187 			  GFP_KERNEL | __GFP_COMP);
3188 	if (avail < q->fl[1].size)
3189 		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3190 			avail);
3191 	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3192 
3193 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3194 		     V_NEWTIMER(q->rspq.holdoff_tmr));
3195 
3196 	return 0;
3197 
3198 err_unlock:
3199 	spin_unlock_irq(&adapter->sge.reg_lock);
3200 err:
3201 	t3_free_qset(adapter, q);
3202 	return ret;
3203 }
3204 
3205 /**
3206  *      t3_start_sge_timers - start SGE timer call backs
3207  *      @adap: the adapter
3208  *
3209  *      Starts each SGE queue set's timer call back
3210  */
3211 void t3_start_sge_timers(struct adapter *adap)
3212 {
3213 	int i;
3214 
3215 	for (i = 0; i < SGE_QSETS; ++i) {
3216 		struct sge_qset *q = &adap->sge.qs[i];
3217 
3218 		if (q->tx_reclaim_timer.function)
3219 			mod_timer(&q->tx_reclaim_timer,
3220 				  jiffies + TX_RECLAIM_PERIOD);
3221 
3222 		if (q->rx_reclaim_timer.function)
3223 			mod_timer(&q->rx_reclaim_timer,
3224 				  jiffies + RX_RECLAIM_PERIOD);
3225 	}
3226 }
3227 
3228 /**
3229  *	t3_stop_sge_timers - stop SGE timer call backs
3230  *	@adap: the adapter
3231  *
3232  *	Stops each SGE queue set's timer call back
3233  */
3234 void t3_stop_sge_timers(struct adapter *adap)
3235 {
3236 	int i;
3237 
3238 	for (i = 0; i < SGE_QSETS; ++i) {
3239 		struct sge_qset *q = &adap->sge.qs[i];
3240 
3241 		if (q->tx_reclaim_timer.function)
3242 			del_timer_sync(&q->tx_reclaim_timer);
3243 		if (q->rx_reclaim_timer.function)
3244 			del_timer_sync(&q->rx_reclaim_timer);
3245 	}
3246 }
3247 
3248 /**
3249  *	t3_free_sge_resources - free SGE resources
3250  *	@adap: the adapter
3251  *
3252  *	Frees resources used by the SGE queue sets.
3253  */
3254 void t3_free_sge_resources(struct adapter *adap)
3255 {
3256 	int i;
3257 
3258 	for (i = 0; i < SGE_QSETS; ++i)
3259 		t3_free_qset(adap, &adap->sge.qs[i]);
3260 }
3261 
3262 /**
3263  *	t3_sge_start - enable SGE
3264  *	@adap: the adapter
3265  *
3266  *	Enables the SGE for DMAs.  This is the last step in starting packet
3267  *	transfers.
3268  */
3269 void t3_sge_start(struct adapter *adap)
3270 {
3271 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3272 }
3273 
3274 /**
3275  *	t3_sge_stop_dma - Disable SGE DMA engine operation
3276  *	@adap: the adapter
3277  *
3278  *	Can be invoked from interrupt context e.g.  error handler.
3279  *
3280  *	Note that this function cannot disable the restart of works as
3281  *	it cannot wait if called from interrupt context, however the
3282  *	works will have no effect since the doorbells are disabled. The
3283  *	driver will call tg3_sge_stop() later from process context, at
3284  *	which time the works will be stopped if they are still running.
3285  */
3286 void t3_sge_stop_dma(struct adapter *adap)
3287 {
3288 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3289 }
3290 
3291 /**
3292  *	t3_sge_stop - disable SGE operation completly
3293  *	@adap: the adapter
3294  *
3295  *	Called from process context. Disables the DMA engine and any
3296  *	pending queue restart works.
3297  */
3298 void t3_sge_stop(struct adapter *adap)
3299 {
3300 	int i;
3301 
3302 	t3_sge_stop_dma(adap);
3303 
3304 	/* workqueues aren't initialized otherwise */
3305 	if (!(adap->flags & FULL_INIT_DONE))
3306 		return;
3307 	for (i = 0; i < SGE_QSETS; ++i) {
3308 		struct sge_qset *qs = &adap->sge.qs[i];
3309 
3310 		cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
3311 		cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
3312 	}
3313 }
3314 
3315 /**
3316  *	t3_sge_init - initialize SGE
3317  *	@adap: the adapter
3318  *	@p: the SGE parameters
3319  *
3320  *	Performs SGE initialization needed every time after a chip reset.
3321  *	We do not initialize any of the queue sets here, instead the driver
3322  *	top-level must request those individually.  We also do not enable DMA
3323  *	here, that should be done after the queues have been set up.
3324  */
3325 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3326 {
3327 	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3328 
3329 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3330 	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3331 	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3332 	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3333 #if SGE_NUM_GENBITS == 1
3334 	ctrl |= F_EGRGENCTRL;
3335 #endif
3336 	if (adap->params.rev > 0) {
3337 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
3338 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3339 	}
3340 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
3341 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3342 		     V_LORCQDRBTHRSH(512));
3343 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3344 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3345 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3346 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3347 		     adap->params.rev < T3_REV_C ? 1000 : 500);
3348 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3349 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3350 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3351 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3352 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3353 }
3354 
3355 /**
3356  *	t3_sge_prep - one-time SGE initialization
3357  *	@adap: the associated adapter
3358  *	@p: SGE parameters
3359  *
3360  *	Performs one-time initialization of SGE SW state.  Includes determining
3361  *	defaults for the assorted SGE parameters, which admins can change until
3362  *	they are used to initialize the SGE.
3363  */
3364 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3365 {
3366 	int i;
3367 
3368 	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3369 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3370 
3371 	for (i = 0; i < SGE_QSETS; ++i) {
3372 		struct qset_params *q = p->qset + i;
3373 
3374 		q->polling = adap->params.rev > 0;
3375 		q->coalesce_usecs = 5;
3376 		q->rspq_size = 1024;
3377 		q->fl_size = 1024;
3378 		q->jumbo_size = 512;
3379 		q->txq_size[TXQ_ETH] = 1024;
3380 		q->txq_size[TXQ_OFLD] = 1024;
3381 		q->txq_size[TXQ_CTRL] = 256;
3382 		q->cong_thres = 0;
3383 	}
3384 
3385 	spin_lock_init(&adap->sge.reg_lock);
3386 }
3387