1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
44 #include <net/ipv6.h>
45 #include <net/tcp.h>
46 #include "cxgb4.h"
47 #include "t4_regs.h"
48 #include "t4_msg.h"
49 #include "t4fw_api.h"
50 
51 /*
52  * Rx buffer size.  We use largish buffers if possible but settle for single
53  * pages under memory shortage.
54  */
55 #if PAGE_SHIFT >= 16
56 # define FL_PG_ORDER 0
57 #else
58 # define FL_PG_ORDER (16 - PAGE_SHIFT)
59 #endif
60 
61 /* RX_PULL_LEN should be <= RX_COPY_THRES */
62 #define RX_COPY_THRES    256
63 #define RX_PULL_LEN      128
64 
65 /*
66  * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
67  * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
68  */
69 #define RX_PKT_SKB_LEN   512
70 
71 /*
72  * Max number of Tx descriptors we clean up at a time.  Should be modest as
73  * freeing skbs isn't cheap and it happens while holding locks.  We just need
74  * to free packets faster than they arrive, we eventually catch up and keep
75  * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
76  */
77 #define MAX_TX_RECLAIM 16
78 
79 /*
80  * Max number of Rx buffers we replenish at a time.  Again keep this modest,
81  * allocating buffers isn't cheap either.
82  */
83 #define MAX_RX_REFILL 16U
84 
85 /*
86  * Period of the Rx queue check timer.  This timer is infrequent as it has
87  * something to do only when the system experiences severe memory shortage.
88  */
89 #define RX_QCHECK_PERIOD (HZ / 2)
90 
91 /*
92  * Period of the Tx queue check timer.
93  */
94 #define TX_QCHECK_PERIOD (HZ / 2)
95 
96 /*
97  * Max number of Tx descriptors to be reclaimed by the Tx timer.
98  */
99 #define MAX_TIMER_TX_RECLAIM 100
100 
101 /*
102  * Timer index used when backing off due to memory shortage.
103  */
104 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
105 
106 /*
107  * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
108  * attempt to refill it.
109  */
110 #define FL_STARVE_THRES 4
111 
112 /*
113  * Suspend an Ethernet Tx queue with fewer available descriptors than this.
114  * This is the same as calc_tx_descs() for a TSO packet with
115  * nr_frags == MAX_SKB_FRAGS.
116  */
117 #define ETHTXQ_STOP_THRES \
118 	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
119 
120 /*
121  * Suspension threshold for non-Ethernet Tx queues.  We require enough room
122  * for a full sized WR.
123  */
124 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
125 
126 /*
127  * Max Tx descriptor space we allow for an Ethernet packet to be inlined
128  * into a WR.
129  */
130 #define MAX_IMM_TX_PKT_LEN 128
131 
132 /*
133  * Max size of a WR sent through a control Tx queue.
134  */
135 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
136 
137 struct tx_sw_desc {                /* SW state per Tx descriptor */
138 	struct sk_buff *skb;
139 	struct ulptx_sgl *sgl;
140 };
141 
142 struct rx_sw_desc {                /* SW state per Rx descriptor */
143 	struct page *page;
144 	dma_addr_t dma_addr;
145 };
146 
147 /*
148  * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet pe skb
149  * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
150  * We could easily support more but there doesn't seem to be much need for
151  * that ...
152  */
153 #define FL_MTU_SMALL 1500
154 #define FL_MTU_LARGE 9000
155 
156 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
157 					  unsigned int mtu)
158 {
159 	struct sge *s = &adapter->sge;
160 
161 	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
162 }
163 
164 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
165 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
166 
167 /*
168  * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
169  * these to specify the buffer size as an index into the SGE Free List Buffer
170  * Size register array.  We also use bit 4, when the buffer has been unmapped
171  * for DMA, but this is of course never sent to the hardware and is only used
172  * to prevent double unmappings.  All of the above requires that the Free List
173  * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
174  * 32-byte or or a power of 2 greater in alignment.  Since the SGE's minimal
175  * Free List Buffer alignment is 32 bytes, this works out for us ...
176  */
177 enum {
178 	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
179 	RX_BUF_SIZE      = 0x0f,   /* bottom three bits are for buf sizes */
180 	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
181 
182 	/*
183 	 * XXX We shouldn't depend on being able to use these indices.
184 	 * XXX Especially when some other Master PF has initialized the
185 	 * XXX adapter or we use the Firmware Configuration File.  We
186 	 * XXX should really search through the Host Buffer Size register
187 	 * XXX array for the appropriately sized buffer indices.
188 	 */
189 	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
190 	RX_LARGE_PG_BUF  = 0x1,   /* buffer large (FL_PG_ORDER) page buffer */
191 
192 	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
193 	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
194 };
195 
196 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
197 {
198 	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
199 }
200 
201 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
202 {
203 	return !(d->dma_addr & RX_UNMAPPED_BUF);
204 }
205 
206 /**
207  *	txq_avail - return the number of available slots in a Tx queue
208  *	@q: the Tx queue
209  *
210  *	Returns the number of descriptors in a Tx queue available to write new
211  *	packets.
212  */
213 static inline unsigned int txq_avail(const struct sge_txq *q)
214 {
215 	return q->size - 1 - q->in_use;
216 }
217 
218 /**
219  *	fl_cap - return the capacity of a free-buffer list
220  *	@fl: the FL
221  *
222  *	Returns the capacity of a free-buffer list.  The capacity is less than
223  *	the size because one descriptor needs to be left unpopulated, otherwise
224  *	HW will think the FL is empty.
225  */
226 static inline unsigned int fl_cap(const struct sge_fl *fl)
227 {
228 	return fl->size - 8;   /* 1 descriptor = 8 buffers */
229 }
230 
231 static inline bool fl_starving(const struct sge_fl *fl)
232 {
233 	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
234 }
235 
236 static int map_skb(struct device *dev, const struct sk_buff *skb,
237 		   dma_addr_t *addr)
238 {
239 	const skb_frag_t *fp, *end;
240 	const struct skb_shared_info *si;
241 
242 	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
243 	if (dma_mapping_error(dev, *addr))
244 		goto out_err;
245 
246 	si = skb_shinfo(skb);
247 	end = &si->frags[si->nr_frags];
248 
249 	for (fp = si->frags; fp < end; fp++) {
250 		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
251 					   DMA_TO_DEVICE);
252 		if (dma_mapping_error(dev, *addr))
253 			goto unwind;
254 	}
255 	return 0;
256 
257 unwind:
258 	while (fp-- > si->frags)
259 		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
260 
261 	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
262 out_err:
263 	return -ENOMEM;
264 }
265 
266 #ifdef CONFIG_NEED_DMA_MAP_STATE
267 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
268 		      const dma_addr_t *addr)
269 {
270 	const skb_frag_t *fp, *end;
271 	const struct skb_shared_info *si;
272 
273 	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
274 
275 	si = skb_shinfo(skb);
276 	end = &si->frags[si->nr_frags];
277 	for (fp = si->frags; fp < end; fp++)
278 		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
279 }
280 
281 /**
282  *	deferred_unmap_destructor - unmap a packet when it is freed
283  *	@skb: the packet
284  *
285  *	This is the packet destructor used for Tx packets that need to remain
286  *	mapped until they are freed rather than until their Tx descriptors are
287  *	freed.
288  */
289 static void deferred_unmap_destructor(struct sk_buff *skb)
290 {
291 	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
292 }
293 #endif
294 
295 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
296 		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
297 {
298 	const struct ulptx_sge_pair *p;
299 	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
300 
301 	if (likely(skb_headlen(skb)))
302 		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
303 				 DMA_TO_DEVICE);
304 	else {
305 		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
306 			       DMA_TO_DEVICE);
307 		nfrags--;
308 	}
309 
310 	/*
311 	 * the complexity below is because of the possibility of a wrap-around
312 	 * in the middle of an SGL
313 	 */
314 	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
315 		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
316 unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
317 				       ntohl(p->len[0]), DMA_TO_DEVICE);
318 			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
319 				       ntohl(p->len[1]), DMA_TO_DEVICE);
320 			p++;
321 		} else if ((u8 *)p == (u8 *)q->stat) {
322 			p = (const struct ulptx_sge_pair *)q->desc;
323 			goto unmap;
324 		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
325 			const __be64 *addr = (const __be64 *)q->desc;
326 
327 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
328 				       ntohl(p->len[0]), DMA_TO_DEVICE);
329 			dma_unmap_page(dev, be64_to_cpu(addr[1]),
330 				       ntohl(p->len[1]), DMA_TO_DEVICE);
331 			p = (const struct ulptx_sge_pair *)&addr[2];
332 		} else {
333 			const __be64 *addr = (const __be64 *)q->desc;
334 
335 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
336 				       ntohl(p->len[0]), DMA_TO_DEVICE);
337 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
338 				       ntohl(p->len[1]), DMA_TO_DEVICE);
339 			p = (const struct ulptx_sge_pair *)&addr[1];
340 		}
341 	}
342 	if (nfrags) {
343 		__be64 addr;
344 
345 		if ((u8 *)p == (u8 *)q->stat)
346 			p = (const struct ulptx_sge_pair *)q->desc;
347 		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
348 						       *(const __be64 *)q->desc;
349 		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
350 			       DMA_TO_DEVICE);
351 	}
352 }
353 
354 /**
355  *	free_tx_desc - reclaims Tx descriptors and their buffers
356  *	@adapter: the adapter
357  *	@q: the Tx queue to reclaim descriptors from
358  *	@n: the number of descriptors to reclaim
359  *	@unmap: whether the buffers should be unmapped for DMA
360  *
361  *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
362  *	Tx buffers.  Called with the Tx queue lock held.
363  */
364 static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
365 			 unsigned int n, bool unmap)
366 {
367 	struct tx_sw_desc *d;
368 	unsigned int cidx = q->cidx;
369 	struct device *dev = adap->pdev_dev;
370 
371 	d = &q->sdesc[cidx];
372 	while (n--) {
373 		if (d->skb) {                       /* an SGL is present */
374 			if (unmap)
375 				unmap_sgl(dev, d->skb, d->sgl, q);
376 			kfree_skb(d->skb);
377 			d->skb = NULL;
378 		}
379 		++d;
380 		if (++cidx == q->size) {
381 			cidx = 0;
382 			d = q->sdesc;
383 		}
384 	}
385 	q->cidx = cidx;
386 }
387 
388 /*
389  * Return the number of reclaimable descriptors in a Tx queue.
390  */
391 static inline int reclaimable(const struct sge_txq *q)
392 {
393 	int hw_cidx = ntohs(q->stat->cidx);
394 	hw_cidx -= q->cidx;
395 	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
396 }
397 
398 /**
399  *	reclaim_completed_tx - reclaims completed Tx descriptors
400  *	@adap: the adapter
401  *	@q: the Tx queue to reclaim completed descriptors from
402  *	@unmap: whether the buffers should be unmapped for DMA
403  *
404  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
405  *	and frees the associated buffers if possible.  Called with the Tx
406  *	queue locked.
407  */
408 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
409 					bool unmap)
410 {
411 	int avail = reclaimable(q);
412 
413 	if (avail) {
414 		/*
415 		 * Limit the amount of clean up work we do at a time to keep
416 		 * the Tx lock hold time O(1).
417 		 */
418 		if (avail > MAX_TX_RECLAIM)
419 			avail = MAX_TX_RECLAIM;
420 
421 		free_tx_desc(adap, q, avail, unmap);
422 		q->in_use -= avail;
423 	}
424 }
425 
426 static inline int get_buf_size(struct adapter *adapter,
427 			       const struct rx_sw_desc *d)
428 {
429 	struct sge *s = &adapter->sge;
430 	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
431 	int buf_size;
432 
433 	switch (rx_buf_size_idx) {
434 	case RX_SMALL_PG_BUF:
435 		buf_size = PAGE_SIZE;
436 		break;
437 
438 	case RX_LARGE_PG_BUF:
439 		buf_size = PAGE_SIZE << s->fl_pg_order;
440 		break;
441 
442 	case RX_SMALL_MTU_BUF:
443 		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
444 		break;
445 
446 	case RX_LARGE_MTU_BUF:
447 		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
448 		break;
449 
450 	default:
451 		BUG_ON(1);
452 	}
453 
454 	return buf_size;
455 }
456 
457 /**
458  *	free_rx_bufs - free the Rx buffers on an SGE free list
459  *	@adap: the adapter
460  *	@q: the SGE free list to free buffers from
461  *	@n: how many buffers to free
462  *
463  *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
464  *	buffers must be made inaccessible to HW before calling this function.
465  */
466 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
467 {
468 	while (n--) {
469 		struct rx_sw_desc *d = &q->sdesc[q->cidx];
470 
471 		if (is_buf_mapped(d))
472 			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
473 				       get_buf_size(adap, d),
474 				       PCI_DMA_FROMDEVICE);
475 		put_page(d->page);
476 		d->page = NULL;
477 		if (++q->cidx == q->size)
478 			q->cidx = 0;
479 		q->avail--;
480 	}
481 }
482 
483 /**
484  *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
485  *	@adap: the adapter
486  *	@q: the SGE free list
487  *
488  *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
489  *	buffer must be made inaccessible to HW before calling this function.
490  *
491  *	This is similar to @free_rx_bufs above but does not free the buffer.
492  *	Do note that the FL still loses any further access to the buffer.
493  */
494 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
495 {
496 	struct rx_sw_desc *d = &q->sdesc[q->cidx];
497 
498 	if (is_buf_mapped(d))
499 		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
500 			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
501 	d->page = NULL;
502 	if (++q->cidx == q->size)
503 		q->cidx = 0;
504 	q->avail--;
505 }
506 
507 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
508 {
509 	u32 val;
510 	if (q->pend_cred >= 8) {
511 		val = PIDX(q->pend_cred / 8);
512 		if (!is_t4(adap->params.chip))
513 			val |= DBTYPE(1);
514 		wmb();
515 		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
516 			     QID(q->cntxt_id) | val);
517 		q->pend_cred &= 7;
518 	}
519 }
520 
521 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
522 				  dma_addr_t mapping)
523 {
524 	sd->page = pg;
525 	sd->dma_addr = mapping;      /* includes size low bits */
526 }
527 
528 /**
529  *	refill_fl - refill an SGE Rx buffer ring
530  *	@adap: the adapter
531  *	@q: the ring to refill
532  *	@n: the number of new buffers to allocate
533  *	@gfp: the gfp flags for the allocations
534  *
535  *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
536  *	allocated with the supplied gfp flags.  The caller must assure that
537  *	@n does not exceed the queue's capacity.  If afterwards the queue is
538  *	found critically low mark it as starving in the bitmap of starving FLs.
539  *
540  *	Returns the number of buffers allocated.
541  */
542 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
543 			      gfp_t gfp)
544 {
545 	struct sge *s = &adap->sge;
546 	struct page *pg;
547 	dma_addr_t mapping;
548 	unsigned int cred = q->avail;
549 	__be64 *d = &q->desc[q->pidx];
550 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
551 
552 	gfp |= __GFP_NOWARN | __GFP_COLD;
553 
554 	if (s->fl_pg_order == 0)
555 		goto alloc_small_pages;
556 
557 	/*
558 	 * Prefer large buffers
559 	 */
560 	while (n) {
561 		pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
562 		if (unlikely(!pg)) {
563 			q->large_alloc_failed++;
564 			break;       /* fall back to single pages */
565 		}
566 
567 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
568 				       PAGE_SIZE << s->fl_pg_order,
569 				       PCI_DMA_FROMDEVICE);
570 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
571 			__free_pages(pg, s->fl_pg_order);
572 			goto out;   /* do not try small pages for this error */
573 		}
574 		mapping |= RX_LARGE_PG_BUF;
575 		*d++ = cpu_to_be64(mapping);
576 
577 		set_rx_sw_desc(sd, pg, mapping);
578 		sd++;
579 
580 		q->avail++;
581 		if (++q->pidx == q->size) {
582 			q->pidx = 0;
583 			sd = q->sdesc;
584 			d = q->desc;
585 		}
586 		n--;
587 	}
588 
589 alloc_small_pages:
590 	while (n--) {
591 		pg = __skb_alloc_page(gfp, NULL);
592 		if (unlikely(!pg)) {
593 			q->alloc_failed++;
594 			break;
595 		}
596 
597 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
598 				       PCI_DMA_FROMDEVICE);
599 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
600 			put_page(pg);
601 			goto out;
602 		}
603 		*d++ = cpu_to_be64(mapping);
604 
605 		set_rx_sw_desc(sd, pg, mapping);
606 		sd++;
607 
608 		q->avail++;
609 		if (++q->pidx == q->size) {
610 			q->pidx = 0;
611 			sd = q->sdesc;
612 			d = q->desc;
613 		}
614 	}
615 
616 out:	cred = q->avail - cred;
617 	q->pend_cred += cred;
618 	ring_fl_db(adap, q);
619 
620 	if (unlikely(fl_starving(q))) {
621 		smp_wmb();
622 		set_bit(q->cntxt_id - adap->sge.egr_start,
623 			adap->sge.starving_fl);
624 	}
625 
626 	return cred;
627 }
628 
629 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
630 {
631 	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
632 		  GFP_ATOMIC);
633 }
634 
635 /**
636  *	alloc_ring - allocate resources for an SGE descriptor ring
637  *	@dev: the PCI device's core device
638  *	@nelem: the number of descriptors
639  *	@elem_size: the size of each descriptor
640  *	@sw_size: the size of the SW state associated with each ring element
641  *	@phys: the physical address of the allocated ring
642  *	@metadata: address of the array holding the SW state for the ring
643  *	@stat_size: extra space in HW ring for status information
644  *	@node: preferred node for memory allocations
645  *
646  *	Allocates resources for an SGE descriptor ring, such as Tx queues,
647  *	free buffer lists, or response queues.  Each SGE ring requires
648  *	space for its HW descriptors plus, optionally, space for the SW state
649  *	associated with each HW entry (the metadata).  The function returns
650  *	three values: the virtual address for the HW ring (the return value
651  *	of the function), the bus address of the HW ring, and the address
652  *	of the SW ring.
653  */
654 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
655 			size_t sw_size, dma_addr_t *phys, void *metadata,
656 			size_t stat_size, int node)
657 {
658 	size_t len = nelem * elem_size + stat_size;
659 	void *s = NULL;
660 	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
661 
662 	if (!p)
663 		return NULL;
664 	if (sw_size) {
665 		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
666 
667 		if (!s) {
668 			dma_free_coherent(dev, len, p, *phys);
669 			return NULL;
670 		}
671 	}
672 	if (metadata)
673 		*(void **)metadata = s;
674 	memset(p, 0, len);
675 	return p;
676 }
677 
678 /**
679  *	sgl_len - calculates the size of an SGL of the given capacity
680  *	@n: the number of SGL entries
681  *
682  *	Calculates the number of flits needed for a scatter/gather list that
683  *	can hold the given number of entries.
684  */
685 static inline unsigned int sgl_len(unsigned int n)
686 {
687 	n--;
688 	return (3 * n) / 2 + (n & 1) + 2;
689 }
690 
691 /**
692  *	flits_to_desc - returns the num of Tx descriptors for the given flits
693  *	@n: the number of flits
694  *
695  *	Returns the number of Tx descriptors needed for the supplied number
696  *	of flits.
697  */
698 static inline unsigned int flits_to_desc(unsigned int n)
699 {
700 	BUG_ON(n > SGE_MAX_WR_LEN / 8);
701 	return DIV_ROUND_UP(n, 8);
702 }
703 
704 /**
705  *	is_eth_imm - can an Ethernet packet be sent as immediate data?
706  *	@skb: the packet
707  *
708  *	Returns whether an Ethernet packet is small enough to fit as
709  *	immediate data.
710  */
711 static inline int is_eth_imm(const struct sk_buff *skb)
712 {
713 	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
714 }
715 
716 /**
717  *	calc_tx_flits - calculate the number of flits for a packet Tx WR
718  *	@skb: the packet
719  *
720  *	Returns the number of flits needed for a Tx WR for the given Ethernet
721  *	packet, including the needed WR and CPL headers.
722  */
723 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
724 {
725 	unsigned int flits;
726 
727 	if (is_eth_imm(skb))
728 		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
729 
730 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
731 	if (skb_shinfo(skb)->gso_size)
732 		flits += 2;
733 	return flits;
734 }
735 
736 /**
737  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
738  *	@skb: the packet
739  *
740  *	Returns the number of Tx descriptors needed for the given Ethernet
741  *	packet, including the needed WR and CPL headers.
742  */
743 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
744 {
745 	return flits_to_desc(calc_tx_flits(skb));
746 }
747 
748 /**
749  *	write_sgl - populate a scatter/gather list for a packet
750  *	@skb: the packet
751  *	@q: the Tx queue we are writing into
752  *	@sgl: starting location for writing the SGL
753  *	@end: points right after the end of the SGL
754  *	@start: start offset into skb main-body data to include in the SGL
755  *	@addr: the list of bus addresses for the SGL elements
756  *
757  *	Generates a gather list for the buffers that make up a packet.
758  *	The caller must provide adequate space for the SGL that will be written.
759  *	The SGL includes all of the packet's page fragments and the data in its
760  *	main body except for the first @start bytes.  @sgl must be 16-byte
761  *	aligned and within a Tx descriptor with available space.  @end points
762  *	right after the end of the SGL but does not account for any potential
763  *	wrap around, i.e., @end > @sgl.
764  */
765 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
766 		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
767 		      const dma_addr_t *addr)
768 {
769 	unsigned int i, len;
770 	struct ulptx_sge_pair *to;
771 	const struct skb_shared_info *si = skb_shinfo(skb);
772 	unsigned int nfrags = si->nr_frags;
773 	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
774 
775 	len = skb_headlen(skb) - start;
776 	if (likely(len)) {
777 		sgl->len0 = htonl(len);
778 		sgl->addr0 = cpu_to_be64(addr[0] + start);
779 		nfrags++;
780 	} else {
781 		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
782 		sgl->addr0 = cpu_to_be64(addr[1]);
783 	}
784 
785 	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
786 	if (likely(--nfrags == 0))
787 		return;
788 	/*
789 	 * Most of the complexity below deals with the possibility we hit the
790 	 * end of the queue in the middle of writing the SGL.  For this case
791 	 * only we create the SGL in a temporary buffer and then copy it.
792 	 */
793 	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
794 
795 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
796 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
797 		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
798 		to->addr[0] = cpu_to_be64(addr[i]);
799 		to->addr[1] = cpu_to_be64(addr[++i]);
800 	}
801 	if (nfrags) {
802 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
803 		to->len[1] = cpu_to_be32(0);
804 		to->addr[0] = cpu_to_be64(addr[i + 1]);
805 	}
806 	if (unlikely((u8 *)end > (u8 *)q->stat)) {
807 		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
808 
809 		if (likely(part0))
810 			memcpy(sgl->sge, buf, part0);
811 		part1 = (u8 *)end - (u8 *)q->stat;
812 		memcpy(q->desc, (u8 *)buf + part0, part1);
813 		end = (void *)q->desc + part1;
814 	}
815 	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
816 		*end = 0;
817 }
818 
819 /* This function copies 64 byte coalesced work request to
820  * memory mapped BAR2 space(user space writes).
821  * For coalesced WR SGE, fetches data from the FIFO instead of from Host.
822  */
823 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
824 {
825 	int count = 8;
826 
827 	while (count) {
828 		writeq(*src, dst);
829 		src++;
830 		dst++;
831 		count--;
832 	}
833 }
834 
835 /**
836  *	ring_tx_db - check and potentially ring a Tx queue's doorbell
837  *	@adap: the adapter
838  *	@q: the Tx queue
839  *	@n: number of new descriptors to give to HW
840  *
841  *	Ring the doorbel for a Tx queue.
842  */
843 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
844 {
845 	unsigned int *wr, index;
846 
847 	wmb();            /* write descriptors before telling HW */
848 	spin_lock(&q->db_lock);
849 	if (!q->db_disabled) {
850 		if (is_t4(adap->params.chip)) {
851 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
852 				     QID(q->cntxt_id) | PIDX(n));
853 		} else {
854 			if (n == 1) {
855 				index = q->pidx ? (q->pidx - 1) : (q->size - 1);
856 				wr = (unsigned int *)&q->desc[index];
857 				cxgb_pio_copy((u64 __iomem *)
858 					      (adap->bar2 + q->udb + 64),
859 					      (u64 *)wr);
860 			} else
861 				writel(n,  adap->bar2 + q->udb + 8);
862 			wmb();
863 		}
864 	}
865 	q->db_pidx = q->pidx;
866 	spin_unlock(&q->db_lock);
867 }
868 
869 /**
870  *	inline_tx_skb - inline a packet's data into Tx descriptors
871  *	@skb: the packet
872  *	@q: the Tx queue where the packet will be inlined
873  *	@pos: starting position in the Tx queue where to inline the packet
874  *
875  *	Inline a packet's contents directly into Tx descriptors, starting at
876  *	the given position within the Tx DMA ring.
877  *	Most of the complexity of this operation is dealing with wrap arounds
878  *	in the middle of the packet we want to inline.
879  */
880 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
881 			  void *pos)
882 {
883 	u64 *p;
884 	int left = (void *)q->stat - pos;
885 
886 	if (likely(skb->len <= left)) {
887 		if (likely(!skb->data_len))
888 			skb_copy_from_linear_data(skb, pos, skb->len);
889 		else
890 			skb_copy_bits(skb, 0, pos, skb->len);
891 		pos += skb->len;
892 	} else {
893 		skb_copy_bits(skb, 0, pos, left);
894 		skb_copy_bits(skb, left, q->desc, skb->len - left);
895 		pos = (void *)q->desc + (skb->len - left);
896 	}
897 
898 	/* 0-pad to multiple of 16 */
899 	p = PTR_ALIGN(pos, 8);
900 	if ((uintptr_t)p & 8)
901 		*p = 0;
902 }
903 
904 /*
905  * Figure out what HW csum a packet wants and return the appropriate control
906  * bits.
907  */
908 static u64 hwcsum(const struct sk_buff *skb)
909 {
910 	int csum_type;
911 	const struct iphdr *iph = ip_hdr(skb);
912 
913 	if (iph->version == 4) {
914 		if (iph->protocol == IPPROTO_TCP)
915 			csum_type = TX_CSUM_TCPIP;
916 		else if (iph->protocol == IPPROTO_UDP)
917 			csum_type = TX_CSUM_UDPIP;
918 		else {
919 nocsum:			/*
920 			 * unknown protocol, disable HW csum
921 			 * and hope a bad packet is detected
922 			 */
923 			return TXPKT_L4CSUM_DIS;
924 		}
925 	} else {
926 		/*
927 		 * this doesn't work with extension headers
928 		 */
929 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
930 
931 		if (ip6h->nexthdr == IPPROTO_TCP)
932 			csum_type = TX_CSUM_TCPIP6;
933 		else if (ip6h->nexthdr == IPPROTO_UDP)
934 			csum_type = TX_CSUM_UDPIP6;
935 		else
936 			goto nocsum;
937 	}
938 
939 	if (likely(csum_type >= TX_CSUM_TCPIP))
940 		return TXPKT_CSUM_TYPE(csum_type) |
941 			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
942 			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
943 	else {
944 		int start = skb_transport_offset(skb);
945 
946 		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
947 			TXPKT_CSUM_LOC(start + skb->csum_offset);
948 	}
949 }
950 
951 static void eth_txq_stop(struct sge_eth_txq *q)
952 {
953 	netif_tx_stop_queue(q->txq);
954 	q->q.stops++;
955 }
956 
957 static inline void txq_advance(struct sge_txq *q, unsigned int n)
958 {
959 	q->in_use += n;
960 	q->pidx += n;
961 	if (q->pidx >= q->size)
962 		q->pidx -= q->size;
963 }
964 
965 /**
966  *	t4_eth_xmit - add a packet to an Ethernet Tx queue
967  *	@skb: the packet
968  *	@dev: the egress net device
969  *
970  *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
971  */
972 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
973 {
974 	u32 wr_mid;
975 	u64 cntrl, *end;
976 	int qidx, credits;
977 	unsigned int flits, ndesc;
978 	struct adapter *adap;
979 	struct sge_eth_txq *q;
980 	const struct port_info *pi;
981 	struct fw_eth_tx_pkt_wr *wr;
982 	struct cpl_tx_pkt_core *cpl;
983 	const struct skb_shared_info *ssi;
984 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
985 
986 	/*
987 	 * The chip min packet length is 10 octets but play safe and reject
988 	 * anything shorter than an Ethernet header.
989 	 */
990 	if (unlikely(skb->len < ETH_HLEN)) {
991 out_free:	dev_kfree_skb(skb);
992 		return NETDEV_TX_OK;
993 	}
994 
995 	pi = netdev_priv(dev);
996 	adap = pi->adapter;
997 	qidx = skb_get_queue_mapping(skb);
998 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
999 
1000 	reclaim_completed_tx(adap, &q->q, true);
1001 
1002 	flits = calc_tx_flits(skb);
1003 	ndesc = flits_to_desc(flits);
1004 	credits = txq_avail(&q->q) - ndesc;
1005 
1006 	if (unlikely(credits < 0)) {
1007 		eth_txq_stop(q);
1008 		dev_err(adap->pdev_dev,
1009 			"%s: Tx ring %u full while queue awake!\n",
1010 			dev->name, qidx);
1011 		return NETDEV_TX_BUSY;
1012 	}
1013 
1014 	if (!is_eth_imm(skb) &&
1015 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1016 		q->mapping_err++;
1017 		goto out_free;
1018 	}
1019 
1020 	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
1021 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1022 		eth_txq_stop(q);
1023 		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
1024 	}
1025 
1026 	wr = (void *)&q->q.desc[q->q.pidx];
1027 	wr->equiq_to_len16 = htonl(wr_mid);
1028 	wr->r3 = cpu_to_be64(0);
1029 	end = (u64 *)wr + flits;
1030 
1031 	ssi = skb_shinfo(skb);
1032 	if (ssi->gso_size) {
1033 		struct cpl_tx_pkt_lso *lso = (void *)wr;
1034 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1035 		int l3hdr_len = skb_network_header_len(skb);
1036 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1037 
1038 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1039 				       FW_WR_IMMDLEN(sizeof(*lso)));
1040 		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1041 					LSO_FIRST_SLICE | LSO_LAST_SLICE |
1042 					LSO_IPV6(v6) |
1043 					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1044 					LSO_IPHDR_LEN(l3hdr_len / 4) |
1045 					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1046 		lso->c.ipid_ofst = htons(0);
1047 		lso->c.mss = htons(ssi->gso_size);
1048 		lso->c.seqno_offset = htonl(0);
1049 		lso->c.len = htonl(skb->len);
1050 		cpl = (void *)(lso + 1);
1051 		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1052 			TXPKT_IPHDR_LEN(l3hdr_len) |
1053 			TXPKT_ETHHDR_LEN(eth_xtra_len);
1054 		q->tso++;
1055 		q->tx_cso += ssi->gso_segs;
1056 	} else {
1057 		int len;
1058 
1059 		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1060 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1061 				       FW_WR_IMMDLEN(len));
1062 		cpl = (void *)(wr + 1);
1063 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1064 			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1065 			q->tx_cso++;
1066 		} else
1067 			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1068 	}
1069 
1070 	if (vlan_tx_tag_present(skb)) {
1071 		q->vlan_ins++;
1072 		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
1073 	}
1074 
1075 	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1076 			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
1077 	cpl->pack = htons(0);
1078 	cpl->len = htons(skb->len);
1079 	cpl->ctrl1 = cpu_to_be64(cntrl);
1080 
1081 	if (is_eth_imm(skb)) {
1082 		inline_tx_skb(skb, &q->q, cpl + 1);
1083 		dev_kfree_skb(skb);
1084 	} else {
1085 		int last_desc;
1086 
1087 		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1088 			  addr);
1089 		skb_orphan(skb);
1090 
1091 		last_desc = q->q.pidx + ndesc - 1;
1092 		if (last_desc >= q->q.size)
1093 			last_desc -= q->q.size;
1094 		q->q.sdesc[last_desc].skb = skb;
1095 		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1096 	}
1097 
1098 	txq_advance(&q->q, ndesc);
1099 
1100 	ring_tx_db(adap, &q->q, ndesc);
1101 	return NETDEV_TX_OK;
1102 }
1103 
1104 /**
1105  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1106  *	@q: the SGE control Tx queue
1107  *
1108  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1109  *	that send only immediate data (presently just the control queues) and
1110  *	thus do not have any sk_buffs to release.
1111  */
1112 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1113 {
1114 	int hw_cidx = ntohs(q->stat->cidx);
1115 	int reclaim = hw_cidx - q->cidx;
1116 
1117 	if (reclaim < 0)
1118 		reclaim += q->size;
1119 
1120 	q->in_use -= reclaim;
1121 	q->cidx = hw_cidx;
1122 }
1123 
1124 /**
1125  *	is_imm - check whether a packet can be sent as immediate data
1126  *	@skb: the packet
1127  *
1128  *	Returns true if a packet can be sent as a WR with immediate data.
1129  */
1130 static inline int is_imm(const struct sk_buff *skb)
1131 {
1132 	return skb->len <= MAX_CTRL_WR_LEN;
1133 }
1134 
1135 /**
1136  *	ctrlq_check_stop - check if a control queue is full and should stop
1137  *	@q: the queue
1138  *	@wr: most recent WR written to the queue
1139  *
1140  *	Check if a control queue has become full and should be stopped.
1141  *	We clean up control queue descriptors very lazily, only when we are out.
1142  *	If the queue is still full after reclaiming any completed descriptors
1143  *	we suspend it and have the last WR wake it up.
1144  */
1145 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1146 {
1147 	reclaim_completed_tx_imm(&q->q);
1148 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1149 		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1150 		q->q.stops++;
1151 		q->full = 1;
1152 	}
1153 }
1154 
1155 /**
1156  *	ctrl_xmit - send a packet through an SGE control Tx queue
1157  *	@q: the control queue
1158  *	@skb: the packet
1159  *
1160  *	Send a packet through an SGE control Tx queue.  Packets sent through
1161  *	a control queue must fit entirely as immediate data.
1162  */
1163 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1164 {
1165 	unsigned int ndesc;
1166 	struct fw_wr_hdr *wr;
1167 
1168 	if (unlikely(!is_imm(skb))) {
1169 		WARN_ON(1);
1170 		dev_kfree_skb(skb);
1171 		return NET_XMIT_DROP;
1172 	}
1173 
1174 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1175 	spin_lock(&q->sendq.lock);
1176 
1177 	if (unlikely(q->full)) {
1178 		skb->priority = ndesc;                  /* save for restart */
1179 		__skb_queue_tail(&q->sendq, skb);
1180 		spin_unlock(&q->sendq.lock);
1181 		return NET_XMIT_CN;
1182 	}
1183 
1184 	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1185 	inline_tx_skb(skb, &q->q, wr);
1186 
1187 	txq_advance(&q->q, ndesc);
1188 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1189 		ctrlq_check_stop(q, wr);
1190 
1191 	ring_tx_db(q->adap, &q->q, ndesc);
1192 	spin_unlock(&q->sendq.lock);
1193 
1194 	kfree_skb(skb);
1195 	return NET_XMIT_SUCCESS;
1196 }
1197 
1198 /**
1199  *	restart_ctrlq - restart a suspended control queue
1200  *	@data: the control queue to restart
1201  *
1202  *	Resumes transmission on a suspended Tx control queue.
1203  */
1204 static void restart_ctrlq(unsigned long data)
1205 {
1206 	struct sk_buff *skb;
1207 	unsigned int written = 0;
1208 	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1209 
1210 	spin_lock(&q->sendq.lock);
1211 	reclaim_completed_tx_imm(&q->q);
1212 	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
1213 
1214 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1215 		struct fw_wr_hdr *wr;
1216 		unsigned int ndesc = skb->priority;     /* previously saved */
1217 
1218 		/*
1219 		 * Write descriptors and free skbs outside the lock to limit
1220 		 * wait times.  q->full is still set so new skbs will be queued.
1221 		 */
1222 		spin_unlock(&q->sendq.lock);
1223 
1224 		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1225 		inline_tx_skb(skb, &q->q, wr);
1226 		kfree_skb(skb);
1227 
1228 		written += ndesc;
1229 		txq_advance(&q->q, ndesc);
1230 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1231 			unsigned long old = q->q.stops;
1232 
1233 			ctrlq_check_stop(q, wr);
1234 			if (q->q.stops != old) {          /* suspended anew */
1235 				spin_lock(&q->sendq.lock);
1236 				goto ringdb;
1237 			}
1238 		}
1239 		if (written > 16) {
1240 			ring_tx_db(q->adap, &q->q, written);
1241 			written = 0;
1242 		}
1243 		spin_lock(&q->sendq.lock);
1244 	}
1245 	q->full = 0;
1246 ringdb: if (written)
1247 		ring_tx_db(q->adap, &q->q, written);
1248 	spin_unlock(&q->sendq.lock);
1249 }
1250 
1251 /**
1252  *	t4_mgmt_tx - send a management message
1253  *	@adap: the adapter
1254  *	@skb: the packet containing the management message
1255  *
1256  *	Send a management message through control queue 0.
1257  */
1258 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1259 {
1260 	int ret;
1261 
1262 	local_bh_disable();
1263 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1264 	local_bh_enable();
1265 	return ret;
1266 }
1267 
1268 /**
1269  *	is_ofld_imm - check whether a packet can be sent as immediate data
1270  *	@skb: the packet
1271  *
1272  *	Returns true if a packet can be sent as an offload WR with immediate
1273  *	data.  We currently use the same limit as for Ethernet packets.
1274  */
1275 static inline int is_ofld_imm(const struct sk_buff *skb)
1276 {
1277 	return skb->len <= MAX_IMM_TX_PKT_LEN;
1278 }
1279 
1280 /**
1281  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
1282  *	@skb: the packet
1283  *
1284  *	Returns the number of flits needed for the given offload packet.
1285  *	These packets are already fully constructed and no additional headers
1286  *	will be added.
1287  */
1288 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1289 {
1290 	unsigned int flits, cnt;
1291 
1292 	if (is_ofld_imm(skb))
1293 		return DIV_ROUND_UP(skb->len, 8);
1294 
1295 	flits = skb_transport_offset(skb) / 8U;   /* headers */
1296 	cnt = skb_shinfo(skb)->nr_frags;
1297 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1298 		cnt++;
1299 	return flits + sgl_len(cnt);
1300 }
1301 
1302 /**
1303  *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1304  *	@adap: the adapter
1305  *	@q: the queue to stop
1306  *
1307  *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1308  *	inability to map packets.  A periodic timer attempts to restart
1309  *	queues so marked.
1310  */
1311 static void txq_stop_maperr(struct sge_ofld_txq *q)
1312 {
1313 	q->mapping_err++;
1314 	q->q.stops++;
1315 	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1316 		q->adap->sge.txq_maperr);
1317 }
1318 
1319 /**
1320  *	ofldtxq_stop - stop an offload Tx queue that has become full
1321  *	@q: the queue to stop
1322  *	@skb: the packet causing the queue to become full
1323  *
1324  *	Stops an offload Tx queue that has become full and modifies the packet
1325  *	being written to request a wakeup.
1326  */
1327 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1328 {
1329 	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1330 
1331 	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1332 	q->q.stops++;
1333 	q->full = 1;
1334 }
1335 
1336 /**
1337  *	service_ofldq - restart a suspended offload queue
1338  *	@q: the offload queue
1339  *
1340  *	Services an offload Tx queue by moving packets from its packet queue
1341  *	to the HW Tx ring.  The function starts and ends with the queue locked.
1342  */
1343 static void service_ofldq(struct sge_ofld_txq *q)
1344 {
1345 	u64 *pos;
1346 	int credits;
1347 	struct sk_buff *skb;
1348 	unsigned int written = 0;
1349 	unsigned int flits, ndesc;
1350 
1351 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1352 		/*
1353 		 * We drop the lock but leave skb on sendq, thus retaining
1354 		 * exclusive access to the state of the queue.
1355 		 */
1356 		spin_unlock(&q->sendq.lock);
1357 
1358 		reclaim_completed_tx(q->adap, &q->q, false);
1359 
1360 		flits = skb->priority;                /* previously saved */
1361 		ndesc = flits_to_desc(flits);
1362 		credits = txq_avail(&q->q) - ndesc;
1363 		BUG_ON(credits < 0);
1364 		if (unlikely(credits < TXQ_STOP_THRES))
1365 			ofldtxq_stop(q, skb);
1366 
1367 		pos = (u64 *)&q->q.desc[q->q.pidx];
1368 		if (is_ofld_imm(skb))
1369 			inline_tx_skb(skb, &q->q, pos);
1370 		else if (map_skb(q->adap->pdev_dev, skb,
1371 				 (dma_addr_t *)skb->head)) {
1372 			txq_stop_maperr(q);
1373 			spin_lock(&q->sendq.lock);
1374 			break;
1375 		} else {
1376 			int last_desc, hdr_len = skb_transport_offset(skb);
1377 
1378 			memcpy(pos, skb->data, hdr_len);
1379 			write_sgl(skb, &q->q, (void *)pos + hdr_len,
1380 				  pos + flits, hdr_len,
1381 				  (dma_addr_t *)skb->head);
1382 #ifdef CONFIG_NEED_DMA_MAP_STATE
1383 			skb->dev = q->adap->port[0];
1384 			skb->destructor = deferred_unmap_destructor;
1385 #endif
1386 			last_desc = q->q.pidx + ndesc - 1;
1387 			if (last_desc >= q->q.size)
1388 				last_desc -= q->q.size;
1389 			q->q.sdesc[last_desc].skb = skb;
1390 		}
1391 
1392 		txq_advance(&q->q, ndesc);
1393 		written += ndesc;
1394 		if (unlikely(written > 32)) {
1395 			ring_tx_db(q->adap, &q->q, written);
1396 			written = 0;
1397 		}
1398 
1399 		spin_lock(&q->sendq.lock);
1400 		__skb_unlink(skb, &q->sendq);
1401 		if (is_ofld_imm(skb))
1402 			kfree_skb(skb);
1403 	}
1404 	if (likely(written))
1405 		ring_tx_db(q->adap, &q->q, written);
1406 }
1407 
1408 /**
1409  *	ofld_xmit - send a packet through an offload queue
1410  *	@q: the Tx offload queue
1411  *	@skb: the packet
1412  *
1413  *	Send an offload packet through an SGE offload queue.
1414  */
1415 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1416 {
1417 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
1418 	spin_lock(&q->sendq.lock);
1419 	__skb_queue_tail(&q->sendq, skb);
1420 	if (q->sendq.qlen == 1)
1421 		service_ofldq(q);
1422 	spin_unlock(&q->sendq.lock);
1423 	return NET_XMIT_SUCCESS;
1424 }
1425 
1426 /**
1427  *	restart_ofldq - restart a suspended offload queue
1428  *	@data: the offload queue to restart
1429  *
1430  *	Resumes transmission on a suspended Tx offload queue.
1431  */
1432 static void restart_ofldq(unsigned long data)
1433 {
1434 	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1435 
1436 	spin_lock(&q->sendq.lock);
1437 	q->full = 0;            /* the queue actually is completely empty now */
1438 	service_ofldq(q);
1439 	spin_unlock(&q->sendq.lock);
1440 }
1441 
1442 /**
1443  *	skb_txq - return the Tx queue an offload packet should use
1444  *	@skb: the packet
1445  *
1446  *	Returns the Tx queue an offload packet should use as indicated by bits
1447  *	1-15 in the packet's queue_mapping.
1448  */
1449 static inline unsigned int skb_txq(const struct sk_buff *skb)
1450 {
1451 	return skb->queue_mapping >> 1;
1452 }
1453 
1454 /**
1455  *	is_ctrl_pkt - return whether an offload packet is a control packet
1456  *	@skb: the packet
1457  *
1458  *	Returns whether an offload packet should use an OFLD or a CTRL
1459  *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
1460  */
1461 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1462 {
1463 	return skb->queue_mapping & 1;
1464 }
1465 
1466 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1467 {
1468 	unsigned int idx = skb_txq(skb);
1469 
1470 	if (unlikely(is_ctrl_pkt(skb)))
1471 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1472 	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1473 }
1474 
1475 /**
1476  *	t4_ofld_send - send an offload packet
1477  *	@adap: the adapter
1478  *	@skb: the packet
1479  *
1480  *	Sends an offload packet.  We use the packet queue_mapping to select the
1481  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1482  *	should be sent as regular or control, bits 1-15 select the queue.
1483  */
1484 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1485 {
1486 	int ret;
1487 
1488 	local_bh_disable();
1489 	ret = ofld_send(adap, skb);
1490 	local_bh_enable();
1491 	return ret;
1492 }
1493 
1494 /**
1495  *	cxgb4_ofld_send - send an offload packet
1496  *	@dev: the net device
1497  *	@skb: the packet
1498  *
1499  *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
1500  *	intended for ULDs.
1501  */
1502 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1503 {
1504 	return t4_ofld_send(netdev2adap(dev), skb);
1505 }
1506 EXPORT_SYMBOL(cxgb4_ofld_send);
1507 
1508 static inline void copy_frags(struct sk_buff *skb,
1509 			      const struct pkt_gl *gl, unsigned int offset)
1510 {
1511 	int i;
1512 
1513 	/* usually there's just one frag */
1514 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1515 			     gl->frags[0].offset + offset,
1516 			     gl->frags[0].size - offset);
1517 	skb_shinfo(skb)->nr_frags = gl->nfrags;
1518 	for (i = 1; i < gl->nfrags; i++)
1519 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1520 				     gl->frags[i].offset,
1521 				     gl->frags[i].size);
1522 
1523 	/* get a reference to the last page, we don't own it */
1524 	get_page(gl->frags[gl->nfrags - 1].page);
1525 }
1526 
1527 /**
1528  *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1529  *	@gl: the gather list
1530  *	@skb_len: size of sk_buff main body if it carries fragments
1531  *	@pull_len: amount of data to move to the sk_buff's main body
1532  *
1533  *	Builds an sk_buff from the given packet gather list.  Returns the
1534  *	sk_buff or %NULL if sk_buff allocation failed.
1535  */
1536 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1537 				   unsigned int skb_len, unsigned int pull_len)
1538 {
1539 	struct sk_buff *skb;
1540 
1541 	/*
1542 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1543 	 * size, which is expected since buffers are at least PAGE_SIZEd.
1544 	 * In this case packets up to RX_COPY_THRES have only one fragment.
1545 	 */
1546 	if (gl->tot_len <= RX_COPY_THRES) {
1547 		skb = dev_alloc_skb(gl->tot_len);
1548 		if (unlikely(!skb))
1549 			goto out;
1550 		__skb_put(skb, gl->tot_len);
1551 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1552 	} else {
1553 		skb = dev_alloc_skb(skb_len);
1554 		if (unlikely(!skb))
1555 			goto out;
1556 		__skb_put(skb, pull_len);
1557 		skb_copy_to_linear_data(skb, gl->va, pull_len);
1558 
1559 		copy_frags(skb, gl, pull_len);
1560 		skb->len = gl->tot_len;
1561 		skb->data_len = skb->len - pull_len;
1562 		skb->truesize += skb->data_len;
1563 	}
1564 out:	return skb;
1565 }
1566 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1567 
1568 /**
1569  *	t4_pktgl_free - free a packet gather list
1570  *	@gl: the gather list
1571  *
1572  *	Releases the pages of a packet gather list.  We do not own the last
1573  *	page on the list and do not free it.
1574  */
1575 static void t4_pktgl_free(const struct pkt_gl *gl)
1576 {
1577 	int n;
1578 	const struct page_frag *p;
1579 
1580 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1581 		put_page(p->page);
1582 }
1583 
1584 /*
1585  * Process an MPS trace packet.  Give it an unused protocol number so it won't
1586  * be delivered to anyone and send it to the stack for capture.
1587  */
1588 static noinline int handle_trace_pkt(struct adapter *adap,
1589 				     const struct pkt_gl *gl)
1590 {
1591 	struct sk_buff *skb;
1592 
1593 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1594 	if (unlikely(!skb)) {
1595 		t4_pktgl_free(gl);
1596 		return 0;
1597 	}
1598 
1599 	if (is_t4(adap->params.chip))
1600 		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
1601 	else
1602 		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1603 
1604 	skb_reset_mac_header(skb);
1605 	skb->protocol = htons(0xffff);
1606 	skb->dev = adap->port[0];
1607 	netif_receive_skb(skb);
1608 	return 0;
1609 }
1610 
1611 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1612 		   const struct cpl_rx_pkt *pkt)
1613 {
1614 	struct adapter *adapter = rxq->rspq.adap;
1615 	struct sge *s = &adapter->sge;
1616 	int ret;
1617 	struct sk_buff *skb;
1618 
1619 	skb = napi_get_frags(&rxq->rspq.napi);
1620 	if (unlikely(!skb)) {
1621 		t4_pktgl_free(gl);
1622 		rxq->stats.rx_drops++;
1623 		return;
1624 	}
1625 
1626 	copy_frags(skb, gl, s->pktshift);
1627 	skb->len = gl->tot_len - s->pktshift;
1628 	skb->data_len = skb->len;
1629 	skb->truesize += skb->data_len;
1630 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1631 	skb_record_rx_queue(skb, rxq->rspq.idx);
1632 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1633 		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1634 
1635 	if (unlikely(pkt->vlan_ex)) {
1636 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1637 		rxq->stats.vlan_ex++;
1638 	}
1639 	ret = napi_gro_frags(&rxq->rspq.napi);
1640 	if (ret == GRO_HELD)
1641 		rxq->stats.lro_pkts++;
1642 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1643 		rxq->stats.lro_merged++;
1644 	rxq->stats.pkts++;
1645 	rxq->stats.rx_cso++;
1646 }
1647 
1648 /**
1649  *	t4_ethrx_handler - process an ingress ethernet packet
1650  *	@q: the response queue that received the packet
1651  *	@rsp: the response queue descriptor holding the RX_PKT message
1652  *	@si: the gather list of packet fragments
1653  *
1654  *	Process an ingress ethernet packet and deliver it to the stack.
1655  */
1656 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1657 		     const struct pkt_gl *si)
1658 {
1659 	bool csum_ok;
1660 	struct sk_buff *skb;
1661 	const struct cpl_rx_pkt *pkt;
1662 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1663 	struct sge *s = &q->adap->sge;
1664 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
1665 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1666 
1667 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1668 		return handle_trace_pkt(q->adap, si);
1669 
1670 	pkt = (const struct cpl_rx_pkt *)rsp;
1671 	csum_ok = pkt->csum_calc && !pkt->err_vec;
1672 	if ((pkt->l2info & htonl(RXF_TCP)) &&
1673 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1674 		do_gro(rxq, si, pkt);
1675 		return 0;
1676 	}
1677 
1678 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1679 	if (unlikely(!skb)) {
1680 		t4_pktgl_free(si);
1681 		rxq->stats.rx_drops++;
1682 		return 0;
1683 	}
1684 
1685 	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
1686 	skb->protocol = eth_type_trans(skb, q->netdev);
1687 	skb_record_rx_queue(skb, q->idx);
1688 	if (skb->dev->features & NETIF_F_RXHASH)
1689 		skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1690 
1691 	rxq->stats.pkts++;
1692 
1693 	if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
1694 	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1695 		if (!pkt->ip_frag) {
1696 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1697 			rxq->stats.rx_cso++;
1698 		} else if (pkt->l2info & htonl(RXF_IP)) {
1699 			__sum16 c = (__force __sum16)pkt->csum;
1700 			skb->csum = csum_unfold(c);
1701 			skb->ip_summed = CHECKSUM_COMPLETE;
1702 			rxq->stats.rx_cso++;
1703 		}
1704 	} else
1705 		skb_checksum_none_assert(skb);
1706 
1707 	if (unlikely(pkt->vlan_ex)) {
1708 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1709 		rxq->stats.vlan_ex++;
1710 	}
1711 	netif_receive_skb(skb);
1712 	return 0;
1713 }
1714 
1715 /**
1716  *	restore_rx_bufs - put back a packet's Rx buffers
1717  *	@si: the packet gather list
1718  *	@q: the SGE free list
1719  *	@frags: number of FL buffers to restore
1720  *
1721  *	Puts back on an FL the Rx buffers associated with @si.  The buffers
1722  *	have already been unmapped and are left unmapped, we mark them so to
1723  *	prevent further unmapping attempts.
1724  *
1725  *	This function undoes a series of @unmap_rx_buf calls when we find out
1726  *	that the current packet can't be processed right away afterall and we
1727  *	need to come back to it later.  This is a very rare event and there's
1728  *	no effort to make this particularly efficient.
1729  */
1730 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1731 			    int frags)
1732 {
1733 	struct rx_sw_desc *d;
1734 
1735 	while (frags--) {
1736 		if (q->cidx == 0)
1737 			q->cidx = q->size - 1;
1738 		else
1739 			q->cidx--;
1740 		d = &q->sdesc[q->cidx];
1741 		d->page = si->frags[frags].page;
1742 		d->dma_addr |= RX_UNMAPPED_BUF;
1743 		q->avail++;
1744 	}
1745 }
1746 
1747 /**
1748  *	is_new_response - check if a response is newly written
1749  *	@r: the response descriptor
1750  *	@q: the response queue
1751  *
1752  *	Returns true if a response descriptor contains a yet unprocessed
1753  *	response.
1754  */
1755 static inline bool is_new_response(const struct rsp_ctrl *r,
1756 				   const struct sge_rspq *q)
1757 {
1758 	return RSPD_GEN(r->type_gen) == q->gen;
1759 }
1760 
1761 /**
1762  *	rspq_next - advance to the next entry in a response queue
1763  *	@q: the queue
1764  *
1765  *	Updates the state of a response queue to advance it to the next entry.
1766  */
1767 static inline void rspq_next(struct sge_rspq *q)
1768 {
1769 	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1770 	if (unlikely(++q->cidx == q->size)) {
1771 		q->cidx = 0;
1772 		q->gen ^= 1;
1773 		q->cur_desc = q->desc;
1774 	}
1775 }
1776 
1777 /**
1778  *	process_responses - process responses from an SGE response queue
1779  *	@q: the ingress queue to process
1780  *	@budget: how many responses can be processed in this round
1781  *
1782  *	Process responses from an SGE response queue up to the supplied budget.
1783  *	Responses include received packets as well as control messages from FW
1784  *	or HW.
1785  *
1786  *	Additionally choose the interrupt holdoff time for the next interrupt
1787  *	on this queue.  If the system is under memory shortage use a fairly
1788  *	long delay to help recovery.
1789  */
1790 static int process_responses(struct sge_rspq *q, int budget)
1791 {
1792 	int ret, rsp_type;
1793 	int budget_left = budget;
1794 	const struct rsp_ctrl *rc;
1795 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1796 	struct adapter *adapter = q->adap;
1797 	struct sge *s = &adapter->sge;
1798 
1799 	while (likely(budget_left)) {
1800 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1801 		if (!is_new_response(rc, q))
1802 			break;
1803 
1804 		rmb();
1805 		rsp_type = RSPD_TYPE(rc->type_gen);
1806 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1807 			struct page_frag *fp;
1808 			struct pkt_gl si;
1809 			const struct rx_sw_desc *rsd;
1810 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1811 
1812 			if (len & RSPD_NEWBUF) {
1813 				if (likely(q->offset > 0)) {
1814 					free_rx_bufs(q->adap, &rxq->fl, 1);
1815 					q->offset = 0;
1816 				}
1817 				len = RSPD_LEN(len);
1818 			}
1819 			si.tot_len = len;
1820 
1821 			/* gather packet fragments */
1822 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
1823 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1824 				bufsz = get_buf_size(adapter, rsd);
1825 				fp->page = rsd->page;
1826 				fp->offset = q->offset;
1827 				fp->size = min(bufsz, len);
1828 				len -= fp->size;
1829 				if (!len)
1830 					break;
1831 				unmap_rx_buf(q->adap, &rxq->fl);
1832 			}
1833 
1834 			/*
1835 			 * Last buffer remains mapped so explicitly make it
1836 			 * coherent for CPU access.
1837 			 */
1838 			dma_sync_single_for_cpu(q->adap->pdev_dev,
1839 						get_buf_addr(rsd),
1840 						fp->size, DMA_FROM_DEVICE);
1841 
1842 			si.va = page_address(si.frags[0].page) +
1843 				si.frags[0].offset;
1844 			prefetch(si.va);
1845 
1846 			si.nfrags = frags + 1;
1847 			ret = q->handler(q, q->cur_desc, &si);
1848 			if (likely(ret == 0))
1849 				q->offset += ALIGN(fp->size, s->fl_align);
1850 			else
1851 				restore_rx_bufs(&si, &rxq->fl, frags);
1852 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
1853 			ret = q->handler(q, q->cur_desc, NULL);
1854 		} else {
1855 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1856 		}
1857 
1858 		if (unlikely(ret)) {
1859 			/* couldn't process descriptor, back off for recovery */
1860 			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1861 			break;
1862 		}
1863 
1864 		rspq_next(q);
1865 		budget_left--;
1866 	}
1867 
1868 	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1869 		__refill_fl(q->adap, &rxq->fl);
1870 	return budget - budget_left;
1871 }
1872 
1873 /**
1874  *	napi_rx_handler - the NAPI handler for Rx processing
1875  *	@napi: the napi instance
1876  *	@budget: how many packets we can process in this round
1877  *
1878  *	Handler for new data events when using NAPI.  This does not need any
1879  *	locking or protection from interrupts as data interrupts are off at
1880  *	this point and other adapter interrupts do not interfere (the latter
1881  *	in not a concern at all with MSI-X as non-data interrupts then have
1882  *	a separate handler).
1883  */
1884 static int napi_rx_handler(struct napi_struct *napi, int budget)
1885 {
1886 	unsigned int params;
1887 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1888 	int work_done = process_responses(q, budget);
1889 
1890 	if (likely(work_done < budget)) {
1891 		napi_complete(napi);
1892 		params = q->next_intr_params;
1893 		q->next_intr_params = q->intr_params;
1894 	} else
1895 		params = QINTR_TIMER_IDX(7);
1896 
1897 	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1898 		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1899 	return work_done;
1900 }
1901 
1902 /*
1903  * The MSI-X interrupt handler for an SGE response queue.
1904  */
1905 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1906 {
1907 	struct sge_rspq *q = cookie;
1908 
1909 	napi_schedule(&q->napi);
1910 	return IRQ_HANDLED;
1911 }
1912 
1913 /*
1914  * Process the indirect interrupt entries in the interrupt queue and kick off
1915  * NAPI for each queue that has generated an entry.
1916  */
1917 static unsigned int process_intrq(struct adapter *adap)
1918 {
1919 	unsigned int credits;
1920 	const struct rsp_ctrl *rc;
1921 	struct sge_rspq *q = &adap->sge.intrq;
1922 
1923 	spin_lock(&adap->sge.intrq_lock);
1924 	for (credits = 0; ; credits++) {
1925 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1926 		if (!is_new_response(rc, q))
1927 			break;
1928 
1929 		rmb();
1930 		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1931 			unsigned int qid = ntohl(rc->pldbuflen_qid);
1932 
1933 			qid -= adap->sge.ingr_start;
1934 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
1935 		}
1936 
1937 		rspq_next(q);
1938 	}
1939 
1940 	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
1941 		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
1942 	spin_unlock(&adap->sge.intrq_lock);
1943 	return credits;
1944 }
1945 
1946 /*
1947  * The MSI interrupt handler, which handles data events from SGE response queues
1948  * as well as error and other async events as they all use the same MSI vector.
1949  */
1950 static irqreturn_t t4_intr_msi(int irq, void *cookie)
1951 {
1952 	struct adapter *adap = cookie;
1953 
1954 	t4_slow_intr_handler(adap);
1955 	process_intrq(adap);
1956 	return IRQ_HANDLED;
1957 }
1958 
1959 /*
1960  * Interrupt handler for legacy INTx interrupts.
1961  * Handles data events from SGE response queues as well as error and other
1962  * async events as they all use the same interrupt line.
1963  */
1964 static irqreturn_t t4_intr_intx(int irq, void *cookie)
1965 {
1966 	struct adapter *adap = cookie;
1967 
1968 	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
1969 	if (t4_slow_intr_handler(adap) | process_intrq(adap))
1970 		return IRQ_HANDLED;
1971 	return IRQ_NONE;             /* probably shared interrupt */
1972 }
1973 
1974 /**
1975  *	t4_intr_handler - select the top-level interrupt handler
1976  *	@adap: the adapter
1977  *
1978  *	Selects the top-level interrupt handler based on the type of interrupts
1979  *	(MSI-X, MSI, or INTx).
1980  */
1981 irq_handler_t t4_intr_handler(struct adapter *adap)
1982 {
1983 	if (adap->flags & USING_MSIX)
1984 		return t4_sge_intr_msix;
1985 	if (adap->flags & USING_MSI)
1986 		return t4_intr_msi;
1987 	return t4_intr_intx;
1988 }
1989 
1990 static void sge_rx_timer_cb(unsigned long data)
1991 {
1992 	unsigned long m;
1993 	unsigned int i, cnt[2];
1994 	struct adapter *adap = (struct adapter *)data;
1995 	struct sge *s = &adap->sge;
1996 
1997 	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
1998 		for (m = s->starving_fl[i]; m; m &= m - 1) {
1999 			struct sge_eth_rxq *rxq;
2000 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2001 			struct sge_fl *fl = s->egr_map[id];
2002 
2003 			clear_bit(id, s->starving_fl);
2004 			smp_mb__after_clear_bit();
2005 
2006 			if (fl_starving(fl)) {
2007 				rxq = container_of(fl, struct sge_eth_rxq, fl);
2008 				if (napi_reschedule(&rxq->rspq.napi))
2009 					fl->starving++;
2010 				else
2011 					set_bit(id, s->starving_fl);
2012 			}
2013 		}
2014 
2015 	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
2016 	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
2017 	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
2018 
2019 	for (i = 0; i < 2; i++)
2020 		if (cnt[i] >= s->starve_thres) {
2021 			if (s->idma_state[i] || cnt[i] == 0xffffffff)
2022 				continue;
2023 			s->idma_state[i] = 1;
2024 			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
2025 			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
2026 			dev_warn(adap->pdev_dev,
2027 				 "SGE idma%u starvation detected for "
2028 				 "queue %lu\n", i, m & 0xffff);
2029 		} else if (s->idma_state[i])
2030 			s->idma_state[i] = 0;
2031 
2032 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2033 }
2034 
2035 static void sge_tx_timer_cb(unsigned long data)
2036 {
2037 	unsigned long m;
2038 	unsigned int i, budget;
2039 	struct adapter *adap = (struct adapter *)data;
2040 	struct sge *s = &adap->sge;
2041 
2042 	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
2043 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
2044 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2045 			struct sge_ofld_txq *txq = s->egr_map[id];
2046 
2047 			clear_bit(id, s->txq_maperr);
2048 			tasklet_schedule(&txq->qresume_tsk);
2049 		}
2050 
2051 	budget = MAX_TIMER_TX_RECLAIM;
2052 	i = s->ethtxq_rover;
2053 	do {
2054 		struct sge_eth_txq *q = &s->ethtxq[i];
2055 
2056 		if (q->q.in_use &&
2057 		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2058 		    __netif_tx_trylock(q->txq)) {
2059 			int avail = reclaimable(&q->q);
2060 
2061 			if (avail) {
2062 				if (avail > budget)
2063 					avail = budget;
2064 
2065 				free_tx_desc(adap, &q->q, avail, true);
2066 				q->q.in_use -= avail;
2067 				budget -= avail;
2068 			}
2069 			__netif_tx_unlock(q->txq);
2070 		}
2071 
2072 		if (++i >= s->ethqsets)
2073 			i = 0;
2074 	} while (budget && i != s->ethtxq_rover);
2075 	s->ethtxq_rover = i;
2076 	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2077 }
2078 
2079 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2080 		     struct net_device *dev, int intr_idx,
2081 		     struct sge_fl *fl, rspq_handler_t hnd)
2082 {
2083 	int ret, flsz = 0;
2084 	struct fw_iq_cmd c;
2085 	struct sge *s = &adap->sge;
2086 	struct port_info *pi = netdev_priv(dev);
2087 
2088 	/* Size needs to be multiple of 16, including status entry. */
2089 	iq->size = roundup(iq->size, 16);
2090 
2091 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2092 			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
2093 	if (!iq->desc)
2094 		return -ENOMEM;
2095 
2096 	memset(&c, 0, sizeof(c));
2097 	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2098 			    FW_CMD_WRITE | FW_CMD_EXEC |
2099 			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
2100 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
2101 				 FW_LEN16(c));
2102 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
2103 		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
2104 		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
2105 		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
2106 							-intr_idx - 1));
2107 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2108 		FW_IQ_CMD_IQGTSMODE |
2109 		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2110 		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2111 	c.iqsize = htons(iq->size);
2112 	c.iqaddr = cpu_to_be64(iq->phys_addr);
2113 
2114 	if (fl) {
2115 		fl->size = roundup(fl->size, 8);
2116 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2117 				      sizeof(struct rx_sw_desc), &fl->addr,
2118 				      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2119 		if (!fl->desc)
2120 			goto fl_nomem;
2121 
2122 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2123 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
2124 					    FW_IQ_CMD_FL0FETCHRO(1) |
2125 					    FW_IQ_CMD_FL0DATARO(1) |
2126 					    FW_IQ_CMD_FL0PADEN(1));
2127 		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2128 				FW_IQ_CMD_FL0FBMAX(3));
2129 		c.fl0size = htons(flsz);
2130 		c.fl0addr = cpu_to_be64(fl->addr);
2131 	}
2132 
2133 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2134 	if (ret)
2135 		goto err;
2136 
2137 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2138 	iq->cur_desc = iq->desc;
2139 	iq->cidx = 0;
2140 	iq->gen = 1;
2141 	iq->next_intr_params = iq->intr_params;
2142 	iq->cntxt_id = ntohs(c.iqid);
2143 	iq->abs_id = ntohs(c.physiqid);
2144 	iq->size--;                           /* subtract status entry */
2145 	iq->adap = adap;
2146 	iq->netdev = dev;
2147 	iq->handler = hnd;
2148 
2149 	/* set offset to -1 to distinguish ingress queues without FL */
2150 	iq->offset = fl ? 0 : -1;
2151 
2152 	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2153 
2154 	if (fl) {
2155 		fl->cntxt_id = ntohs(c.fl0id);
2156 		fl->avail = fl->pend_cred = 0;
2157 		fl->pidx = fl->cidx = 0;
2158 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2159 		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2160 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2161 	}
2162 	return 0;
2163 
2164 fl_nomem:
2165 	ret = -ENOMEM;
2166 err:
2167 	if (iq->desc) {
2168 		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2169 				  iq->desc, iq->phys_addr);
2170 		iq->desc = NULL;
2171 	}
2172 	if (fl && fl->desc) {
2173 		kfree(fl->sdesc);
2174 		fl->sdesc = NULL;
2175 		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2176 				  fl->desc, fl->addr);
2177 		fl->desc = NULL;
2178 	}
2179 	return ret;
2180 }
2181 
2182 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2183 {
2184 	q->cntxt_id = id;
2185 	if (!is_t4(adap->params.chip)) {
2186 		unsigned int s_qpp;
2187 		unsigned short udb_density;
2188 		unsigned long qpshift;
2189 		int page;
2190 
2191 		s_qpp = QUEUESPERPAGEPF1 * adap->fn;
2192 		udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
2193 				SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
2194 		qpshift = PAGE_SHIFT - ilog2(udb_density);
2195 		q->udb = q->cntxt_id << qpshift;
2196 		q->udb &= PAGE_MASK;
2197 		page = q->udb / PAGE_SIZE;
2198 		q->udb += (q->cntxt_id - (page * udb_density)) * 128;
2199 	}
2200 
2201 	q->in_use = 0;
2202 	q->cidx = q->pidx = 0;
2203 	q->stops = q->restarts = 0;
2204 	q->stat = (void *)&q->desc[q->size];
2205 	spin_lock_init(&q->db_lock);
2206 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
2207 }
2208 
2209 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2210 			 struct net_device *dev, struct netdev_queue *netdevq,
2211 			 unsigned int iqid)
2212 {
2213 	int ret, nentries;
2214 	struct fw_eq_eth_cmd c;
2215 	struct sge *s = &adap->sge;
2216 	struct port_info *pi = netdev_priv(dev);
2217 
2218 	/* Add status entries */
2219 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2220 
2221 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2222 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2223 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2224 			netdev_queue_numa_node_read(netdevq));
2225 	if (!txq->q.desc)
2226 		return -ENOMEM;
2227 
2228 	memset(&c, 0, sizeof(c));
2229 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2230 			    FW_CMD_WRITE | FW_CMD_EXEC |
2231 			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
2232 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2233 				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2234 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
2235 	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2236 				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2237 				   FW_EQ_ETH_CMD_FETCHRO(1) |
2238 				   FW_EQ_ETH_CMD_IQID(iqid));
2239 	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
2240 				  FW_EQ_ETH_CMD_FBMAX(3) |
2241 				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
2242 				  FW_EQ_ETH_CMD_EQSIZE(nentries));
2243 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2244 
2245 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2246 	if (ret) {
2247 		kfree(txq->q.sdesc);
2248 		txq->q.sdesc = NULL;
2249 		dma_free_coherent(adap->pdev_dev,
2250 				  nentries * sizeof(struct tx_desc),
2251 				  txq->q.desc, txq->q.phys_addr);
2252 		txq->q.desc = NULL;
2253 		return ret;
2254 	}
2255 
2256 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2257 	txq->txq = netdevq;
2258 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2259 	txq->mapping_err = 0;
2260 	return 0;
2261 }
2262 
2263 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2264 			  struct net_device *dev, unsigned int iqid,
2265 			  unsigned int cmplqid)
2266 {
2267 	int ret, nentries;
2268 	struct fw_eq_ctrl_cmd c;
2269 	struct sge *s = &adap->sge;
2270 	struct port_info *pi = netdev_priv(dev);
2271 
2272 	/* Add status entries */
2273 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2274 
2275 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2276 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2277 				 NULL, 0, NUMA_NO_NODE);
2278 	if (!txq->q.desc)
2279 		return -ENOMEM;
2280 
2281 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2282 			    FW_CMD_WRITE | FW_CMD_EXEC |
2283 			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
2284 			    FW_EQ_CTRL_CMD_VFN(0));
2285 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2286 				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2287 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
2288 	c.physeqid_pkd = htonl(0);
2289 	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2290 				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2291 				   FW_EQ_CTRL_CMD_FETCHRO |
2292 				   FW_EQ_CTRL_CMD_IQID(iqid));
2293 	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2294 				  FW_EQ_CTRL_CMD_FBMAX(3) |
2295 				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
2296 				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
2297 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2298 
2299 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2300 	if (ret) {
2301 		dma_free_coherent(adap->pdev_dev,
2302 				  nentries * sizeof(struct tx_desc),
2303 				  txq->q.desc, txq->q.phys_addr);
2304 		txq->q.desc = NULL;
2305 		return ret;
2306 	}
2307 
2308 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
2309 	txq->adap = adap;
2310 	skb_queue_head_init(&txq->sendq);
2311 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2312 	txq->full = 0;
2313 	return 0;
2314 }
2315 
2316 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2317 			  struct net_device *dev, unsigned int iqid)
2318 {
2319 	int ret, nentries;
2320 	struct fw_eq_ofld_cmd c;
2321 	struct sge *s = &adap->sge;
2322 	struct port_info *pi = netdev_priv(dev);
2323 
2324 	/* Add status entries */
2325 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2326 
2327 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2328 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2329 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2330 			NUMA_NO_NODE);
2331 	if (!txq->q.desc)
2332 		return -ENOMEM;
2333 
2334 	memset(&c, 0, sizeof(c));
2335 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2336 			    FW_CMD_WRITE | FW_CMD_EXEC |
2337 			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
2338 			    FW_EQ_OFLD_CMD_VFN(0));
2339 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
2340 				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2341 	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
2342 				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
2343 				   FW_EQ_OFLD_CMD_FETCHRO(1) |
2344 				   FW_EQ_OFLD_CMD_IQID(iqid));
2345 	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
2346 				  FW_EQ_OFLD_CMD_FBMAX(3) |
2347 				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
2348 				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
2349 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2350 
2351 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2352 	if (ret) {
2353 		kfree(txq->q.sdesc);
2354 		txq->q.sdesc = NULL;
2355 		dma_free_coherent(adap->pdev_dev,
2356 				  nentries * sizeof(struct tx_desc),
2357 				  txq->q.desc, txq->q.phys_addr);
2358 		txq->q.desc = NULL;
2359 		return ret;
2360 	}
2361 
2362 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2363 	txq->adap = adap;
2364 	skb_queue_head_init(&txq->sendq);
2365 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2366 	txq->full = 0;
2367 	txq->mapping_err = 0;
2368 	return 0;
2369 }
2370 
2371 static void free_txq(struct adapter *adap, struct sge_txq *q)
2372 {
2373 	struct sge *s = &adap->sge;
2374 
2375 	dma_free_coherent(adap->pdev_dev,
2376 			  q->size * sizeof(struct tx_desc) + s->stat_len,
2377 			  q->desc, q->phys_addr);
2378 	q->cntxt_id = 0;
2379 	q->sdesc = NULL;
2380 	q->desc = NULL;
2381 }
2382 
2383 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2384 			 struct sge_fl *fl)
2385 {
2386 	struct sge *s = &adap->sge;
2387 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2388 
2389 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2390 	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2391 		   rq->cntxt_id, fl_id, 0xffff);
2392 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2393 			  rq->desc, rq->phys_addr);
2394 	netif_napi_del(&rq->napi);
2395 	rq->netdev = NULL;
2396 	rq->cntxt_id = rq->abs_id = 0;
2397 	rq->desc = NULL;
2398 
2399 	if (fl) {
2400 		free_rx_bufs(adap, fl, fl->avail);
2401 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2402 				  fl->desc, fl->addr);
2403 		kfree(fl->sdesc);
2404 		fl->sdesc = NULL;
2405 		fl->cntxt_id = 0;
2406 		fl->desc = NULL;
2407 	}
2408 }
2409 
2410 /**
2411  *	t4_free_sge_resources - free SGE resources
2412  *	@adap: the adapter
2413  *
2414  *	Frees resources used by the SGE queue sets.
2415  */
2416 void t4_free_sge_resources(struct adapter *adap)
2417 {
2418 	int i;
2419 	struct sge_eth_rxq *eq = adap->sge.ethrxq;
2420 	struct sge_eth_txq *etq = adap->sge.ethtxq;
2421 	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
2422 
2423 	/* clean up Ethernet Tx/Rx queues */
2424 	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2425 		if (eq->rspq.desc)
2426 			free_rspq_fl(adap, &eq->rspq, &eq->fl);
2427 		if (etq->q.desc) {
2428 			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2429 				       etq->q.cntxt_id);
2430 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2431 			kfree(etq->q.sdesc);
2432 			free_txq(adap, &etq->q);
2433 		}
2434 	}
2435 
2436 	/* clean up RDMA and iSCSI Rx queues */
2437 	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
2438 		if (oq->rspq.desc)
2439 			free_rspq_fl(adap, &oq->rspq, &oq->fl);
2440 	}
2441 	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
2442 		if (oq->rspq.desc)
2443 			free_rspq_fl(adap, &oq->rspq, &oq->fl);
2444 	}
2445 
2446 	/* clean up offload Tx queues */
2447 	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2448 		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2449 
2450 		if (q->q.desc) {
2451 			tasklet_kill(&q->qresume_tsk);
2452 			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2453 					q->q.cntxt_id);
2454 			free_tx_desc(adap, &q->q, q->q.in_use, false);
2455 			kfree(q->q.sdesc);
2456 			__skb_queue_purge(&q->sendq);
2457 			free_txq(adap, &q->q);
2458 		}
2459 	}
2460 
2461 	/* clean up control Tx queues */
2462 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2463 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2464 
2465 		if (cq->q.desc) {
2466 			tasklet_kill(&cq->qresume_tsk);
2467 			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2468 					cq->q.cntxt_id);
2469 			__skb_queue_purge(&cq->sendq);
2470 			free_txq(adap, &cq->q);
2471 		}
2472 	}
2473 
2474 	if (adap->sge.fw_evtq.desc)
2475 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2476 
2477 	if (adap->sge.intrq.desc)
2478 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
2479 
2480 	/* clear the reverse egress queue map */
2481 	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2482 }
2483 
2484 void t4_sge_start(struct adapter *adap)
2485 {
2486 	adap->sge.ethtxq_rover = 0;
2487 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2488 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2489 }
2490 
2491 /**
2492  *	t4_sge_stop - disable SGE operation
2493  *	@adap: the adapter
2494  *
2495  *	Stop tasklets and timers associated with the DMA engine.  Note that
2496  *	this is effective only if measures have been taken to disable any HW
2497  *	events that may restart them.
2498  */
2499 void t4_sge_stop(struct adapter *adap)
2500 {
2501 	int i;
2502 	struct sge *s = &adap->sge;
2503 
2504 	if (in_interrupt())  /* actions below require waiting */
2505 		return;
2506 
2507 	if (s->rx_timer.function)
2508 		del_timer_sync(&s->rx_timer);
2509 	if (s->tx_timer.function)
2510 		del_timer_sync(&s->tx_timer);
2511 
2512 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2513 		struct sge_ofld_txq *q = &s->ofldtxq[i];
2514 
2515 		if (q->q.desc)
2516 			tasklet_kill(&q->qresume_tsk);
2517 	}
2518 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2519 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
2520 
2521 		if (cq->q.desc)
2522 			tasklet_kill(&cq->qresume_tsk);
2523 	}
2524 }
2525 
2526 /**
2527  *	t4_sge_init - initialize SGE
2528  *	@adap: the adapter
2529  *
2530  *	Performs SGE initialization needed every time after a chip reset.
2531  *	We do not initialize any of the queues here, instead the driver
2532  *	top-level must request them individually.
2533  *
2534  *	Called in two different modes:
2535  *
2536  *	 1. Perform actual hardware initialization and record hard-coded
2537  *	    parameters which were used.  This gets used when we're the
2538  *	    Master PF and the Firmware Configuration File support didn't
2539  *	    work for some reason.
2540  *
2541  *	 2. We're not the Master PF or initialization was performed with
2542  *	    a Firmware Configuration File.  In this case we need to grab
2543  *	    any of the SGE operating parameters that we need to have in
2544  *	    order to do our job and make sure we can live with them ...
2545  */
2546 
2547 static int t4_sge_init_soft(struct adapter *adap)
2548 {
2549 	struct sge *s = &adap->sge;
2550 	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2551 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2552 	u32 ingress_rx_threshold;
2553 
2554 	/*
2555 	 * Verify that CPL messages are going to the Ingress Queue for
2556 	 * process_responses() and that only packet data is going to the
2557 	 * Free Lists.
2558 	 */
2559 	if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
2560 	    RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
2561 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2562 		return -EINVAL;
2563 	}
2564 
2565 	/*
2566 	 * Validate the Host Buffer Register Array indices that we want to
2567 	 * use ...
2568 	 *
2569 	 * XXX Note that we should really read through the Host Buffer Size
2570 	 * XXX register array and find the indices of the Buffer Sizes which
2571 	 * XXX meet our needs!
2572 	 */
2573 	#define READ_FL_BUF(x) \
2574 		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
2575 
2576 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2577 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2578 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2579 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2580 
2581 	#undef READ_FL_BUF
2582 
2583 	if (fl_small_pg != PAGE_SIZE ||
2584 	    (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
2585 				  (fl_large_pg & (fl_large_pg-1)) != 0))) {
2586 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2587 			fl_small_pg, fl_large_pg);
2588 		return -EINVAL;
2589 	}
2590 	if (fl_large_pg)
2591 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2592 
2593 	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
2594 	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2595 		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
2596 			fl_small_mtu, fl_large_mtu);
2597 		return -EINVAL;
2598 	}
2599 
2600 	/*
2601 	 * Retrieve our RX interrupt holdoff timer values and counter
2602 	 * threshold values from the SGE parameters.
2603 	 */
2604 	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
2605 	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
2606 	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
2607 	s->timer_val[0] = core_ticks_to_us(adap,
2608 		TIMERVALUE0_GET(timer_value_0_and_1));
2609 	s->timer_val[1] = core_ticks_to_us(adap,
2610 		TIMERVALUE1_GET(timer_value_0_and_1));
2611 	s->timer_val[2] = core_ticks_to_us(adap,
2612 		TIMERVALUE2_GET(timer_value_2_and_3));
2613 	s->timer_val[3] = core_ticks_to_us(adap,
2614 		TIMERVALUE3_GET(timer_value_2_and_3));
2615 	s->timer_val[4] = core_ticks_to_us(adap,
2616 		TIMERVALUE4_GET(timer_value_4_and_5));
2617 	s->timer_val[5] = core_ticks_to_us(adap,
2618 		TIMERVALUE5_GET(timer_value_4_and_5));
2619 
2620 	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
2621 	s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
2622 	s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
2623 	s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
2624 	s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
2625 
2626 	return 0;
2627 }
2628 
2629 static int t4_sge_init_hard(struct adapter *adap)
2630 {
2631 	struct sge *s = &adap->sge;
2632 
2633 	/*
2634 	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
2635 	 * Queue and Packet Date to the Free List.
2636 	 */
2637 	t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
2638 			 RXPKTCPLMODE_MASK);
2639 
2640 	/*
2641 	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2642 	 * and generate an interrupt when this occurs so we can recover.
2643 	 */
2644 	if (is_t4(adap->params.chip)) {
2645 		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2646 				 V_HP_INT_THRESH(M_HP_INT_THRESH) |
2647 				 V_LP_INT_THRESH(M_LP_INT_THRESH),
2648 				 V_HP_INT_THRESH(dbfifo_int_thresh) |
2649 				 V_LP_INT_THRESH(dbfifo_int_thresh));
2650 	} else {
2651 		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2652 				 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
2653 				 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
2654 		t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
2655 				 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
2656 				 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
2657 	}
2658 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2659 			F_ENABLE_DROP);
2660 
2661 	/*
2662 	 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
2663 	 * t4_fixup_host_params().
2664 	 */
2665 	s->fl_pg_order = FL_PG_ORDER;
2666 	if (s->fl_pg_order)
2667 		t4_write_reg(adap,
2668 			     SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
2669 			     PAGE_SIZE << FL_PG_ORDER);
2670 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
2671 		     FL_MTU_SMALL_BUFSIZE(adap));
2672 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
2673 		     FL_MTU_LARGE_BUFSIZE(adap));
2674 
2675 	/*
2676 	 * Note that the SGE Ingress Packet Count Interrupt Threshold and
2677 	 * Timer Holdoff values must be supplied by our caller.
2678 	 */
2679 	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2680 		     THRESHOLD_0(s->counter_val[0]) |
2681 		     THRESHOLD_1(s->counter_val[1]) |
2682 		     THRESHOLD_2(s->counter_val[2]) |
2683 		     THRESHOLD_3(s->counter_val[3]));
2684 	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2685 		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2686 		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2687 	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2688 		     TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
2689 		     TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
2690 	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2691 		     TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
2692 		     TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
2693 
2694 	return 0;
2695 }
2696 
2697 int t4_sge_init(struct adapter *adap)
2698 {
2699 	struct sge *s = &adap->sge;
2700 	u32 sge_control;
2701 	int ret;
2702 
2703 	/*
2704 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
2705 	 * t4_fixup_host_params().
2706 	 */
2707 	sge_control = t4_read_reg(adap, SGE_CONTROL);
2708 	s->pktshift = PKTSHIFT_GET(sge_control);
2709 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
2710 	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
2711 			    X_INGPADBOUNDARY_SHIFT);
2712 
2713 	if (adap->flags & USING_SOFT_PARAMS)
2714 		ret = t4_sge_init_soft(adap);
2715 	else
2716 		ret = t4_sge_init_hard(adap);
2717 	if (ret < 0)
2718 		return ret;
2719 
2720 	/*
2721 	 * A FL with <= fl_starve_thres buffers is starving and a periodic
2722 	 * timer will attempt to refill it.  This needs to be larger than the
2723 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
2724 	 * stuck waiting for new packets while the SGE is waiting for us to
2725 	 * give it more Free List entries.  (Note that the SGE's Egress
2726 	 * Congestion Threshold is in units of 2 Free List pointers.)
2727 	 */
2728 	s->fl_starve_thres
2729 		= EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
2730 
2731 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2732 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2733 	s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
2734 	s->idma_state[0] = s->idma_state[1] = 0;
2735 	spin_lock_init(&s->intrq_lock);
2736 
2737 	return 0;
2738 }
2739