1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
44 #include <net/ipv6.h>
45 #include <net/tcp.h>
46 #include "cxgb4.h"
47 #include "t4_regs.h"
48 #include "t4_msg.h"
49 #include "t4fw_api.h"
50 
51 /*
52  * Rx buffer size.  We use largish buffers if possible but settle for single
53  * pages under memory shortage.
54  */
55 #if PAGE_SHIFT >= 16
56 # define FL_PG_ORDER 0
57 #else
58 # define FL_PG_ORDER (16 - PAGE_SHIFT)
59 #endif
60 
61 /* RX_PULL_LEN should be <= RX_COPY_THRES */
62 #define RX_COPY_THRES    256
63 #define RX_PULL_LEN      128
64 
65 /*
66  * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
67  * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
68  */
69 #define RX_PKT_SKB_LEN   512
70 
71 /*
72  * Max number of Tx descriptors we clean up at a time.  Should be modest as
73  * freeing skbs isn't cheap and it happens while holding locks.  As long as we
74  * free packets faster than they arrive, we eventually catch up and keep
75  * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
76  */
77 #define MAX_TX_RECLAIM 16
78 
79 /*
80  * Max number of Rx buffers we replenish at a time.  Again keep this modest,
81  * allocating buffers isn't cheap either.
82  */
83 #define MAX_RX_REFILL 16U
84 
85 /*
86  * Period of the Rx queue check timer.  This timer is infrequent as it has
87  * something to do only when the system experiences severe memory shortage.
88  */
89 #define RX_QCHECK_PERIOD (HZ / 2)
90 
91 /*
92  * Period of the Tx queue check timer.
93  */
94 #define TX_QCHECK_PERIOD (HZ / 2)
95 
96 /* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
97  * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
98  * State Machines in the same state for this amount of time (in HZ) then we'll
99  * issue a warning about a potential hang.  We'll repeat the warning as the
100  * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
101  * the situation clears.  If the situation clears, we'll note that as well.
102  */
103 #define SGE_IDMA_WARN_THRESH (1 * HZ)
104 #define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
105 
106 /*
107  * Max number of Tx descriptors to be reclaimed by the Tx timer.
108  */
109 #define MAX_TIMER_TX_RECLAIM 100
110 
111 /*
112  * Timer index used when backing off due to memory shortage.
113  */
114 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
115 
116 /*
117  * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
118  * attempt to refill it.
119  */
120 #define FL_STARVE_THRES 4
121 
122 /*
123  * Suspend an Ethernet Tx queue with fewer available descriptors than this.
124  * This is the same as calc_tx_descs() for a TSO packet with
125  * nr_frags == MAX_SKB_FRAGS.
126  */
127 #define ETHTXQ_STOP_THRES \
128 	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
129 
130 /*
131  * Suspension threshold for non-Ethernet Tx queues.  We require enough room
132  * for a full sized WR.
133  */
134 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
135 
136 /*
137  * Max Tx descriptor space we allow for an Ethernet packet to be inlined
138  * into a WR.
139  */
140 #define MAX_IMM_TX_PKT_LEN 128
141 
142 /*
143  * Max size of a WR sent through a control Tx queue.
144  */
145 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
146 
147 struct tx_sw_desc {                /* SW state per Tx descriptor */
148 	struct sk_buff *skb;
149 	struct ulptx_sgl *sgl;
150 };
151 
152 struct rx_sw_desc {                /* SW state per Rx descriptor */
153 	struct page *page;
154 	dma_addr_t dma_addr;
155 };
156 
157 /*
158  * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
159  * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
160  * We could easily support more but there doesn't seem to be much need for
161  * that ...
162  */
163 #define FL_MTU_SMALL 1500
164 #define FL_MTU_LARGE 9000
165 
166 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
167 					  unsigned int mtu)
168 {
169 	struct sge *s = &adapter->sge;
170 
171 	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
172 }
173 
174 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
175 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
176 
177 /*
178  * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
179  * these to specify the buffer size as an index into the SGE Free List Buffer
180  * Size register array.  We also use bit 4, when the buffer has been unmapped
181  * for DMA, but this is of course never sent to the hardware and is only used
182  * to prevent double unmappings.  All of the above requires that the Free List
183  * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
184  * 32-byte aligned or aligned to a larger power of 2.  Since the SGE's minimal
185  * Free List Buffer alignment is 32 bytes, this works out for us ...
186  */
187 enum {
188 	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
189 	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
190 	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
191 
192 	/*
193 	 * XXX We shouldn't depend on being able to use these indices.
194 	 * XXX Especially when some other Master PF has initialized the
195 	 * XXX adapter or we use the Firmware Configuration File.  We
196 	 * XXX should really search through the Host Buffer Size register
197 	 * XXX array for the appropriately sized buffer indices.
198 	 */
199 	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
200 	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */
201 
202 	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
203 	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
204 };
205 
206 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
207 #define MIN_NAPI_WORK  1
208 
209 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
210 {
211 	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
212 }
213 
214 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
215 {
216 	return !(d->dma_addr & RX_UNMAPPED_BUF);
217 }
218 
219 /**
220  *	txq_avail - return the number of available slots in a Tx queue
221  *	@q: the Tx queue
222  *
223  *	Returns the number of descriptors in a Tx queue available to write new
224  *	packets.
225  */
226 static inline unsigned int txq_avail(const struct sge_txq *q)
227 {
228 	return q->size - 1 - q->in_use;
229 }
230 
231 /**
232  *	fl_cap - return the capacity of a free-buffer list
233  *	@fl: the FL
234  *
235  *	Returns the capacity of a free-buffer list.  The capacity is less than
236  *	the size because one descriptor needs to be left unpopulated, otherwise
237  *	HW will think the FL is empty.
238  */
239 static inline unsigned int fl_cap(const struct sge_fl *fl)
240 {
241 	return fl->size - 8;   /* 1 descriptor = 8 buffers */
242 }
243 
244 static inline bool fl_starving(const struct sge_fl *fl)
245 {
246 	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
247 }
248 
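/**
 *	map_skb - map an sk_buff's data for DMA
 *	@dev: the device to map for
 *	@skb: the packet
 *	@addr: array in which to return the bus addresses
 *
 *	Maps the main body and all page fragments of @skb for DMA and stores
 *	the resulting bus addresses in @addr.  Returns 0 on success or -ENOMEM
 *	after unwinding any mappings already made.
 */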
249 static int map_skb(struct device *dev, const struct sk_buff *skb,
250 		   dma_addr_t *addr)
251 {
252 	const skb_frag_t *fp, *end;
253 	const struct skb_shared_info *si;
254 
255 	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
256 	if (dma_mapping_error(dev, *addr))
257 		goto out_err;
258 
259 	si = skb_shinfo(skb);
260 	end = &si->frags[si->nr_frags];
261 
262 	for (fp = si->frags; fp < end; fp++) {
263 		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
264 					   DMA_TO_DEVICE);
265 		if (dma_mapping_error(dev, *addr))
266 			goto unwind;
267 	}
268 	return 0;
269 
270 unwind:
271 	while (fp-- > si->frags)
272 		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
273 
274 	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
275 out_err:
276 	return -ENOMEM;
277 }
278 
279 #ifdef CONFIG_NEED_DMA_MAP_STATE
280 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
281 		      const dma_addr_t *addr)
282 {
283 	const skb_frag_t *fp, *end;
284 	const struct skb_shared_info *si;
285 
286 	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
287 
288 	si = skb_shinfo(skb);
289 	end = &si->frags[si->nr_frags];
290 	for (fp = si->frags; fp < end; fp++)
291 		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
292 }
293 
294 /**
295  *	deferred_unmap_destructor - unmap a packet when it is freed
296  *	@skb: the packet
297  *
298  *	This is the packet destructor used for Tx packets that need to remain
299  *	mapped until they are freed rather than until their Tx descriptors are
300  *	freed.
301  */
302 static void deferred_unmap_destructor(struct sk_buff *skb)
303 {
304 	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
305 }
306 #endif
307 
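/*
 * Unmap the buffers described by an ULPTX SGL previously written into Tx
 * queue @q, handling an SGL that wraps past the end of the descriptor ring.
 */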
308 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
309 		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
310 {
311 	const struct ulptx_sge_pair *p;
312 	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
313 
314 	if (likely(skb_headlen(skb)))
315 		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
316 				 DMA_TO_DEVICE);
317 	else {
318 		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
319 			       DMA_TO_DEVICE);
320 		nfrags--;
321 	}
322 
323 	/*
324 	 * the complexity below is because of the possibility of a wrap-around
325 	 * in the middle of an SGL
326 	 */
327 	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
328 		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
329 unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
330 				       ntohl(p->len[0]), DMA_TO_DEVICE);
331 			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
332 				       ntohl(p->len[1]), DMA_TO_DEVICE);
333 			p++;
334 		} else if ((u8 *)p == (u8 *)q->stat) {
335 			p = (const struct ulptx_sge_pair *)q->desc;
336 			goto unmap;
337 		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
338 			const __be64 *addr = (const __be64 *)q->desc;
339 
340 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
341 				       ntohl(p->len[0]), DMA_TO_DEVICE);
342 			dma_unmap_page(dev, be64_to_cpu(addr[1]),
343 				       ntohl(p->len[1]), DMA_TO_DEVICE);
344 			p = (const struct ulptx_sge_pair *)&addr[2];
345 		} else {
346 			const __be64 *addr = (const __be64 *)q->desc;
347 
348 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
349 				       ntohl(p->len[0]), DMA_TO_DEVICE);
350 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
351 				       ntohl(p->len[1]), DMA_TO_DEVICE);
352 			p = (const struct ulptx_sge_pair *)&addr[1];
353 		}
354 	}
355 	if (nfrags) {
356 		__be64 addr;
357 
358 		if ((u8 *)p == (u8 *)q->stat)
359 			p = (const struct ulptx_sge_pair *)q->desc;
360 		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
361 						       *(const __be64 *)q->desc;
362 		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
363 			       DMA_TO_DEVICE);
364 	}
365 }
366 
367 /**
368  *	free_tx_desc - reclaims Tx descriptors and their buffers
369  *	@adap: the adapter
370  *	@q: the Tx queue to reclaim descriptors from
371  *	@n: the number of descriptors to reclaim
372  *	@unmap: whether the buffers should be unmapped for DMA
373  *
374  *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
375  *	Tx buffers.  Called with the Tx queue lock held.
376  */
377 static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
378 			 unsigned int n, bool unmap)
379 {
380 	struct tx_sw_desc *d;
381 	unsigned int cidx = q->cidx;
382 	struct device *dev = adap->pdev_dev;
383 
384 	d = &q->sdesc[cidx];
385 	while (n--) {
386 		if (d->skb) {                       /* an SGL is present */
387 			if (unmap)
388 				unmap_sgl(dev, d->skb, d->sgl, q);
389 			dev_consume_skb_any(d->skb);
390 			d->skb = NULL;
391 		}
392 		++d;
393 		if (++cidx == q->size) {
394 			cidx = 0;
395 			d = q->sdesc;
396 		}
397 	}
398 	q->cidx = cidx;
399 }
400 
401 /*
402  * Return the number of reclaimable descriptors in a Tx queue.
403  */
404 static inline int reclaimable(const struct sge_txq *q)
405 {
406 	int hw_cidx = ntohs(q->stat->cidx);
407 	hw_cidx -= q->cidx;
408 	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
409 }
410 
411 /**
412  *	reclaim_completed_tx - reclaims completed Tx descriptors
413  *	@adap: the adapter
414  *	@q: the Tx queue to reclaim completed descriptors from
415  *	@unmap: whether the buffers should be unmapped for DMA
416  *
417  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
418  *	and frees the associated buffers if possible.  Called with the Tx
419  *	queue locked.
420  */
421 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
422 					bool unmap)
423 {
424 	int avail = reclaimable(q);
425 
426 	if (avail) {
427 		/*
428 		 * Limit the amount of clean up work we do at a time to keep
429 		 * the Tx lock hold time O(1).
430 		 */
431 		if (avail > MAX_TX_RECLAIM)
432 			avail = MAX_TX_RECLAIM;
433 
434 		free_tx_desc(adap, q, avail, unmap);
435 		q->in_use -= avail;
436 	}
437 }
438 
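/*
 * Return the size of an Rx Free List buffer from the buffer-size index kept
 * in the low bits of its software descriptor's DMA address.
 */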
439 static inline int get_buf_size(struct adapter *adapter,
440 			       const struct rx_sw_desc *d)
441 {
442 	struct sge *s = &adapter->sge;
443 	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
444 	int buf_size;
445 
446 	switch (rx_buf_size_idx) {
447 	case RX_SMALL_PG_BUF:
448 		buf_size = PAGE_SIZE;
449 		break;
450 
451 	case RX_LARGE_PG_BUF:
452 		buf_size = PAGE_SIZE << s->fl_pg_order;
453 		break;
454 
455 	case RX_SMALL_MTU_BUF:
456 		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
457 		break;
458 
459 	case RX_LARGE_MTU_BUF:
460 		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
461 		break;
462 
463 	default:
464 		BUG_ON(1);
465 	}
466 
467 	return buf_size;
468 }
469 
470 /**
471  *	free_rx_bufs - free the Rx buffers on an SGE free list
472  *	@adap: the adapter
473  *	@q: the SGE free list to free buffers from
474  *	@n: how many buffers to free
475  *
476  *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
477  *	buffers must be made inaccessible to HW before calling this function.
478  */
479 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
480 {
481 	while (n--) {
482 		struct rx_sw_desc *d = &q->sdesc[q->cidx];
483 
484 		if (is_buf_mapped(d))
485 			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
486 				       get_buf_size(adap, d),
487 				       DMA_FROM_DEVICE);
488 		put_page(d->page);
489 		d->page = NULL;
490 		if (++q->cidx == q->size)
491 			q->cidx = 0;
492 		q->avail--;
493 	}
494 }
495 
496 /**
497  *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
498  *	@adap: the adapter
499  *	@q: the SGE free list
500  *
501  *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
502  *	buffer must be made inaccessible to HW before calling this function.
503  *
504  *	This is similar to @free_rx_bufs above but does not free the buffer.
505  *	Do note that the FL still loses any further access to the buffer.
506  */
507 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
508 {
509 	struct rx_sw_desc *d = &q->sdesc[q->cidx];
510 
511 	if (is_buf_mapped(d))
512 		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
513 			       get_buf_size(adap, d), DMA_FROM_DEVICE);
514 	d->page = NULL;
515 	if (++q->cidx == q->size)
516 		q->cidx = 0;
517 	q->avail--;
518 }
519 
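/*
 * Hand any pending Free List credits to the hardware, 8 buffers per doorbell
 * credit, using the kernel doorbell register on T4 and the BAR2 user doorbell
 * on later chips.
 */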
520 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
521 {
522 	u32 val;
523 	if (q->pend_cred >= 8) {
524 		val = PIDX(q->pend_cred / 8);
525 		if (!is_t4(adap->params.chip))
526 			val |= DBTYPE(1);
527 		val |= DBPRIO(1);
528 		wmb();
529 
530 		/* If we're on T4, use the old doorbell mechanism; otherwise
531 		 * use the new BAR2 mechanism.
532 		 */
533 		if (is_t4(adap->params.chip)) {
534 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
535 				     val | QID(q->cntxt_id));
536 		} else {
537 			writel(val,  adap->bar2 + q->udb + SGE_UDB_KDOORBELL);
538 
539 			/* This Write memory Barrier will force the write to
540 			 * the User Doorbell area to be flushed.
541 			 */
542 			wmb();
543 		}
544 		q->pend_cred &= 7;
545 	}
546 }
547 
548 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
549 				  dma_addr_t mapping)
550 {
551 	sd->page = pg;
552 	sd->dma_addr = mapping;      /* includes size low bits */
553 }
554 
555 /**
556  *	refill_fl - refill an SGE Rx buffer ring
557  *	@adap: the adapter
558  *	@q: the ring to refill
559  *	@n: the number of new buffers to allocate
560  *	@gfp: the gfp flags for the allocations
561  *
562  *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
563  *	allocated with the supplied gfp flags.  The caller must assure that
564  *	@n does not exceed the queue's capacity.  If afterwards the queue is
565  *	found critically low, mark it as starving in the bitmap of starving FLs.
566  *
567  *	Returns the number of buffers allocated.
568  */
569 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
570 			      gfp_t gfp)
571 {
572 	struct sge *s = &adap->sge;
573 	struct page *pg;
574 	dma_addr_t mapping;
575 	unsigned int cred = q->avail;
576 	__be64 *d = &q->desc[q->pidx];
577 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
578 
579 	gfp |= __GFP_NOWARN | __GFP_COLD;
580 
581 	if (s->fl_pg_order == 0)
582 		goto alloc_small_pages;
583 
584 	/*
585 	 * Prefer large buffers
586 	 */
587 	while (n) {
588 		pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
589 		if (unlikely(!pg)) {
590 			q->large_alloc_failed++;
591 			break;       /* fall back to single pages */
592 		}
593 
594 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
595 				       PAGE_SIZE << s->fl_pg_order,
596 				       DMA_FROM_DEVICE);
597 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
598 			__free_pages(pg, s->fl_pg_order);
599 			goto out;   /* do not try small pages for this error */
600 		}
601 		mapping |= RX_LARGE_PG_BUF;
602 		*d++ = cpu_to_be64(mapping);
603 
604 		set_rx_sw_desc(sd, pg, mapping);
605 		sd++;
606 
607 		q->avail++;
608 		if (++q->pidx == q->size) {
609 			q->pidx = 0;
610 			sd = q->sdesc;
611 			d = q->desc;
612 		}
613 		n--;
614 	}
615 
616 alloc_small_pages:
617 	while (n--) {
618 		pg = __skb_alloc_page(gfp, NULL);
619 		if (unlikely(!pg)) {
620 			q->alloc_failed++;
621 			break;
622 		}
623 
624 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
625 				       DMA_FROM_DEVICE);
626 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
627 			put_page(pg);
628 			goto out;
629 		}
630 		*d++ = cpu_to_be64(mapping);
631 
632 		set_rx_sw_desc(sd, pg, mapping);
633 		sd++;
634 
635 		q->avail++;
636 		if (++q->pidx == q->size) {
637 			q->pidx = 0;
638 			sd = q->sdesc;
639 			d = q->desc;
640 		}
641 	}
642 
643 out:	cred = q->avail - cred;
644 	q->pend_cred += cred;
645 	ring_fl_db(adap, q);
646 
647 	if (unlikely(fl_starving(q))) {
648 		smp_wmb();
649 		set_bit(q->cntxt_id - adap->sge.egr_start,
650 			adap->sge.starving_fl);
651 	}
652 
653 	return cred;
654 }
655 
656 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
657 {
658 	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
659 		  GFP_ATOMIC);
660 }
661 
662 /**
663  *	alloc_ring - allocate resources for an SGE descriptor ring
664  *	@dev: the PCI device's core device
665  *	@nelem: the number of descriptors
666  *	@elem_size: the size of each descriptor
667  *	@sw_size: the size of the SW state associated with each ring element
668  *	@phys: the physical address of the allocated ring
669  *	@metadata: address of the array holding the SW state for the ring
670  *	@stat_size: extra space in HW ring for status information
671  *	@node: preferred node for memory allocations
672  *
673  *	Allocates resources for an SGE descriptor ring, such as Tx queues,
674  *	free buffer lists, or response queues.  Each SGE ring requires
675  *	space for its HW descriptors plus, optionally, space for the SW state
676  *	associated with each HW entry (the metadata).  The function returns
677  *	three values: the virtual address for the HW ring (the return value
678  *	of the function), the bus address of the HW ring, and the address
679  *	of the SW ring.
680  */
681 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
682 			size_t sw_size, dma_addr_t *phys, void *metadata,
683 			size_t stat_size, int node)
684 {
685 	size_t len = nelem * elem_size + stat_size;
686 	void *s = NULL;
687 	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
688 
689 	if (!p)
690 		return NULL;
691 	if (sw_size) {
692 		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
693 
694 		if (!s) {
695 			dma_free_coherent(dev, len, p, *phys);
696 			return NULL;
697 		}
698 	}
699 	if (metadata)
700 		*(void **)metadata = s;
701 	memset(p, 0, len);
702 	return p;
703 }
704 
705 /**
706  *	sgl_len - calculates the size of an SGL of the given capacity
707  *	@n: the number of SGL entries
708  *
709  *	Calculates the number of flits needed for a scatter/gather list that
710  *	can hold the given number of entries.
711  */
712 static inline unsigned int sgl_len(unsigned int n)
713 {
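	/*
	 * A ULPTX SGL holds the first entry in 2 flits (cmd_nsge/len0 plus
	 * addr0); each further pair of entries takes one ulptx_sge_pair
	 * (3 flits), and a final odd entry needs 2 more flits.  For example,
	 * an SGL with 3 entries needs 2 + 3 = 5 flits.
	 */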
714 	n--;
715 	return (3 * n) / 2 + (n & 1) + 2;
716 }
717 
718 /**
719  *	flits_to_desc - returns the num of Tx descriptors for the given flits
720  *	@n: the number of flits
721  *
722  *	Returns the number of Tx descriptors needed for the supplied number
723  *	of flits.
724  */
725 static inline unsigned int flits_to_desc(unsigned int n)
726 {
727 	BUG_ON(n > SGE_MAX_WR_LEN / 8);
728 	return DIV_ROUND_UP(n, 8);
729 }
730 
731 /**
732  *	is_eth_imm - can an Ethernet packet be sent as immediate data?
733  *	@skb: the packet
734  *
735  *	Returns the CPL header length needed if the packet is small enough to
736  *	be sent as immediate data, or 0 if it is not.
737  */
738 static inline int is_eth_imm(const struct sk_buff *skb)
739 {
740 	int hdrlen = skb_shinfo(skb)->gso_size ?
741 			sizeof(struct cpl_tx_pkt_lso_core) : 0;
742 
743 	hdrlen += sizeof(struct cpl_tx_pkt);
744 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
745 		return hdrlen;
746 	return 0;
747 }
748 
749 /**
750  *	calc_tx_flits - calculate the number of flits for a packet Tx WR
751  *	@skb: the packet
752  *
753  *	Returns the number of flits needed for a Tx WR for the given Ethernet
754  *	packet, including the needed WR and CPL headers.
755  */
756 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
757 {
758 	unsigned int flits;
759 	int hdrlen = is_eth_imm(skb);
760 
761 	if (hdrlen)
762 		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
763 
764 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
765 	if (skb_shinfo(skb)->gso_size)
766 		flits += 2;
767 	return flits;
768 }
769 
770 /**
771  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
772  *	@skb: the packet
773  *
774  *	Returns the number of Tx descriptors needed for the given Ethernet
775  *	packet, including the needed WR and CPL headers.
776  */
777 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
778 {
779 	return flits_to_desc(calc_tx_flits(skb));
780 }
781 
782 /**
783  *	write_sgl - populate a scatter/gather list for a packet
784  *	@skb: the packet
785  *	@q: the Tx queue we are writing into
786  *	@sgl: starting location for writing the SGL
787  *	@end: points right after the end of the SGL
788  *	@start: start offset into skb main-body data to include in the SGL
789  *	@addr: the list of bus addresses for the SGL elements
790  *
791  *	Generates a gather list for the buffers that make up a packet.
792  *	The caller must provide adequate space for the SGL that will be written.
793  *	The SGL includes all of the packet's page fragments and the data in its
794  *	main body except for the first @start bytes.  @sgl must be 16-byte
795  *	aligned and within a Tx descriptor with available space.  @end points
796  *	right after the end of the SGL but does not account for any potential
797  *	wrap around, i.e., @end > @sgl.
798  */
799 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
800 		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
801 		      const dma_addr_t *addr)
802 {
803 	unsigned int i, len;
804 	struct ulptx_sge_pair *to;
805 	const struct skb_shared_info *si = skb_shinfo(skb);
806 	unsigned int nfrags = si->nr_frags;
807 	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
808 
809 	len = skb_headlen(skb) - start;
810 	if (likely(len)) {
811 		sgl->len0 = htonl(len);
812 		sgl->addr0 = cpu_to_be64(addr[0] + start);
813 		nfrags++;
814 	} else {
815 		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
816 		sgl->addr0 = cpu_to_be64(addr[1]);
817 	}
818 
819 	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
820 	if (likely(--nfrags == 0))
821 		return;
822 	/*
823 	 * Most of the complexity below deals with the possibility we hit the
824 	 * end of the queue in the middle of writing the SGL.  For this case
825 	 * only we create the SGL in a temporary buffer and then copy it.
826 	 */
827 	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
828 
829 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
830 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
831 		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
832 		to->addr[0] = cpu_to_be64(addr[i]);
833 		to->addr[1] = cpu_to_be64(addr[++i]);
834 	}
835 	if (nfrags) {
836 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
837 		to->len[1] = cpu_to_be32(0);
838 		to->addr[0] = cpu_to_be64(addr[i + 1]);
839 	}
840 	if (unlikely((u8 *)end > (u8 *)q->stat)) {
841 		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
842 
843 		if (likely(part0))
844 			memcpy(sgl->sge, buf, part0);
845 		part1 = (u8 *)end - (u8 *)q->stat;
846 		memcpy(q->desc, (u8 *)buf + part0, part1);
847 		end = (void *)q->desc + part1;
848 	}
849 	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
850 		*end = 0;
851 }
852 
853 /* This function copies a tx_desc struct to memory-mapped BAR2 space (user-space
854  * writes).  For coalesced WRs the SGE fetches the data from its FIFO instead of
855  * from the Host.
856  */
857 static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc)
858 {
859 	int count = sizeof(*desc) / sizeof(u64);
860 	u64 *src = (u64 *)desc;
861 
862 	while (count) {
863 		writeq(*src, dst);
864 		src++;
865 		dst++;
866 		count--;
867 	}
868 }
869 
870 /**
871  *	ring_tx_db - check and potentially ring a Tx queue's doorbell
872  *	@adap: the adapter
873  *	@q: the Tx queue
874  *	@n: number of new descriptors to give to HW
875  *
876  *	Ring the doorbell for a Tx queue.
877  */
878 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
879 {
880 	wmb();            /* write descriptors before telling HW */
881 
882 	if (is_t4(adap->params.chip)) {
883 		u32 val = PIDX(n);
884 		unsigned long flags;
885 
886 		/* For T4 we need to participate in the Doorbell Recovery
887 		 * mechanism.
888 		 */
889 		spin_lock_irqsave(&q->db_lock, flags);
890 		if (!q->db_disabled)
891 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
892 				     QID(q->cntxt_id) | val);
893 		else
894 			q->db_pidx_inc += n;
895 		q->db_pidx = q->pidx;
896 		spin_unlock_irqrestore(&q->db_lock, flags);
897 	} else {
898 		u32 val = PIDX_T5(n);
899 
900 		/* T4 and later chips share the same PIDX field offset within
901 		 * the doorbell, but T5 and later shrank the field in order to
902 		 * gain a bit for Doorbell Priority.  The field was absurdly
903 		 * large in the first place (14 bits) so we just use the T5
904 		 * and later limits and warn if a Queue ID is too large.
905 		 */
906 		WARN_ON(val & DBPRIO(1));
907 
908 		/* For T5 and later we use the Write-Combine mapped BAR2 User
909 		 * Doorbell mechanism.  If we're only writing a single TX
910 		 * Descriptor and TX Write Combining hasn't been disabled, we
911 		 * can use the Write Combining Gather Buffer; otherwise we use
912 		 * the simple doorbell.
913 		 */
914 		if (n == 1) {
915 			int index = (q->pidx
916 				     ? (q->pidx - 1)
917 				     : (q->size - 1));
918 
919 			cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL,
920 				      q->desc + index);
921 		} else {
922 			writel(val,  adap->bar2 + q->udb + SGE_UDB_KDOORBELL);
923 		}
924 
925 		/* This Write Memory Barrier will force the write to the User
926 		 * Doorbell area to be flushed.  This is needed to prevent
927 		 * writes on different CPUs for the same queue from hitting
928 		 * the adapter out of order.  This is required when some Work
929 		 * Requests take the Write Combine Gather Buffer path (user
930 		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
931 		 * take the traditional path where we simply increment the
932 		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
933 		 * hardware DMA read the actual Work Request.
934 		 */
935 		wmb();
936 	}
937 }
938 
939 /**
940  *	inline_tx_skb - inline a packet's data into Tx descriptors
941  *	@skb: the packet
942  *	@q: the Tx queue where the packet will be inlined
943  *	@pos: starting position in the Tx queue where to inline the packet
944  *
945  *	Inline a packet's contents directly into Tx descriptors, starting at
946  *	the given position within the Tx DMA ring.
947  *	Most of the complexity of this operation is dealing with wrap arounds
948  *	in the middle of the packet we want to inline.
949  */
950 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
951 			  void *pos)
952 {
953 	u64 *p;
954 	int left = (void *)q->stat - pos;
955 
956 	if (likely(skb->len <= left)) {
957 		if (likely(!skb->data_len))
958 			skb_copy_from_linear_data(skb, pos, skb->len);
959 		else
960 			skb_copy_bits(skb, 0, pos, skb->len);
961 		pos += skb->len;
962 	} else {
963 		skb_copy_bits(skb, 0, pos, left);
964 		skb_copy_bits(skb, left, q->desc, skb->len - left);
965 		pos = (void *)q->desc + (skb->len - left);
966 	}
967 
968 	/* 0-pad to multiple of 16 */
969 	p = PTR_ALIGN(pos, 8);
970 	if ((uintptr_t)p & 8)
971 		*p = 0;
972 }
973 
974 /*
975  * Figure out what HW csum a packet wants and return the appropriate control
976  * bits.
977  */
978 static u64 hwcsum(const struct sk_buff *skb)
979 {
980 	int csum_type;
981 	const struct iphdr *iph = ip_hdr(skb);
982 
983 	if (iph->version == 4) {
984 		if (iph->protocol == IPPROTO_TCP)
985 			csum_type = TX_CSUM_TCPIP;
986 		else if (iph->protocol == IPPROTO_UDP)
987 			csum_type = TX_CSUM_UDPIP;
988 		else {
989 nocsum:			/*
990 			 * unknown protocol, disable HW csum
991 			 * and hope a bad packet is detected
992 			 */
993 			return TXPKT_L4CSUM_DIS;
994 		}
995 	} else {
996 		/*
997 		 * this doesn't work with extension headers
998 		 */
999 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1000 
1001 		if (ip6h->nexthdr == IPPROTO_TCP)
1002 			csum_type = TX_CSUM_TCPIP6;
1003 		else if (ip6h->nexthdr == IPPROTO_UDP)
1004 			csum_type = TX_CSUM_UDPIP6;
1005 		else
1006 			goto nocsum;
1007 	}
1008 
1009 	if (likely(csum_type >= TX_CSUM_TCPIP))
1010 		return TXPKT_CSUM_TYPE(csum_type) |
1011 			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
1012 			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
1013 	else {
1014 		int start = skb_transport_offset(skb);
1015 
1016 		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
1017 			TXPKT_CSUM_LOC(start + skb->csum_offset);
1018 	}
1019 }
1020 
1021 static void eth_txq_stop(struct sge_eth_txq *q)
1022 {
1023 	netif_tx_stop_queue(q->txq);
1024 	q->q.stops++;
1025 }
1026 
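/**
 *	txq_advance - advance a Tx queue's producer state
 *	@q: the Tx queue
 *	@n: number of newly used descriptors
 *
 *	Accounts for @n more descriptors in use and advances the queue's PIDX,
 *	wrapping around the end of the ring as needed.
 */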
1027 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1028 {
1029 	q->in_use += n;
1030 	q->pidx += n;
1031 	if (q->pidx >= q->size)
1032 		q->pidx -= q->size;
1033 }
1034 
1035 /**
1036  *	t4_eth_xmit - add a packet to an Ethernet Tx queue
1037  *	@skb: the packet
1038  *	@dev: the egress net device
1039  *
1040  *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
1041  */
1042 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1043 {
1044 	int len;
1045 	u32 wr_mid;
1046 	u64 cntrl, *end;
1047 	int qidx, credits;
1048 	unsigned int flits, ndesc;
1049 	struct adapter *adap;
1050 	struct sge_eth_txq *q;
1051 	const struct port_info *pi;
1052 	struct fw_eth_tx_pkt_wr *wr;
1053 	struct cpl_tx_pkt_core *cpl;
1054 	const struct skb_shared_info *ssi;
1055 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1056 	bool immediate = false;
1057 
1058 	/*
1059 	 * The chip min packet length is 10 octets but play safe and reject
1060 	 * anything shorter than an Ethernet header.
1061 	 */
1062 	if (unlikely(skb->len < ETH_HLEN)) {
1063 out_free:	dev_kfree_skb_any(skb);
1064 		return NETDEV_TX_OK;
1065 	}
1066 
1067 	pi = netdev_priv(dev);
1068 	adap = pi->adapter;
1069 	qidx = skb_get_queue_mapping(skb);
1070 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
1071 
1072 	reclaim_completed_tx(adap, &q->q, true);
1073 
1074 	flits = calc_tx_flits(skb);
1075 	ndesc = flits_to_desc(flits);
1076 	credits = txq_avail(&q->q) - ndesc;
1077 
1078 	if (unlikely(credits < 0)) {
1079 		eth_txq_stop(q);
1080 		dev_err(adap->pdev_dev,
1081 			"%s: Tx ring %u full while queue awake!\n",
1082 			dev->name, qidx);
1083 		return NETDEV_TX_BUSY;
1084 	}
1085 
1086 	if (is_eth_imm(skb))
1087 		immediate = true;
1088 
1089 	if (!immediate &&
1090 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1091 		q->mapping_err++;
1092 		goto out_free;
1093 	}
1094 
1095 	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
1096 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1097 		eth_txq_stop(q);
1098 		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
1099 	}
1100 
1101 	wr = (void *)&q->q.desc[q->q.pidx];
1102 	wr->equiq_to_len16 = htonl(wr_mid);
1103 	wr->r3 = cpu_to_be64(0);
1104 	end = (u64 *)wr + flits;
1105 
1106 	len = immediate ? skb->len : 0;
1107 	ssi = skb_shinfo(skb);
1108 	if (ssi->gso_size) {
1109 		struct cpl_tx_pkt_lso *lso = (void *)wr;
1110 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1111 		int l3hdr_len = skb_network_header_len(skb);
1112 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1113 
1114 		len += sizeof(*lso);
1115 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1116 				       FW_WR_IMMDLEN(len));
1117 		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1118 					LSO_FIRST_SLICE | LSO_LAST_SLICE |
1119 					LSO_IPV6(v6) |
1120 					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1121 					LSO_IPHDR_LEN(l3hdr_len / 4) |
1122 					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1123 		lso->c.ipid_ofst = htons(0);
1124 		lso->c.mss = htons(ssi->gso_size);
1125 		lso->c.seqno_offset = htonl(0);
1126 		if (is_t4(adap->params.chip))
1127 			lso->c.len = htonl(skb->len);
1128 		else
1129 			lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
1130 		cpl = (void *)(lso + 1);
1131 		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1132 			TXPKT_IPHDR_LEN(l3hdr_len) |
1133 			TXPKT_ETHHDR_LEN(eth_xtra_len);
1134 		q->tso++;
1135 		q->tx_cso += ssi->gso_segs;
1136 	} else {
1137 		len += sizeof(*cpl);
1138 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
1139 				       FW_WR_IMMDLEN(len));
1140 		cpl = (void *)(wr + 1);
1141 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1142 			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1143 			q->tx_cso++;
1144 		} else
1145 			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1146 	}
1147 
1148 	if (vlan_tx_tag_present(skb)) {
1149 		q->vlan_ins++;
1150 		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
1151 	}
1152 
1153 	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1154 			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
1155 	cpl->pack = htons(0);
1156 	cpl->len = htons(skb->len);
1157 	cpl->ctrl1 = cpu_to_be64(cntrl);
1158 
1159 	if (immediate) {
1160 		inline_tx_skb(skb, &q->q, cpl + 1);
1161 		dev_consume_skb_any(skb);
1162 	} else {
1163 		int last_desc;
1164 
1165 		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1166 			  addr);
1167 		skb_orphan(skb);
1168 
1169 		last_desc = q->q.pidx + ndesc - 1;
1170 		if (last_desc >= q->q.size)
1171 			last_desc -= q->q.size;
1172 		q->q.sdesc[last_desc].skb = skb;
1173 		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1174 	}
1175 
1176 	txq_advance(&q->q, ndesc);
1177 
1178 	ring_tx_db(adap, &q->q, ndesc);
1179 	return NETDEV_TX_OK;
1180 }
1181 
1182 /**
1183  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1184  *	@q: the SGE control Tx queue
1185  *
1186  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1187  *	that send only immediate data (presently just the control queues) and
1188  *	thus do not have any sk_buffs to release.
1189  */
1190 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1191 {
1192 	int hw_cidx = ntohs(q->stat->cidx);
1193 	int reclaim = hw_cidx - q->cidx;
1194 
1195 	if (reclaim < 0)
1196 		reclaim += q->size;
1197 
1198 	q->in_use -= reclaim;
1199 	q->cidx = hw_cidx;
1200 }
1201 
1202 /**
1203  *	is_imm - check whether a packet can be sent as immediate data
1204  *	@skb: the packet
1205  *
1206  *	Returns true if a packet can be sent as a WR with immediate data.
1207  */
1208 static inline int is_imm(const struct sk_buff *skb)
1209 {
1210 	return skb->len <= MAX_CTRL_WR_LEN;
1211 }
1212 
1213 /**
1214  *	ctrlq_check_stop - check if a control queue is full and should stop
1215  *	@q: the queue
1216  *	@wr: most recent WR written to the queue
1217  *
1218  *	Check if a control queue has become full and should be stopped.
1219  *	We clean up control queue descriptors very lazily, only when we are out.
1220  *	If the queue is still full after reclaiming any completed descriptors
1221  *	we suspend it and have the last WR wake it up.
1222  */
1223 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1224 {
1225 	reclaim_completed_tx_imm(&q->q);
1226 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1227 		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1228 		q->q.stops++;
1229 		q->full = 1;
1230 	}
1231 }
1232 
1233 /**
1234  *	ctrl_xmit - send a packet through an SGE control Tx queue
1235  *	@q: the control queue
1236  *	@skb: the packet
1237  *
1238  *	Send a packet through an SGE control Tx queue.  Packets sent through
1239  *	a control queue must fit entirely as immediate data.
1240  */
1241 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1242 {
1243 	unsigned int ndesc;
1244 	struct fw_wr_hdr *wr;
1245 
1246 	if (unlikely(!is_imm(skb))) {
1247 		WARN_ON(1);
1248 		dev_kfree_skb(skb);
1249 		return NET_XMIT_DROP;
1250 	}
1251 
1252 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1253 	spin_lock(&q->sendq.lock);
1254 
1255 	if (unlikely(q->full)) {
1256 		skb->priority = ndesc;                  /* save for restart */
1257 		__skb_queue_tail(&q->sendq, skb);
1258 		spin_unlock(&q->sendq.lock);
1259 		return NET_XMIT_CN;
1260 	}
1261 
1262 	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1263 	inline_tx_skb(skb, &q->q, wr);
1264 
1265 	txq_advance(&q->q, ndesc);
1266 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1267 		ctrlq_check_stop(q, wr);
1268 
1269 	ring_tx_db(q->adap, &q->q, ndesc);
1270 	spin_unlock(&q->sendq.lock);
1271 
1272 	kfree_skb(skb);
1273 	return NET_XMIT_SUCCESS;
1274 }
1275 
1276 /**
1277  *	restart_ctrlq - restart a suspended control queue
1278  *	@data: the control queue to restart
1279  *
1280  *	Resumes transmission on a suspended Tx control queue.
1281  */
1282 static void restart_ctrlq(unsigned long data)
1283 {
1284 	struct sk_buff *skb;
1285 	unsigned int written = 0;
1286 	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1287 
1288 	spin_lock(&q->sendq.lock);
1289 	reclaim_completed_tx_imm(&q->q);
1290 	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
1291 
1292 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1293 		struct fw_wr_hdr *wr;
1294 		unsigned int ndesc = skb->priority;     /* previously saved */
1295 
1296 		/*
1297 		 * Write descriptors and free skbs outside the lock to limit
1298 		 * wait times.  q->full is still set so new skbs will be queued.
1299 		 */
1300 		spin_unlock(&q->sendq.lock);
1301 
1302 		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1303 		inline_tx_skb(skb, &q->q, wr);
1304 		kfree_skb(skb);
1305 
1306 		written += ndesc;
1307 		txq_advance(&q->q, ndesc);
1308 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1309 			unsigned long old = q->q.stops;
1310 
1311 			ctrlq_check_stop(q, wr);
1312 			if (q->q.stops != old) {          /* suspended anew */
1313 				spin_lock(&q->sendq.lock);
1314 				goto ringdb;
1315 			}
1316 		}
1317 		if (written > 16) {
1318 			ring_tx_db(q->adap, &q->q, written);
1319 			written = 0;
1320 		}
1321 		spin_lock(&q->sendq.lock);
1322 	}
1323 	q->full = 0;
1324 ringdb: if (written)
1325 		ring_tx_db(q->adap, &q->q, written);
1326 	spin_unlock(&q->sendq.lock);
1327 }
1328 
1329 /**
1330  *	t4_mgmt_tx - send a management message
1331  *	@adap: the adapter
1332  *	@skb: the packet containing the management message
1333  *
1334  *	Send a management message through control queue 0.
1335  */
1336 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1337 {
1338 	int ret;
1339 
1340 	local_bh_disable();
1341 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1342 	local_bh_enable();
1343 	return ret;
1344 }
1345 
1346 /**
1347  *	is_ofld_imm - check whether a packet can be sent as immediate data
1348  *	@skb: the packet
1349  *
1350  *	Returns true if a packet can be sent as an offload WR with immediate
1351  *	data.  We currently use the same limit as for Ethernet packets.
1352  */
1353 static inline int is_ofld_imm(const struct sk_buff *skb)
1354 {
1355 	return skb->len <= MAX_IMM_TX_PKT_LEN;
1356 }
1357 
1358 /**
1359  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
1360  *	@skb: the packet
1361  *
1362  *	Returns the number of flits needed for the given offload packet.
1363  *	These packets are already fully constructed and no additional headers
1364  *	will be added.
1365  */
1366 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1367 {
1368 	unsigned int flits, cnt;
1369 
1370 	if (is_ofld_imm(skb))
1371 		return DIV_ROUND_UP(skb->len, 8);
1372 
1373 	flits = skb_transport_offset(skb) / 8U;   /* headers */
1374 	cnt = skb_shinfo(skb)->nr_frags;
1375 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1376 		cnt++;
1377 	return flits + sgl_len(cnt);
1378 }
1379 
1380 /**
1381  *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1383  *	@q: the queue to stop
1384  *
1385  *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1386  *	inability to map packets.  A periodic timer attempts to restart
1387  *	queues so marked.
1388  */
1389 static void txq_stop_maperr(struct sge_ofld_txq *q)
1390 {
1391 	q->mapping_err++;
1392 	q->q.stops++;
1393 	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1394 		q->adap->sge.txq_maperr);
1395 }
1396 
1397 /**
1398  *	ofldtxq_stop - stop an offload Tx queue that has become full
1399  *	@q: the queue to stop
1400  *	@skb: the packet causing the queue to become full
1401  *
1402  *	Stops an offload Tx queue that has become full and modifies the packet
1403  *	being written to request a wakeup.
1404  */
1405 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1406 {
1407 	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1408 
1409 	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1410 	q->q.stops++;
1411 	q->full = 1;
1412 }
1413 
1414 /**
1415  *	service_ofldq - service an offload Tx queue
1416  *	@q: the offload queue
1417  *
1418  *	Services an offload Tx queue by moving packets from its packet queue
1419  *	to the HW Tx ring.  The function starts and ends with the queue locked.
1420  */
1421 static void service_ofldq(struct sge_ofld_txq *q)
1422 {
1423 	u64 *pos;
1424 	int credits;
1425 	struct sk_buff *skb;
1426 	unsigned int written = 0;
1427 	unsigned int flits, ndesc;
1428 
1429 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1430 		/*
1431 		 * We drop the lock but leave skb on sendq, thus retaining
1432 		 * exclusive access to the state of the queue.
1433 		 */
1434 		spin_unlock(&q->sendq.lock);
1435 
1436 		reclaim_completed_tx(q->adap, &q->q, false);
1437 
1438 		flits = skb->priority;                /* previously saved */
1439 		ndesc = flits_to_desc(flits);
1440 		credits = txq_avail(&q->q) - ndesc;
1441 		BUG_ON(credits < 0);
1442 		if (unlikely(credits < TXQ_STOP_THRES))
1443 			ofldtxq_stop(q, skb);
1444 
1445 		pos = (u64 *)&q->q.desc[q->q.pidx];
1446 		if (is_ofld_imm(skb))
1447 			inline_tx_skb(skb, &q->q, pos);
1448 		else if (map_skb(q->adap->pdev_dev, skb,
1449 				 (dma_addr_t *)skb->head)) {
1450 			txq_stop_maperr(q);
1451 			spin_lock(&q->sendq.lock);
1452 			break;
1453 		} else {
1454 			int last_desc, hdr_len = skb_transport_offset(skb);
1455 
1456 			memcpy(pos, skb->data, hdr_len);
1457 			write_sgl(skb, &q->q, (void *)pos + hdr_len,
1458 				  pos + flits, hdr_len,
1459 				  (dma_addr_t *)skb->head);
1460 #ifdef CONFIG_NEED_DMA_MAP_STATE
1461 			skb->dev = q->adap->port[0];
1462 			skb->destructor = deferred_unmap_destructor;
1463 #endif
1464 			last_desc = q->q.pidx + ndesc - 1;
1465 			if (last_desc >= q->q.size)
1466 				last_desc -= q->q.size;
1467 			q->q.sdesc[last_desc].skb = skb;
1468 		}
1469 
1470 		txq_advance(&q->q, ndesc);
1471 		written += ndesc;
1472 		if (unlikely(written > 32)) {
1473 			ring_tx_db(q->adap, &q->q, written);
1474 			written = 0;
1475 		}
1476 
1477 		spin_lock(&q->sendq.lock);
1478 		__skb_unlink(skb, &q->sendq);
1479 		if (is_ofld_imm(skb))
1480 			kfree_skb(skb);
1481 	}
1482 	if (likely(written))
1483 		ring_tx_db(q->adap, &q->q, written);
1484 }
1485 
1486 /**
1487  *	ofld_xmit - send a packet through an offload queue
1488  *	@q: the Tx offload queue
1489  *	@skb: the packet
1490  *
1491  *	Send an offload packet through an SGE offload queue.
1492  */
1493 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1494 {
1495 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
1496 	spin_lock(&q->sendq.lock);
1497 	__skb_queue_tail(&q->sendq, skb);
1498 	if (q->sendq.qlen == 1)
1499 		service_ofldq(q);
1500 	spin_unlock(&q->sendq.lock);
1501 	return NET_XMIT_SUCCESS;
1502 }
1503 
1504 /**
1505  *	restart_ofldq - restart a suspended offload queue
1506  *	@data: the offload queue to restart
1507  *
1508  *	Resumes transmission on a suspended Tx offload queue.
1509  */
1510 static void restart_ofldq(unsigned long data)
1511 {
1512 	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1513 
1514 	spin_lock(&q->sendq.lock);
1515 	q->full = 0;            /* the queue actually is completely empty now */
1516 	service_ofldq(q);
1517 	spin_unlock(&q->sendq.lock);
1518 }
1519 
1520 /**
1521  *	skb_txq - return the Tx queue an offload packet should use
1522  *	@skb: the packet
1523  *
1524  *	Returns the Tx queue an offload packet should use as indicated by bits
1525  *	1-15 in the packet's queue_mapping.
1526  */
1527 static inline unsigned int skb_txq(const struct sk_buff *skb)
1528 {
1529 	return skb->queue_mapping >> 1;
1530 }
1531 
1532 /**
1533  *	is_ctrl_pkt - return whether an offload packet is a control packet
1534  *	@skb: the packet
1535  *
1536  *	Returns whether an offload packet should use an OFLD or a CTRL
1537  *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
1538  */
1539 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1540 {
1541 	return skb->queue_mapping & 1;
1542 }
1543 
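/*
 * Dispatch an offload packet to the control or offload Tx queue selected by
 * its queue_mapping.
 */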
1544 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1545 {
1546 	unsigned int idx = skb_txq(skb);
1547 
1548 	if (unlikely(is_ctrl_pkt(skb))) {
1549 		/* Single ctrl queue is a requirement for LE workaround path */
1550 		if (adap->tids.nsftids)
1551 			idx = 0;
1552 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1553 	}
1554 	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1555 }
1556 
1557 /**
1558  *	t4_ofld_send - send an offload packet
1559  *	@adap: the adapter
1560  *	@skb: the packet
1561  *
1562  *	Sends an offload packet.  We use the packet queue_mapping to select the
1563  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1564  *	should be sent as regular or control, bits 1-15 select the queue.
1565  */
1566 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1567 {
1568 	int ret;
1569 
1570 	local_bh_disable();
1571 	ret = ofld_send(adap, skb);
1572 	local_bh_enable();
1573 	return ret;
1574 }
1575 
1576 /**
1577  *	cxgb4_ofld_send - send an offload packet
1578  *	@dev: the net device
1579  *	@skb: the packet
1580  *
1581  *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
1582  *	intended for ULDs.
1583  */
1584 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1585 {
1586 	return t4_ofld_send(netdev2adap(dev), skb);
1587 }
1588 EXPORT_SYMBOL(cxgb4_ofld_send);
1589 
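/*
 * Copy the page fragments of a packet gather list into an skb, skipping the
 * first @offset bytes of the initial fragment.  An extra reference is taken
 * on the last page since we don't own it.
 */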
1590 static inline void copy_frags(struct sk_buff *skb,
1591 			      const struct pkt_gl *gl, unsigned int offset)
1592 {
1593 	int i;
1594 
1595 	/* usually there's just one frag */
1596 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1597 			     gl->frags[0].offset + offset,
1598 			     gl->frags[0].size - offset);
1599 	skb_shinfo(skb)->nr_frags = gl->nfrags;
1600 	for (i = 1; i < gl->nfrags; i++)
1601 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1602 				     gl->frags[i].offset,
1603 				     gl->frags[i].size);
1604 
1605 	/* get a reference to the last page, we don't own it */
1606 	get_page(gl->frags[gl->nfrags - 1].page);
1607 }
1608 
1609 /**
1610  *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1611  *	@gl: the gather list
1612  *	@skb_len: size of sk_buff main body if it carries fragments
1613  *	@pull_len: amount of data to move to the sk_buff's main body
1614  *
1615  *	Builds an sk_buff from the given packet gather list.  Returns the
1616  *	sk_buff or %NULL if sk_buff allocation failed.
1617  */
1618 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1619 				   unsigned int skb_len, unsigned int pull_len)
1620 {
1621 	struct sk_buff *skb;
1622 
1623 	/*
1624 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1625 	 * size, which is expected since buffers are at least PAGE_SIZEd.
1626 	 * In this case packets up to RX_COPY_THRES have only one fragment.
1627 	 */
1628 	if (gl->tot_len <= RX_COPY_THRES) {
1629 		skb = dev_alloc_skb(gl->tot_len);
1630 		if (unlikely(!skb))
1631 			goto out;
1632 		__skb_put(skb, gl->tot_len);
1633 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1634 	} else {
1635 		skb = dev_alloc_skb(skb_len);
1636 		if (unlikely(!skb))
1637 			goto out;
1638 		__skb_put(skb, pull_len);
1639 		skb_copy_to_linear_data(skb, gl->va, pull_len);
1640 
1641 		copy_frags(skb, gl, pull_len);
1642 		skb->len = gl->tot_len;
1643 		skb->data_len = skb->len - pull_len;
1644 		skb->truesize += skb->data_len;
1645 	}
1646 out:	return skb;
1647 }
1648 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1649 
1650 /**
1651  *	t4_pktgl_free - free a packet gather list
1652  *	@gl: the gather list
1653  *
1654  *	Releases the pages of a packet gather list.  We do not own the last
1655  *	page on the list and do not free it.
1656  */
1657 static void t4_pktgl_free(const struct pkt_gl *gl)
1658 {
1659 	int n;
1660 	const struct page_frag *p;
1661 
1662 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1663 		put_page(p->page);
1664 }
1665 
1666 /*
1667  * Process an MPS trace packet.  Give it an unused protocol number so it won't
1668  * be delivered to anyone and send it to the stack for capture.
1669  */
1670 static noinline int handle_trace_pkt(struct adapter *adap,
1671 				     const struct pkt_gl *gl)
1672 {
1673 	struct sk_buff *skb;
1674 
1675 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1676 	if (unlikely(!skb)) {
1677 		t4_pktgl_free(gl);
1678 		return 0;
1679 	}
1680 
1681 	if (is_t4(adap->params.chip))
1682 		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
1683 	else
1684 		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1685 
1686 	skb_reset_mac_header(skb);
1687 	skb->protocol = htons(0xffff);
1688 	skb->dev = adap->port[0];
1689 	netif_receive_skb(skb);
1690 	return 0;
1691 }
1692 
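/*
 * Deliver a TCP packet from a gather list to the stack via GRO, recording
 * Rx hash, VLAN, and checksum-offload information along the way.
 */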
1693 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1694 		   const struct cpl_rx_pkt *pkt)
1695 {
1696 	struct adapter *adapter = rxq->rspq.adap;
1697 	struct sge *s = &adapter->sge;
1698 	int ret;
1699 	struct sk_buff *skb;
1700 
1701 	skb = napi_get_frags(&rxq->rspq.napi);
1702 	if (unlikely(!skb)) {
1703 		t4_pktgl_free(gl);
1704 		rxq->stats.rx_drops++;
1705 		return;
1706 	}
1707 
1708 	copy_frags(skb, gl, s->pktshift);
1709 	skb->len = gl->tot_len - s->pktshift;
1710 	skb->data_len = skb->len;
1711 	skb->truesize += skb->data_len;
1712 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1713 	skb_record_rx_queue(skb, rxq->rspq.idx);
1714 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1715 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1716 			     PKT_HASH_TYPE_L3);
1717 
1718 	if (unlikely(pkt->vlan_ex)) {
1719 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1720 		rxq->stats.vlan_ex++;
1721 	}
1722 	ret = napi_gro_frags(&rxq->rspq.napi);
1723 	if (ret == GRO_HELD)
1724 		rxq->stats.lro_pkts++;
1725 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1726 		rxq->stats.lro_merged++;
1727 	rxq->stats.pkts++;
1728 	rxq->stats.rx_cso++;
1729 }
1730 
1731 /**
1732  *	t4_ethrx_handler - process an ingress ethernet packet
1733  *	@q: the response queue that received the packet
1734  *	@rsp: the response queue descriptor holding the RX_PKT message
1735  *	@si: the gather list of packet fragments
1736  *
1737  *	Process an ingress ethernet packet and deliver it to the stack.
1738  */
1739 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1740 		     const struct pkt_gl *si)
1741 {
1742 	bool csum_ok;
1743 	struct sk_buff *skb;
1744 	const struct cpl_rx_pkt *pkt;
1745 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1746 	struct sge *s = &q->adap->sge;
1747 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
1748 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1749 
1750 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1751 		return handle_trace_pkt(q->adap, si);
1752 
1753 	pkt = (const struct cpl_rx_pkt *)rsp;
1754 	csum_ok = pkt->csum_calc && !pkt->err_vec &&
1755 		  (q->netdev->features & NETIF_F_RXCSUM);
1756 	if ((pkt->l2info & htonl(RXF_TCP)) &&
1757 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1758 		do_gro(rxq, si, pkt);
1759 		return 0;
1760 	}
1761 
1762 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1763 	if (unlikely(!skb)) {
1764 		t4_pktgl_free(si);
1765 		rxq->stats.rx_drops++;
1766 		return 0;
1767 	}
1768 
1769 	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
1770 	skb->protocol = eth_type_trans(skb, q->netdev);
1771 	skb_record_rx_queue(skb, q->idx);
1772 	if (skb->dev->features & NETIF_F_RXHASH)
1773 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1774 			     PKT_HASH_TYPE_L3);
1775 
1776 	rxq->stats.pkts++;
1777 
1778 	if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1779 		if (!pkt->ip_frag) {
1780 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1781 			rxq->stats.rx_cso++;
1782 		} else if (pkt->l2info & htonl(RXF_IP)) {
1783 			__sum16 c = (__force __sum16)pkt->csum;
1784 			skb->csum = csum_unfold(c);
1785 			skb->ip_summed = CHECKSUM_COMPLETE;
1786 			rxq->stats.rx_cso++;
1787 		}
1788 	} else
1789 		skb_checksum_none_assert(skb);
1790 
1791 	if (unlikely(pkt->vlan_ex)) {
1792 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1793 		rxq->stats.vlan_ex++;
1794 	}
1795 	netif_receive_skb(skb);
1796 	return 0;
1797 }
1798 
1799 /**
1800  *	restore_rx_bufs - put back a packet's Rx buffers
1801  *	@si: the packet gather list
1802  *	@q: the SGE free list
1803  *	@frags: number of FL buffers to restore
1804  *
1805  *	Puts back on an FL the Rx buffers associated with @si.  The buffers
1806  *	have already been unmapped and are left unmapped, we mark them so to
1807  *	prevent further unmapping attempts.
1808  *
1809  *	This function undoes a series of @unmap_rx_buf calls when we find out
1810  *	that the current packet can't be processed right away after all and we
1811  *	need to come back to it later.  This is a very rare event and there's
1812  *	no effort to make this particularly efficient.
1813  */
1814 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1815 			    int frags)
1816 {
1817 	struct rx_sw_desc *d;
1818 
1819 	while (frags--) {
1820 		if (q->cidx == 0)
1821 			q->cidx = q->size - 1;
1822 		else
1823 			q->cidx--;
1824 		d = &q->sdesc[q->cidx];
1825 		d->page = si->frags[frags].page;
1826 		d->dma_addr |= RX_UNMAPPED_BUF;
1827 		q->avail++;
1828 	}
1829 }
1830 
1831 /**
1832  *	is_new_response - check if a response is newly written
1833  *	@r: the response descriptor
1834  *	@q: the response queue
1835  *
1836  *	Returns true if a response descriptor contains a yet unprocessed
1837  *	response.
1838  */
1839 static inline bool is_new_response(const struct rsp_ctrl *r,
1840 				   const struct sge_rspq *q)
1841 {
1842 	return RSPD_GEN(r->type_gen) == q->gen;
1843 }
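
/* The generation bit written into each response descriptor alternates
 * between 0 and 1 on successive passes over the ring (rspq_next() below
 * flips q->gen on wrap), so newly written entries can be detected without
 * having to clear consumed descriptors first.
 */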
1844 
1845 /**
1846  *	rspq_next - advance to the next entry in a response queue
1847  *	@q: the queue
1848  *
1849  *	Updates the state of a response queue to advance it to the next entry.
1850  */
1851 static inline void rspq_next(struct sge_rspq *q)
1852 {
1853 	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1854 	if (unlikely(++q->cidx == q->size)) {
1855 		q->cidx = 0;
1856 		q->gen ^= 1;
1857 		q->cur_desc = q->desc;
1858 	}
1859 }
1860 
1861 /**
1862  *	process_responses - process responses from an SGE response queue
1863  *	@q: the ingress queue to process
1864  *	@budget: how many responses can be processed in this round
1865  *
1866  *	Process responses from an SGE response queue up to the supplied budget.
1867  *	Responses include received packets as well as control messages from FW
1868  *	or HW.
1869  *
1870  *	Additionally choose the interrupt holdoff time for the next interrupt
1871  *	on this queue.  If the system is under memory shortage use a fairly
1872  *	long delay to help recovery.
1873  */
1874 static int process_responses(struct sge_rspq *q, int budget)
1875 {
1876 	int ret, rsp_type;
1877 	int budget_left = budget;
1878 	const struct rsp_ctrl *rc;
1879 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1880 	struct adapter *adapter = q->adap;
1881 	struct sge *s = &adapter->sge;
1882 
1883 	while (likely(budget_left)) {
1884 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1885 		if (!is_new_response(rc, q))
1886 			break;
1887 
1888 		rmb();
1889 		rsp_type = RSPD_TYPE(rc->type_gen);
1890 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1891 			struct page_frag *fp;
1892 			struct pkt_gl si;
1893 			const struct rx_sw_desc *rsd;
1894 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1895 
1896 			if (len & RSPD_NEWBUF) {
1897 				if (likely(q->offset > 0)) {
1898 					free_rx_bufs(q->adap, &rxq->fl, 1);
1899 					q->offset = 0;
1900 				}
1901 				len = RSPD_LEN(len);
1902 			}
1903 			si.tot_len = len;
1904 
1905 			/* gather packet fragments */
1906 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
1907 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1908 				bufsz = get_buf_size(adapter, rsd);
1909 				fp->page = rsd->page;
1910 				fp->offset = q->offset;
1911 				fp->size = min(bufsz, len);
1912 				len -= fp->size;
1913 				if (!len)
1914 					break;
1915 				unmap_rx_buf(q->adap, &rxq->fl);
1916 			}
1917 
1918 			/*
1919 			 * Last buffer remains mapped so explicitly make it
1920 			 * coherent for CPU access.
1921 			 */
1922 			dma_sync_single_for_cpu(q->adap->pdev_dev,
1923 						get_buf_addr(rsd),
1924 						fp->size, DMA_FROM_DEVICE);
1925 
1926 			si.va = page_address(si.frags[0].page) +
1927 				si.frags[0].offset;
1928 			prefetch(si.va);
1929 
1930 			si.nfrags = frags + 1;
1931 			ret = q->handler(q, q->cur_desc, &si);
1932 			if (likely(ret == 0))
1933 				q->offset += ALIGN(fp->size, s->fl_align);
1934 			else
1935 				restore_rx_bufs(&si, &rxq->fl, frags);
1936 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
1937 			ret = q->handler(q, q->cur_desc, NULL);
1938 		} else {
1939 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1940 		}
1941 
1942 		if (unlikely(ret)) {
1943 			/* couldn't process descriptor, back off for recovery */
1944 			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1945 			break;
1946 		}
1947 
1948 		rspq_next(q);
1949 		budget_left--;
1950 	}
1951 
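	/* Ingress queues without a Free List have offset -1 (see
	 * t4_sge_alloc_rxq()), so the refill below is skipped for them.
	 * Otherwise replenish the FL once at least 16 of its ring entries
	 * are unfilled.
	 */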
1952 	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1953 		__refill_fl(q->adap, &rxq->fl);
1954 	return budget - budget_left;
1955 }
1956 
1957 /**
1958  *	napi_rx_handler - the NAPI handler for Rx processing
1959  *	@napi: the napi instance
1960  *	@budget: how many packets we can process in this round
1961  *
1962  *	Handler for new data events when using NAPI.  This does not need any
1963  *	locking or protection from interrupts as data interrupts are off at
1964  *	this point and other adapter interrupts do not interfere (the latter
1965  *	is not a concern at all with MSI-X as non-data interrupts then have
1966  *	a separate handler).
1967  */
1968 static int napi_rx_handler(struct napi_struct *napi, int budget)
1969 {
1970 	unsigned int params;
1971 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1972 	int work_done = process_responses(q, budget);
1973 	u32 val;
1974 
1975 	if (likely(work_done < budget)) {
1976 		int timer_index;
1977 
1978 		napi_complete(napi);
1979 		timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
1980 
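		/* Adaptive Rx coalescing: if this poll handled more packets
		 * than the current holdoff timer's quota, step to a longer
		 * holdoff timer, otherwise step to a shorter one, clamping
		 * the index to the valid SGE timer register range.
		 */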
1981 		if (q->adaptive_rx) {
1982 			if (work_done > max(timer_pkt_quota[timer_index],
1983 					    MIN_NAPI_WORK))
1984 				timer_index = (timer_index + 1);
1985 			else
1986 				timer_index = timer_index - 1;
1987 
1988 			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
1989 			q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
1990 							      V_QINTR_CNT_EN;
1991 			params = q->next_intr_params;
1992 		} else {
1993 			params = q->next_intr_params;
1994 			q->next_intr_params = q->intr_params;
1995 		}
1996 	} else
1997 		params = QINTR_TIMER_IDX(7);
1998 
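	/* Acknowledge the entries we consumed (CIDXINC) and re-arm the
	 * queue's interrupt with the chosen holdoff parameters (SEINTARM):
	 * via the per-PF GTS register on T4, or the queue's BAR2 user
	 * doorbell on T5 and later.
	 */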
1999 	val = CIDXINC(work_done) | SEINTARM(params);
2000 	if (is_t4(q->adap->params.chip)) {
2001 		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
2002 			     val | INGRESSQID((u32)q->cntxt_id));
2003 	} else {
2004 		writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS);
2005 		wmb();
2006 	}
2007 	return work_done;
2008 }
2009 
2010 /*
2011  * The MSI-X interrupt handler for an SGE response queue.
2012  */
2013 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2014 {
2015 	struct sge_rspq *q = cookie;
2016 
2017 	napi_schedule(&q->napi);
2018 	return IRQ_HANDLED;
2019 }
2020 
2021 /*
2022  * Process the indirect interrupt entries in the interrupt queue and kick off
2023  * NAPI for each queue that has generated an entry.
2024  */
2025 static unsigned int process_intrq(struct adapter *adap)
2026 {
2027 	unsigned int credits;
2028 	const struct rsp_ctrl *rc;
2029 	struct sge_rspq *q = &adap->sge.intrq;
2030 	u32 val;
2031 
2032 	spin_lock(&adap->sge.intrq_lock);
2033 	for (credits = 0; ; credits++) {
2034 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2035 		if (!is_new_response(rc, q))
2036 			break;
2037 
2038 		rmb();
2039 		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
2040 			unsigned int qid = ntohl(rc->pldbuflen_qid);
2041 
2042 			qid -= adap->sge.ingr_start;
2043 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
2044 		}
2045 
2046 		rspq_next(q);
2047 	}
2048 
2049 	val = CIDXINC(credits) | SEINTARM(q->intr_params);
2050 	if (is_t4(adap->params.chip)) {
2051 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
2052 			     val | INGRESSQID(q->cntxt_id));
2053 	} else {
2054 		writel(val, adap->bar2 + q->udb + SGE_UDB_GTS);
2055 		wmb();
2056 	}
2057 	spin_unlock(&adap->sge.intrq_lock);
2058 	return credits;
2059 }
2060 
2061 /*
2062  * The MSI interrupt handler, which handles data events from SGE response queues
2063  * as well as error and other async events as they all use the same MSI vector.
2064  */
2065 static irqreturn_t t4_intr_msi(int irq, void *cookie)
2066 {
2067 	struct adapter *adap = cookie;
2068 
2069 	t4_slow_intr_handler(adap);
2070 	process_intrq(adap);
2071 	return IRQ_HANDLED;
2072 }
2073 
2074 /*
2075  * Interrupt handler for legacy INTx interrupts.
2076  * Handles data events from SGE response queues as well as error and other
2077  * async events as they all use the same interrupt line.
2078  */
2079 static irqreturn_t t4_intr_intx(int irq, void *cookie)
2080 {
2081 	struct adapter *adap = cookie;
2082 
2083 	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
2084 	if (t4_slow_intr_handler(adap) | process_intrq(adap))
2085 		return IRQ_HANDLED;
2086 	return IRQ_NONE;             /* probably shared interrupt */
2087 }
2088 
2089 /**
2090  *	t4_intr_handler - select the top-level interrupt handler
2091  *	@adap: the adapter
2092  *
2093  *	Selects the top-level interrupt handler based on the type of interrupts
2094  *	(MSI-X, MSI, or INTx).
2095  */
2096 irq_handler_t t4_intr_handler(struct adapter *adap)
2097 {
2098 	if (adap->flags & USING_MSIX)
2099 		return t4_sge_intr_msix;
2100 	if (adap->flags & USING_MSI)
2101 		return t4_intr_msi;
2102 	return t4_intr_intx;
2103 }
2104 
2105 static void sge_rx_timer_cb(unsigned long data)
2106 {
2107 	unsigned long m;
2108 	unsigned int i, idma_same_state_cnt[2];
2109 	struct adapter *adap = (struct adapter *)data;
2110 	struct sge *s = &adap->sge;
2111 
2112 	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
2113 		for (m = s->starving_fl[i]; m; m &= m - 1) {
2114 			struct sge_eth_rxq *rxq;
2115 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2116 			struct sge_fl *fl = s->egr_map[id];
2117 
2118 			clear_bit(id, s->starving_fl);
2119 			smp_mb__after_atomic();
2120 
2121 			if (fl_starving(fl)) {
2122 				rxq = container_of(fl, struct sge_eth_rxq, fl);
2123 				if (napi_reschedule(&rxq->rspq.napi))
2124 					fl->starving++;
2125 				else
2126 					set_bit(id, s->starving_fl);
2127 			}
2128 		}
2129 
2130 	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
2131 	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
2132 	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
2133 
2134 	for (i = 0; i < 2; i++) {
2135 		u32 debug0, debug11;
2136 
2137 		/* If the Ingress DMA Same State Counter ("timer") is less
2138 		 * than 1s, then we can reset our synthesized Stall Timer and
2139 		 * continue.  If we have previously emitted warnings about a
2140 		 * potential stalled Ingress Queue, issue a note indicating
2141 		 * that the Ingress Queue has resumed forward progress.
2142 		 */
2143 		if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
2144 			if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
2145 				CH_WARN(adap, "SGE idma%d, queue%u, resumed after %d sec\n",
2146 					i, s->idma_qid[i],
2147 					s->idma_stalled[i]/HZ);
2148 			s->idma_stalled[i] = 0;
2149 			continue;
2150 		}
2151 
2152 		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
2153 		 * domain.  The first time we get here it'll be because we
2154 		 * passed the 1s Threshold; each additional time it'll be
2155 		 * because the RX Timer Callback is being fired on its regular
2156 		 * schedule.
2157 		 *
2158 		 * If the stall is below our Potential Hung Ingress Queue
2159 		 * Warning Threshold, continue.
2160 		 */
2161 		if (s->idma_stalled[i] == 0)
2162 			s->idma_stalled[i] = HZ;
2163 		else
2164 			s->idma_stalled[i] += RX_QCHECK_PERIOD;
2165 
2166 		if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
2167 			continue;
2168 
2169 		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
2170 		if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
2171 			continue;
2172 
2173 		/* Read and save the SGE IDMA State and Queue ID information.
2174 		 * We do this every time in case it changes across time ...
2175 		 */
2176 		t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
2177 		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
2178 		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
2179 
2180 		t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
2181 		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
2182 		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
2183 
2184 		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state %u for %d secs (debug0=%#x, debug11=%#x)\n",
2185 			i, s->idma_qid[i], s->idma_state[i],
2186 			s->idma_stalled[i]/HZ, debug0, debug11);
2187 		t4_sge_decode_idma_state(adap, s->idma_state[i]);
2188 	}
2189 
2190 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2191 }
2192 
2193 static void sge_tx_timer_cb(unsigned long data)
2194 {
2195 	unsigned long m;
2196 	unsigned int i, budget;
2197 	struct adapter *adap = (struct adapter *)data;
2198 	struct sge *s = &adap->sge;
2199 
2200 	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
2201 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
2202 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2203 			struct sge_ofld_txq *txq = s->egr_map[id];
2204 
2205 			clear_bit(id, s->txq_maperr);
2206 			tasklet_schedule(&txq->qresume_tsk);
2207 		}
2208 
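	/* Reclaim completed Tx descriptors from the Ethernet Tx queues in
	 * round-robin order, starting where the previous run left off and
	 * spending at most MAX_TIMER_TX_RECLAIM descriptors per invocation.
	 */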
2209 	budget = MAX_TIMER_TX_RECLAIM;
2210 	i = s->ethtxq_rover;
2211 	do {
2212 		struct sge_eth_txq *q = &s->ethtxq[i];
2213 
2214 		if (q->q.in_use &&
2215 		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2216 		    __netif_tx_trylock(q->txq)) {
2217 			int avail = reclaimable(&q->q);
2218 
2219 			if (avail) {
2220 				if (avail > budget)
2221 					avail = budget;
2222 
2223 				free_tx_desc(adap, &q->q, avail, true);
2224 				q->q.in_use -= avail;
2225 				budget -= avail;
2226 			}
2227 			__netif_tx_unlock(q->txq);
2228 		}
2229 
2230 		if (++i >= s->ethqsets)
2231 			i = 0;
2232 	} while (budget && i != s->ethtxq_rover);
2233 	s->ethtxq_rover = i;
2234 	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2235 }
2236 
2237 /**
2238  *      udb_address - return the BAR2 User Doorbell address for a Queue
2239  *      @adap: the adapter
2240  *      @cntxt_id: the Queue Context ID
2241  *      @qpp: Queues Per Page (for all PFs)
2242  *
2243  *      Returns the BAR2 address of the user Doorbell associated with the
2244  *      indicated Queue Context ID.  Note that this is only applicable
2245  *      for T5 and later.
2246  */
2247 static u64 udb_address(struct adapter *adap, unsigned int cntxt_id,
2248 		       unsigned int qpp)
2249 {
2250 	u64 udb;
2251 	unsigned int s_qpp;
2252 	unsigned short udb_density;
2253 	unsigned long qpshift;
2254 	int page;
2255 
2256 	BUG_ON(is_t4(adap->params.chip));
2257 
2258 	s_qpp = (QUEUESPERPAGEPF0 +
2259 		(QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn);
2260 	udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
2261 	qpshift = PAGE_SHIFT - ilog2(udb_density);
2262 	udb = (u64)cntxt_id << qpshift;
2263 	udb &= PAGE_MASK;
2264 	page = udb / PAGE_SIZE;
2265 	udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE;
2266 
2267 	return udb;
2268 }
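
/* Purely illustrative example (assuming SGE_UDB_SIZE is 128 bytes): with
 * PAGE_SIZE = 4096 and a Queues Per Page setting that yields udb_density =
 * 16, qpshift = 12 - 4 = 8.  For cntxt_id = 35, udb = 35 << 8 = 8960, which
 * truncates to page 2 (offset 8192), and the doorbell ends up at
 * 8192 + (35 - 2 * 16) * 128 = 8576 into BAR2.
 */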
2269 
2270 static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id)
2271 {
2272 	return udb_address(adap, cntxt_id,
2273 			   t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
2274 }
2275 
2276 static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id)
2277 {
2278 	return udb_address(adap, cntxt_id,
2279 			   t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
2280 }
2281 
2282 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2283 		     struct net_device *dev, int intr_idx,
2284 		     struct sge_fl *fl, rspq_handler_t hnd)
2285 {
2286 	int ret, flsz = 0;
2287 	struct fw_iq_cmd c;
2288 	struct sge *s = &adap->sge;
2289 	struct port_info *pi = netdev_priv(dev);
2290 
2291 	/* Size needs to be multiple of 16, including status entry. */
2292 	/* Size must be a multiple of 16, including the status entry. */
2293 
2294 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2295 			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
2296 	if (!iq->desc)
2297 		return -ENOMEM;
2298 
2299 	memset(&c, 0, sizeof(c));
2300 	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2301 			    FW_CMD_WRITE | FW_CMD_EXEC |
2302 			    FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
2303 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
2304 				 FW_LEN16(c));
2305 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
2306 		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
2307 		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
2308 		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
2309 							-intr_idx - 1));
2310 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2311 		FW_IQ_CMD_IQGTSMODE |
2312 		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2313 		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2314 	c.iqsize = htons(iq->size);
2315 	c.iqaddr = cpu_to_be64(iq->phys_addr);
2316 
2317 	if (fl) {
2318 		fl->size = roundup(fl->size, 8);
2319 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2320 				      sizeof(struct rx_sw_desc), &fl->addr,
2321 				      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2322 		if (!fl->desc)
2323 			goto fl_nomem;
2324 
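		/* flsz is the Free List ring size in 64-byte units (eight
		 * 8-byte buffer pointers per unit), plus room for the status
		 * page appended at the end of the ring.
		 */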
2325 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2326 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN(1) |
2327 					    FW_IQ_CMD_FL0FETCHRO(1) |
2328 					    FW_IQ_CMD_FL0DATARO(1) |
2329 					    FW_IQ_CMD_FL0PADEN(1));
2330 		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2331 				FW_IQ_CMD_FL0FBMAX(3));
2332 		c.fl0size = htons(flsz);
2333 		c.fl0addr = cpu_to_be64(fl->addr);
2334 	}
2335 
2336 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2337 	if (ret)
2338 		goto err;
2339 
2340 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2341 	iq->cur_desc = iq->desc;
2342 	iq->cidx = 0;
2343 	iq->gen = 1;
2344 	iq->next_intr_params = iq->intr_params;
2345 	iq->cntxt_id = ntohs(c.iqid);
2346 	iq->abs_id = ntohs(c.physiqid);
2347 	if (!is_t4(adap->params.chip))
2348 		iq->udb = udb_address_iq(adap, iq->cntxt_id);
2349 	iq->size--;                           /* subtract status entry */
2350 	iq->netdev = dev;
2351 	iq->handler = hnd;
2352 
2353 	/* set offset to -1 to distinguish ingress queues without FL */
2354 	iq->offset = fl ? 0 : -1;
2355 
2356 	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2357 
2358 	if (fl) {
2359 		fl->cntxt_id = ntohs(c.fl0id);
2360 		fl->avail = fl->pend_cred = 0;
2361 		fl->pidx = fl->cidx = 0;
2362 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2363 		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2364 
2365 		/* Note, we must initialize the Free List User Doorbell
2366 		 * address before refilling the Free List!
2367 		 */
2368 		if (!is_t4(adap->params.chip))
2369 			fl->udb = udb_address_eq(adap, fl->cntxt_id);
2370 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2371 	}
2372 	return 0;
2373 
2374 fl_nomem:
2375 	ret = -ENOMEM;
2376 err:
2377 	if (iq->desc) {
2378 		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2379 				  iq->desc, iq->phys_addr);
2380 		iq->desc = NULL;
2381 	}
2382 	if (fl && fl->desc) {
2383 		kfree(fl->sdesc);
2384 		fl->sdesc = NULL;
2385 		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2386 				  fl->desc, fl->addr);
2387 		fl->desc = NULL;
2388 	}
2389 	return ret;
2390 }
2391 
2392 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2393 {
2394 	q->cntxt_id = id;
2395 	if (!is_t4(adap->params.chip))
2396 		q->udb = udb_address_eq(adap, q->cntxt_id);
2397 
2398 	q->in_use = 0;
2399 	q->cidx = q->pidx = 0;
2400 	q->stops = q->restarts = 0;
2401 	q->stat = (void *)&q->desc[q->size];
2402 	spin_lock_init(&q->db_lock);
2403 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
2404 }
2405 
2406 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2407 			 struct net_device *dev, struct netdev_queue *netdevq,
2408 			 unsigned int iqid)
2409 {
2410 	int ret, nentries;
2411 	struct fw_eq_eth_cmd c;
2412 	struct sge *s = &adap->sge;
2413 	struct port_info *pi = netdev_priv(dev);
2414 
2415 	/* Add status entries */
2416 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2417 
2418 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2419 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2420 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2421 			netdev_queue_numa_node_read(netdevq));
2422 	if (!txq->q.desc)
2423 		return -ENOMEM;
2424 
2425 	memset(&c, 0, sizeof(c));
2426 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2427 			    FW_CMD_WRITE | FW_CMD_EXEC |
2428 			    FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
2429 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2430 				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2431 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE |
2432 			   FW_EQ_ETH_CMD_VIID(pi->viid));
2433 	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2434 				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2435 				   FW_EQ_ETH_CMD_FETCHRO(1) |
2436 				   FW_EQ_ETH_CMD_IQID(iqid));
2437 	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
2438 				  FW_EQ_ETH_CMD_FBMAX(3) |
2439 				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
2440 				  FW_EQ_ETH_CMD_EQSIZE(nentries));
2441 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2442 
2443 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2444 	if (ret) {
2445 		kfree(txq->q.sdesc);
2446 		txq->q.sdesc = NULL;
2447 		dma_free_coherent(adap->pdev_dev,
2448 				  nentries * sizeof(struct tx_desc),
2449 				  txq->q.desc, txq->q.phys_addr);
2450 		txq->q.desc = NULL;
2451 		return ret;
2452 	}
2453 
2454 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2455 	txq->txq = netdevq;
2456 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2457 	txq->mapping_err = 0;
2458 	return 0;
2459 }
2460 
2461 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2462 			  struct net_device *dev, unsigned int iqid,
2463 			  unsigned int cmplqid)
2464 {
2465 	int ret, nentries;
2466 	struct fw_eq_ctrl_cmd c;
2467 	struct sge *s = &adap->sge;
2468 	struct port_info *pi = netdev_priv(dev);
2469 
2470 	/* Add status entries */
2471 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2472 
2473 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2474 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2475 				 NULL, 0, NUMA_NO_NODE);
2476 	if (!txq->q.desc)
2477 		return -ENOMEM;
2478 
2479 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2480 			    FW_CMD_WRITE | FW_CMD_EXEC |
2481 			    FW_EQ_CTRL_CMD_PFN(adap->fn) |
2482 			    FW_EQ_CTRL_CMD_VFN(0));
2483 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2484 				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2485 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
2486 	c.physeqid_pkd = htonl(0);
2487 	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2488 				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2489 				   FW_EQ_CTRL_CMD_FETCHRO |
2490 				   FW_EQ_CTRL_CMD_IQID(iqid));
2491 	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2492 				  FW_EQ_CTRL_CMD_FBMAX(3) |
2493 				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
2494 				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
2495 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2496 
2497 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2498 	if (ret) {
2499 		dma_free_coherent(adap->pdev_dev,
2500 				  nentries * sizeof(struct tx_desc),
2501 				  txq->q.desc, txq->q.phys_addr);
2502 		txq->q.desc = NULL;
2503 		return ret;
2504 	}
2505 
2506 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
2507 	txq->adap = adap;
2508 	skb_queue_head_init(&txq->sendq);
2509 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2510 	txq->full = 0;
2511 	return 0;
2512 }
2513 
2514 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2515 			  struct net_device *dev, unsigned int iqid)
2516 {
2517 	int ret, nentries;
2518 	struct fw_eq_ofld_cmd c;
2519 	struct sge *s = &adap->sge;
2520 	struct port_info *pi = netdev_priv(dev);
2521 
2522 	/* Add status entries */
2523 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2524 
2525 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2526 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2527 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2528 			NUMA_NO_NODE);
2529 	if (!txq->q.desc)
2530 		return -ENOMEM;
2531 
2532 	memset(&c, 0, sizeof(c));
2533 	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2534 			    FW_CMD_WRITE | FW_CMD_EXEC |
2535 			    FW_EQ_OFLD_CMD_PFN(adap->fn) |
2536 			    FW_EQ_OFLD_CMD_VFN(0));
2537 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
2538 				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2539 	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
2540 				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
2541 				   FW_EQ_OFLD_CMD_FETCHRO(1) |
2542 				   FW_EQ_OFLD_CMD_IQID(iqid));
2543 	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
2544 				  FW_EQ_OFLD_CMD_FBMAX(3) |
2545 				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
2546 				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
2547 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2548 
2549 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2550 	if (ret) {
2551 		kfree(txq->q.sdesc);
2552 		txq->q.sdesc = NULL;
2553 		dma_free_coherent(adap->pdev_dev,
2554 				  nentries * sizeof(struct tx_desc),
2555 				  txq->q.desc, txq->q.phys_addr);
2556 		txq->q.desc = NULL;
2557 		return ret;
2558 	}
2559 
2560 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2561 	txq->adap = adap;
2562 	skb_queue_head_init(&txq->sendq);
2563 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2564 	txq->full = 0;
2565 	txq->mapping_err = 0;
2566 	return 0;
2567 }
2568 
2569 static void free_txq(struct adapter *adap, struct sge_txq *q)
2570 {
2571 	struct sge *s = &adap->sge;
2572 
2573 	dma_free_coherent(adap->pdev_dev,
2574 			  q->size * sizeof(struct tx_desc) + s->stat_len,
2575 			  q->desc, q->phys_addr);
2576 	q->cntxt_id = 0;
2577 	q->sdesc = NULL;
2578 	q->desc = NULL;
2579 }
2580 
2581 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2582 			 struct sge_fl *fl)
2583 {
2584 	struct sge *s = &adap->sge;
2585 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2586 
2587 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2588 	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2589 		   rq->cntxt_id, fl_id, 0xffff);
2590 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2591 			  rq->desc, rq->phys_addr);
2592 	netif_napi_del(&rq->napi);
2593 	rq->netdev = NULL;
2594 	rq->cntxt_id = rq->abs_id = 0;
2595 	rq->desc = NULL;
2596 
2597 	if (fl) {
2598 		free_rx_bufs(adap, fl, fl->avail);
2599 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2600 				  fl->desc, fl->addr);
2601 		kfree(fl->sdesc);
2602 		fl->sdesc = NULL;
2603 		fl->cntxt_id = 0;
2604 		fl->desc = NULL;
2605 	}
2606 }
2607 
2608 /**
2609  *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
2610  *      @adap: the adapter
2611  *      @n: number of queues
2612  *      @q: pointer to first queue
2613  *
2614  *      Release the resources of a consecutive block of offload Rx queues.
2615  */
2616 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
2617 {
2618 	for ( ; n; n--, q++)
2619 		if (q->rspq.desc)
2620 			free_rspq_fl(adap, &q->rspq,
2621 				     q->fl.size ? &q->fl : NULL);
2622 }
2623 
2624 /**
2625  *	t4_free_sge_resources - free SGE resources
2626  *	@adap: the adapter
2627  *
2628  *	Frees resources used by the SGE queue sets.
2629  */
2630 void t4_free_sge_resources(struct adapter *adap)
2631 {
2632 	int i;
2633 	struct sge_eth_rxq *eq = adap->sge.ethrxq;
2634 	struct sge_eth_txq *etq = adap->sge.ethtxq;
2635 
2636 	/* clean up Ethernet Tx/Rx queues */
2637 	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2638 		if (eq->rspq.desc)
2639 			free_rspq_fl(adap, &eq->rspq,
2640 				     eq->fl.size ? &eq->fl : NULL);
2641 		if (etq->q.desc) {
2642 			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2643 				       etq->q.cntxt_id);
2644 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2645 			kfree(etq->q.sdesc);
2646 			free_txq(adap, &etq->q);
2647 		}
2648 	}
2649 
2650 	/* clean up RDMA and iSCSI Rx queues */
2651 	t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
2652 	t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
2653 	t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
2654 
2655 	/* clean up offload Tx queues */
2656 	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2657 		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2658 
2659 		if (q->q.desc) {
2660 			tasklet_kill(&q->qresume_tsk);
2661 			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2662 					q->q.cntxt_id);
2663 			free_tx_desc(adap, &q->q, q->q.in_use, false);
2664 			kfree(q->q.sdesc);
2665 			__skb_queue_purge(&q->sendq);
2666 			free_txq(adap, &q->q);
2667 		}
2668 	}
2669 
2670 	/* clean up control Tx queues */
2671 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2672 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2673 
2674 		if (cq->q.desc) {
2675 			tasklet_kill(&cq->qresume_tsk);
2676 			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2677 					cq->q.cntxt_id);
2678 			__skb_queue_purge(&cq->sendq);
2679 			free_txq(adap, &cq->q);
2680 		}
2681 	}
2682 
2683 	if (adap->sge.fw_evtq.desc)
2684 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2685 
2686 	if (adap->sge.intrq.desc)
2687 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
2688 
2689 	/* clear the reverse egress queue map */
2690 	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2691 }
2692 
2693 void t4_sge_start(struct adapter *adap)
2694 {
2695 	adap->sge.ethtxq_rover = 0;
2696 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2697 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2698 }
2699 
2700 /**
2701  *	t4_sge_stop - disable SGE operation
2702  *	@adap: the adapter
2703  *
2704  *	Stop tasklets and timers associated with the DMA engine.  Note that
2705  *	this is effective only if measures have been taken to disable any HW
2706  *	events that may restart them.
2707  */
2708 void t4_sge_stop(struct adapter *adap)
2709 {
2710 	int i;
2711 	struct sge *s = &adap->sge;
2712 
2713 	if (in_interrupt())  /* actions below require waiting */
2714 		return;
2715 
2716 	if (s->rx_timer.function)
2717 		del_timer_sync(&s->rx_timer);
2718 	if (s->tx_timer.function)
2719 		del_timer_sync(&s->tx_timer);
2720 
2721 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2722 		struct sge_ofld_txq *q = &s->ofldtxq[i];
2723 
2724 		if (q->q.desc)
2725 			tasklet_kill(&q->qresume_tsk);
2726 	}
2727 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2728 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
2729 
2730 		if (cq->q.desc)
2731 			tasklet_kill(&cq->qresume_tsk);
2732 	}
2733 }
2734 
2735 /**
2736  *	t4_sge_init - initialize SGE
2737  *	@adap: the adapter
2738  *
2739  *	Performs SGE initialization needed every time after a chip reset.
2740  *	We do not initialize any of the queues here; instead, the driver
2741  *	top-level must request them individually.
2742  *
2743  *	Called in two different modes:
2744  *
2745  *	 1. Perform actual hardware initialization and record hard-coded
2746  *	    parameters which were used.  This gets used when we're the
2747  *	    Master PF and the Firmware Configuration File support didn't
2748  *	    work for some reason.
2749  *
2750  *	 2. We're not the Master PF or initialization was performed with
2751  *	    a Firmware Configuration File.  In this case we need to grab
2752  *	    any of the SGE operating parameters that we need to have in
2753  *	    order to do our job and make sure we can live with them ...
2754  */
2755 
2756 static int t4_sge_init_soft(struct adapter *adap)
2757 {
2758 	struct sge *s = &adap->sge;
2759 	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2760 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2761 	u32 ingress_rx_threshold;
2762 
2763 	/*
2764 	 * Verify that CPL messages are going to the Ingress Queue for
2765 	 * process_responses() and that only packet data is going to the
2766 	 * Free Lists.
2767 	 */
2768 	if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
2769 	    RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
2770 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2771 		return -EINVAL;
2772 	}
2773 
2774 	/*
2775 	 * Validate the Host Buffer Register Array indices that we want to
2776 	 * use ...
2777 	 *
2778 	 * XXX Note that we should really read through the Host Buffer Size
2779 	 * XXX register array and find the indices of the Buffer Sizes which
2780 	 * XXX meet our needs!
2781 	 */
2782 	#define READ_FL_BUF(x) \
2783 		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
2784 
2785 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2786 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2787 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2788 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2789 
2790 	/* We only bother using the Large Page logic if the Large Page Buffer
2791 	 * is larger than our Page Size Buffer.
2792 	 */
2793 	if (fl_large_pg <= fl_small_pg)
2794 		fl_large_pg = 0;
2795 
2796 	#undef READ_FL_BUF
2797 
2798 	/* The Page Size Buffer must be exactly equal to our Page Size and the
2799 	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2800 	 */
2801 	if (fl_small_pg != PAGE_SIZE ||
2802 	    (fl_large_pg & (fl_large_pg-1)) != 0) {
2803 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2804 			fl_small_pg, fl_large_pg);
2805 		return -EINVAL;
2806 	}
2807 	if (fl_large_pg)
2808 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2809 
2810 	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
2811 	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2812 		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
2813 			fl_small_mtu, fl_large_mtu);
2814 		return -EINVAL;
2815 	}
2816 
2817 	/*
2818 	 * Retrieve our RX interrupt holdoff timer values and counter
2819 	 * threshold values from the SGE parameters.
2820 	 */
2821 	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
2822 	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
2823 	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
2824 	s->timer_val[0] = core_ticks_to_us(adap,
2825 		TIMERVALUE0_GET(timer_value_0_and_1));
2826 	s->timer_val[1] = core_ticks_to_us(adap,
2827 		TIMERVALUE1_GET(timer_value_0_and_1));
2828 	s->timer_val[2] = core_ticks_to_us(adap,
2829 		TIMERVALUE2_GET(timer_value_2_and_3));
2830 	s->timer_val[3] = core_ticks_to_us(adap,
2831 		TIMERVALUE3_GET(timer_value_2_and_3));
2832 	s->timer_val[4] = core_ticks_to_us(adap,
2833 		TIMERVALUE4_GET(timer_value_4_and_5));
2834 	s->timer_val[5] = core_ticks_to_us(adap,
2835 		TIMERVALUE5_GET(timer_value_4_and_5));
2836 
2837 	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
2838 	s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
2839 	s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
2840 	s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
2841 	s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
2842 
2843 	return 0;
2844 }
2845 
2846 static int t4_sge_init_hard(struct adapter *adap)
2847 {
2848 	struct sge *s = &adap->sge;
2849 
2850 	/*
2851 	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
2852 	 * Queue and Packet Data to the Free List.
2853 	 */
2854 	t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
2855 			 RXPKTCPLMODE_MASK);
2856 
2857 	/*
2858 	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2859 	 * and generate an interrupt when this occurs so we can recover.
2860 	 */
2861 	if (is_t4(adap->params.chip)) {
2862 		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2863 				 V_HP_INT_THRESH(M_HP_INT_THRESH) |
2864 				 V_LP_INT_THRESH(M_LP_INT_THRESH),
2865 				 V_HP_INT_THRESH(dbfifo_int_thresh) |
2866 				 V_LP_INT_THRESH(dbfifo_int_thresh));
2867 	} else {
2868 		t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2869 				 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
2870 				 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
2871 		t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
2872 				 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
2873 				 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
2874 	}
2875 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2876 			F_ENABLE_DROP);
2877 
2878 	/*
2879 	 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
2880 	 * t4_fixup_host_params().
2881 	 */
2882 	s->fl_pg_order = FL_PG_ORDER;
2883 	if (s->fl_pg_order)
2884 		t4_write_reg(adap,
2885 			     SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
2886 			     PAGE_SIZE << FL_PG_ORDER);
2887 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
2888 		     FL_MTU_SMALL_BUFSIZE(adap));
2889 	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
2890 		     FL_MTU_LARGE_BUFSIZE(adap));
2891 
2892 	/*
2893 	 * Note that the SGE Ingress Packet Count Interrupt Threshold and
2894 	 * Timer Holdoff values must be supplied by our caller.
2895 	 */
2896 	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2897 		     THRESHOLD_0(s->counter_val[0]) |
2898 		     THRESHOLD_1(s->counter_val[1]) |
2899 		     THRESHOLD_2(s->counter_val[2]) |
2900 		     THRESHOLD_3(s->counter_val[3]));
2901 	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2902 		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2903 		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2904 	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2905 		     TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
2906 		     TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
2907 	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2908 		     TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
2909 		     TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
2910 
2911 	return 0;
2912 }
2913 
2914 int t4_sge_init(struct adapter *adap)
2915 {
2916 	struct sge *s = &adap->sge;
2917 	u32 sge_control, sge_conm_ctrl;
2918 	int ret, egress_threshold;
2919 
2920 	/*
2921 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
2922 	 * t4_fixup_host_params().
2923 	 */
2924 	sge_control = t4_read_reg(adap, SGE_CONTROL);
2925 	s->pktshift = PKTSHIFT_GET(sge_control);
2926 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
2927 	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
2928 			    X_INGPADBOUNDARY_SHIFT);
2929 
2930 	if (adap->flags & USING_SOFT_PARAMS)
2931 		ret = t4_sge_init_soft(adap);
2932 	else
2933 		ret = t4_sge_init_hard(adap);
2934 	if (ret < 0)
2935 		return ret;
2936 
2937 	/*
2938 	 * A FL with <= fl_starve_thres buffers is starving and a periodic
2939 	 * timer will attempt to refill it.  This needs to be larger than the
2940 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
2941 	 * stuck waiting for new packets while the SGE is waiting for us to
2942 	 * give it more Free List entries.  (Note that the SGE's Egress
2943 	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
2944 	 * there was only a single field to control this.  For T5 there's the
2945 	 * original field which now only applies to Unpacked Mode Free List
2946 	 * buffers and a new field which only applies to Packed Mode Free List
2947 	 * buffers.
2948 	 */
2949 	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
2950 	if (is_t4(adap->params.chip))
2951 		egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
2952 	else
2953 		egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
2954 	s->fl_starve_thres = 2*egress_threshold + 1;
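	/* Illustrative example: an Egress Congestion Threshold of, say, 64
	 * corresponds to 128 Free List pointers, so fl_starve_thres works
	 * out to 2 * 64 + 1 = 129 buffers, i.e. the FL is flagged as
	 * starving just before it drops below what the SGE itself needs to
	 * keep making forward progress.
	 */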
2955 
2956 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2957 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2958 	s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
2959 	s->idma_stalled[0] = 0;
2960 	s->idma_stalled[1] = 0;
2961 	spin_lock_init(&s->intrq_lock);
2962 
2963 	return 0;
2964 }
2965