1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
44 #include <net/ipv6.h>
45 #include <net/tcp.h>
46 #ifdef CONFIG_NET_RX_BUSY_POLL
47 #include <net/busy_poll.h>
48 #endif /* CONFIG_NET_RX_BUSY_POLL */
49 #include "cxgb4.h"
50 #include "t4_regs.h"
51 #include "t4_values.h"
52 #include "t4_msg.h"
53 #include "t4fw_api.h"
54 
55 /*
56  * Rx buffer size.  We use largish buffers if possible but settle for single
57  * pages under memory shortage.
58  */
59 #if PAGE_SHIFT >= 16
60 # define FL_PG_ORDER 0
61 #else
62 # define FL_PG_ORDER (16 - PAGE_SHIFT)
63 #endif
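
/*
 * E.g. with 4KB pages PAGE_SHIFT is 12, giving FL_PG_ORDER = 4 and thus
 * 64KB "large" Free List buffers; with 64KB or bigger pages a single
 * page (order 0) is already large enough.
 */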
64 
65 /* RX_PULL_LEN should be <= RX_COPY_THRES */
66 #define RX_COPY_THRES    256
67 #define RX_PULL_LEN      128
68 
69 /*
70  * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
71  * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
72  */
73 #define RX_PKT_SKB_LEN   512
74 
75 /*
76  * Max number of Tx descriptors we clean up at a time.  Should be modest as
77  * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we eventually catch up and keep
79  * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
80  */
81 #define MAX_TX_RECLAIM 16
82 
83 /*
84  * Max number of Rx buffers we replenish at a time.  Again keep this modest,
85  * allocating buffers isn't cheap either.
86  */
87 #define MAX_RX_REFILL 16U
88 
89 /*
90  * Period of the Rx queue check timer.  This timer is infrequent as it has
91  * something to do only when the system experiences severe memory shortage.
92  */
93 #define RX_QCHECK_PERIOD (HZ / 2)
94 
95 /*
96  * Period of the Tx queue check timer.
97  */
98 #define TX_QCHECK_PERIOD (HZ / 2)
99 
/* SGE Hung Ingress DMA Threshold Warning time (in jiffies) and Warning Repeat
 * Rate (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
 * State Machines in the same state for this amount of time then we'll
103  * issue a warning about a potential hang.  We'll repeat the warning as the
104  * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
105  * the situation clears.  If the situation clears, we'll note that as well.
106  */
107 #define SGE_IDMA_WARN_THRESH (1 * HZ)
108 #define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
109 
110 /*
111  * Max number of Tx descriptors to be reclaimed by the Tx timer.
112  */
113 #define MAX_TIMER_TX_RECLAIM 100
114 
115 /*
116  * Timer index used when backing off due to memory shortage.
117  */
118 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
119 
120 /*
121  * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
122  * attempt to refill it.
123  */
124 #define FL_STARVE_THRES 4
125 
126 /*
127  * Suspend an Ethernet Tx queue with fewer available descriptors than this.
128  * This is the same as calc_tx_descs() for a TSO packet with
129  * nr_frags == MAX_SKB_FRAGS.
130  */
131 #define ETHTXQ_STOP_THRES \
132 	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
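
/*
 * Sanity check on the arithmetic above: for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS, calc_tx_flits() below needs
 * sgl_len(MAX_SKB_FRAGS + 1) = (3 * MAX_SKB_FRAGS) / 2 +
 * (MAX_SKB_FRAGS & 1) + 2 flits for the SGL, plus 4 flits of WR/CPL
 * headers and 2 flits of LSO header; dividing the total by the 8
 * flits per descriptor yields the expression above.
 */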
133 
134 /*
135  * Suspension threshold for non-Ethernet Tx queues.  We require enough room
136  * for a full sized WR.
137  */
138 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
139 
140 /*
141  * Max Tx descriptor space we allow for an Ethernet packet to be inlined
142  * into a WR.
143  */
144 #define MAX_IMM_TX_PKT_LEN 128
145 
146 /*
147  * Max size of a WR sent through a control Tx queue.
148  */
149 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
150 
151 struct tx_sw_desc {                /* SW state per Tx descriptor */
152 	struct sk_buff *skb;
153 	struct ulptx_sgl *sgl;
154 };
155 
156 struct rx_sw_desc {                /* SW state per Rx descriptor */
157 	struct page *page;
158 	dma_addr_t dma_addr;
159 };
160 
161 /*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
163  * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
164  * We could easily support more but there doesn't seem to be much need for
165  * that ...
166  */
167 #define FL_MTU_SMALL 1500
168 #define FL_MTU_LARGE 9000
169 
170 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
171 					  unsigned int mtu)
172 {
173 	struct sge *s = &adapter->sge;
174 
175 	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
176 }
177 
178 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
179 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
180 
181 /*
182  * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
183  * these to specify the buffer size as an index into the SGE Free List Buffer
184  * Size register array.  We also use bit 4, when the buffer has been unmapped
185  * for DMA, but this is of course never sent to the hardware and is only used
186  * to prevent double unmappings.  All of the above requires that the Free List
187  * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's minimal
189  * Free List Buffer alignment is 32 bytes, this works out for us ...
190  */
191 enum {
192 	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
194 	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
195 
196 	/*
197 	 * XXX We shouldn't depend on being able to use these indices.
198 	 * XXX Especially when some other Master PF has initialized the
199 	 * XXX adapter or we use the Firmware Configuration File.  We
200 	 * XXX should really search through the Host Buffer Size register
201 	 * XXX array for the appropriately sized buffer indices.
202 	 */
203 	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */
205 
206 	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
207 	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
208 };
209 
210 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
211 #define MIN_NAPI_WORK  1
212 
213 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
214 {
215 	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
216 }
217 
218 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
219 {
220 	return !(d->dma_addr & RX_UNMAPPED_BUF);
221 }
222 
223 /**
224  *	txq_avail - return the number of available slots in a Tx queue
225  *	@q: the Tx queue
226  *
227  *	Returns the number of descriptors in a Tx queue available to write new
228  *	packets.
229  */
230 static inline unsigned int txq_avail(const struct sge_txq *q)
231 {
232 	return q->size - 1 - q->in_use;
233 }
234 
235 /**
236  *	fl_cap - return the capacity of a free-buffer list
237  *	@fl: the FL
238  *
239  *	Returns the capacity of a free-buffer list.  The capacity is less than
240  *	the size because one descriptor needs to be left unpopulated, otherwise
241  *	HW will think the FL is empty.
242  */
243 static inline unsigned int fl_cap(const struct sge_fl *fl)
244 {
245 	return fl->size - 8;   /* 1 descriptor = 8 buffers */
246 }
247 
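/*
 * Credits in pend_cred haven't been handed to the hardware yet, so only
 * avail - pend_cred buffers are actually usable by the SGE when we test
 * for starvation.
 */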
248 static inline bool fl_starving(const struct sge_fl *fl)
249 {
250 	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
251 }
252 
253 static int map_skb(struct device *dev, const struct sk_buff *skb,
254 		   dma_addr_t *addr)
255 {
256 	const skb_frag_t *fp, *end;
257 	const struct skb_shared_info *si;
258 
259 	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
260 	if (dma_mapping_error(dev, *addr))
261 		goto out_err;
262 
263 	si = skb_shinfo(skb);
264 	end = &si->frags[si->nr_frags];
265 
266 	for (fp = si->frags; fp < end; fp++) {
267 		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
268 					   DMA_TO_DEVICE);
269 		if (dma_mapping_error(dev, *addr))
270 			goto unwind;
271 	}
272 	return 0;
273 
274 unwind:
275 	while (fp-- > si->frags)
276 		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
277 
278 	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
279 out_err:
280 	return -ENOMEM;
281 }
282 
283 #ifdef CONFIG_NEED_DMA_MAP_STATE
284 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
285 		      const dma_addr_t *addr)
286 {
287 	const skb_frag_t *fp, *end;
288 	const struct skb_shared_info *si;
289 
290 	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
291 
292 	si = skb_shinfo(skb);
293 	end = &si->frags[si->nr_frags];
294 	for (fp = si->frags; fp < end; fp++)
295 		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
296 }
297 
298 /**
299  *	deferred_unmap_destructor - unmap a packet when it is freed
300  *	@skb: the packet
301  *
302  *	This is the packet destructor used for Tx packets that need to remain
303  *	mapped until they are freed rather than until their Tx descriptors are
304  *	freed.
305  */
306 static void deferred_unmap_destructor(struct sk_buff *skb)
307 {
308 	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
309 }
310 #endif
311 
312 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
313 		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
314 {
315 	const struct ulptx_sge_pair *p;
316 	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
317 
318 	if (likely(skb_headlen(skb)))
319 		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
320 				 DMA_TO_DEVICE);
321 	else {
322 		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
323 			       DMA_TO_DEVICE);
324 		nfrags--;
325 	}
326 
327 	/*
328 	 * the complexity below is because of the possibility of a wrap-around
329 	 * in the middle of an SGL
330 	 */
331 	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
332 		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
333 unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
334 				       ntohl(p->len[0]), DMA_TO_DEVICE);
335 			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
336 				       ntohl(p->len[1]), DMA_TO_DEVICE);
337 			p++;
338 		} else if ((u8 *)p == (u8 *)q->stat) {
339 			p = (const struct ulptx_sge_pair *)q->desc;
340 			goto unmap;
341 		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
342 			const __be64 *addr = (const __be64 *)q->desc;
343 
344 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
345 				       ntohl(p->len[0]), DMA_TO_DEVICE);
346 			dma_unmap_page(dev, be64_to_cpu(addr[1]),
347 				       ntohl(p->len[1]), DMA_TO_DEVICE);
348 			p = (const struct ulptx_sge_pair *)&addr[2];
349 		} else {
350 			const __be64 *addr = (const __be64 *)q->desc;
351 
352 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
353 				       ntohl(p->len[0]), DMA_TO_DEVICE);
354 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
355 				       ntohl(p->len[1]), DMA_TO_DEVICE);
356 			p = (const struct ulptx_sge_pair *)&addr[1];
357 		}
358 	}
359 	if (nfrags) {
360 		__be64 addr;
361 
362 		if ((u8 *)p == (u8 *)q->stat)
363 			p = (const struct ulptx_sge_pair *)q->desc;
364 		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
365 						       *(const __be64 *)q->desc;
366 		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
367 			       DMA_TO_DEVICE);
368 	}
369 }
370 
371 /**
372  *	free_tx_desc - reclaims Tx descriptors and their buffers
373  *	@adapter: the adapter
374  *	@q: the Tx queue to reclaim descriptors from
375  *	@n: the number of descriptors to reclaim
376  *	@unmap: whether the buffers should be unmapped for DMA
377  *
378  *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
379  *	Tx buffers.  Called with the Tx queue lock held.
380  */
381 static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
382 			 unsigned int n, bool unmap)
383 {
384 	struct tx_sw_desc *d;
385 	unsigned int cidx = q->cidx;
386 	struct device *dev = adap->pdev_dev;
387 
388 	d = &q->sdesc[cidx];
389 	while (n--) {
390 		if (d->skb) {                       /* an SGL is present */
391 			if (unmap)
392 				unmap_sgl(dev, d->skb, d->sgl, q);
393 			dev_consume_skb_any(d->skb);
394 			d->skb = NULL;
395 		}
396 		++d;
397 		if (++cidx == q->size) {
398 			cidx = 0;
399 			d = q->sdesc;
400 		}
401 	}
402 	q->cidx = cidx;
403 }
404 
405 /*
406  * Return the number of reclaimable descriptors in a Tx queue.
407  */
408 static inline int reclaimable(const struct sge_txq *q)
409 {
410 	int hw_cidx = ntohs(q->stat->cidx);
411 	hw_cidx -= q->cidx;
412 	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
413 }
414 
415 /**
416  *	reclaim_completed_tx - reclaims completed Tx descriptors
417  *	@adap: the adapter
418  *	@q: the Tx queue to reclaim completed descriptors from
419  *	@unmap: whether the buffers should be unmapped for DMA
420  *
421  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
422  *	and frees the associated buffers if possible.  Called with the Tx
423  *	queue locked.
424  */
425 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
426 					bool unmap)
427 {
428 	int avail = reclaimable(q);
429 
430 	if (avail) {
431 		/*
432 		 * Limit the amount of clean up work we do at a time to keep
433 		 * the Tx lock hold time O(1).
434 		 */
435 		if (avail > MAX_TX_RECLAIM)
436 			avail = MAX_TX_RECLAIM;
437 
438 		free_tx_desc(adap, q, avail, unmap);
439 		q->in_use -= avail;
440 	}
441 }
442 
443 static inline int get_buf_size(struct adapter *adapter,
444 			       const struct rx_sw_desc *d)
445 {
446 	struct sge *s = &adapter->sge;
447 	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
448 	int buf_size;
449 
450 	switch (rx_buf_size_idx) {
451 	case RX_SMALL_PG_BUF:
452 		buf_size = PAGE_SIZE;
453 		break;
454 
455 	case RX_LARGE_PG_BUF:
456 		buf_size = PAGE_SIZE << s->fl_pg_order;
457 		break;
458 
459 	case RX_SMALL_MTU_BUF:
460 		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
461 		break;
462 
463 	case RX_LARGE_MTU_BUF:
464 		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
465 		break;
466 
467 	default:
		BUG();
469 	}
470 
471 	return buf_size;
472 }
473 
474 /**
475  *	free_rx_bufs - free the Rx buffers on an SGE free list
476  *	@adap: the adapter
477  *	@q: the SGE free list to free buffers from
478  *	@n: how many buffers to free
479  *
480  *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
481  *	buffers must be made inaccessible to HW before calling this function.
482  */
483 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
484 {
485 	while (n--) {
486 		struct rx_sw_desc *d = &q->sdesc[q->cidx];
487 
488 		if (is_buf_mapped(d))
489 			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
490 				       get_buf_size(adap, d),
				       DMA_FROM_DEVICE);
492 		put_page(d->page);
493 		d->page = NULL;
494 		if (++q->cidx == q->size)
495 			q->cidx = 0;
496 		q->avail--;
497 	}
498 }
499 
500 /**
501  *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
502  *	@adap: the adapter
503  *	@q: the SGE free list
504  *
505  *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
506  *	buffer must be made inaccessible to HW before calling this function.
507  *
508  *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Note, however, that the FL still loses any further access to the buffer.
510  */
511 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
512 {
513 	struct rx_sw_desc *d = &q->sdesc[q->cidx];
514 
515 	if (is_buf_mapped(d))
516 		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), DMA_FROM_DEVICE);
518 	d->page = NULL;
519 	if (++q->cidx == q->size)
520 		q->cidx = 0;
521 	q->avail--;
522 }
523 
524 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
525 {
526 	u32 val;
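
	/*
	 * The SGE tracks Free List credits in units of 8 buffers (one
	 * descriptor's worth, cf. fl_cap()), so we only hand over credit
	 * in multiples of 8 and carry the remainder in pend_cred.
	 */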
527 	if (q->pend_cred >= 8) {
528 		if (is_t4(adap->params.chip))
529 			val = PIDX_V(q->pend_cred / 8);
530 		else
531 			val = PIDX_T5_V(q->pend_cred / 8) |
532 				DBTYPE_F;
533 		val |= DBPRIO_F;
534 		wmb();
535 
536 		/* If we don't have access to the new User Doorbell (T5+), use
537 		 * the old doorbell mechanism; otherwise use the new BAR2
538 		 * mechanism.
539 		 */
540 		if (unlikely(q->bar2_addr == NULL)) {
541 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
542 				     val | QID_V(q->cntxt_id));
543 		} else {
544 			writel(val | QID_V(q->bar2_qid),
545 			       q->bar2_addr + SGE_UDB_KDOORBELL);
546 
			/* This Write Memory Barrier will force the write to
548 			 * the User Doorbell area to be flushed.
549 			 */
550 			wmb();
551 		}
552 		q->pend_cred &= 7;
553 	}
554 }
555 
556 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
557 				  dma_addr_t mapping)
558 {
559 	sd->page = pg;
560 	sd->dma_addr = mapping;      /* includes size low bits */
561 }
562 
563 /**
564  *	refill_fl - refill an SGE Rx buffer ring
565  *	@adap: the adapter
566  *	@q: the ring to refill
567  *	@n: the number of new buffers to allocate
568  *	@gfp: the gfp flags for the allocations
569  *
570  *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must ensure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low, mark it as starving in the bitmap of starving FLs.
574  *
575  *	Returns the number of buffers allocated.
576  */
577 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
578 			      gfp_t gfp)
579 {
580 	struct sge *s = &adap->sge;
581 	struct page *pg;
582 	dma_addr_t mapping;
583 	unsigned int cred = q->avail;
584 	__be64 *d = &q->desc[q->pidx];
585 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
586 
587 	gfp |= __GFP_NOWARN;
588 
589 	if (s->fl_pg_order == 0)
590 		goto alloc_small_pages;
591 
592 	/*
593 	 * Prefer large buffers
594 	 */
595 	while (n) {
596 		pg = __dev_alloc_pages(gfp, s->fl_pg_order);
597 		if (unlikely(!pg)) {
598 			q->large_alloc_failed++;
599 			break;       /* fall back to single pages */
600 		}
601 
602 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
603 				       PAGE_SIZE << s->fl_pg_order,
				       DMA_FROM_DEVICE);
605 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
606 			__free_pages(pg, s->fl_pg_order);
607 			goto out;   /* do not try small pages for this error */
608 		}
609 		mapping |= RX_LARGE_PG_BUF;
610 		*d++ = cpu_to_be64(mapping);
611 
612 		set_rx_sw_desc(sd, pg, mapping);
613 		sd++;
614 
615 		q->avail++;
616 		if (++q->pidx == q->size) {
617 			q->pidx = 0;
618 			sd = q->sdesc;
619 			d = q->desc;
620 		}
621 		n--;
622 	}
623 
624 alloc_small_pages:
625 	while (n--) {
626 		pg = __dev_alloc_page(gfp);
627 		if (unlikely(!pg)) {
628 			q->alloc_failed++;
629 			break;
630 		}
631 
632 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       DMA_FROM_DEVICE);
634 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
635 			put_page(pg);
636 			goto out;
637 		}
638 		*d++ = cpu_to_be64(mapping);
639 
640 		set_rx_sw_desc(sd, pg, mapping);
641 		sd++;
642 
643 		q->avail++;
644 		if (++q->pidx == q->size) {
645 			q->pidx = 0;
646 			sd = q->sdesc;
647 			d = q->desc;
648 		}
649 	}
650 
651 out:	cred = q->avail - cred;
652 	q->pend_cred += cred;
653 	ring_fl_db(adap, q);
654 
655 	if (unlikely(fl_starving(q))) {
656 		smp_wmb();
657 		set_bit(q->cntxt_id - adap->sge.egr_start,
658 			adap->sge.starving_fl);
659 	}
660 
661 	return cred;
662 }
663 
664 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
665 {
666 	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
667 		  GFP_ATOMIC);
668 }
669 
670 /**
671  *	alloc_ring - allocate resources for an SGE descriptor ring
672  *	@dev: the PCI device's core device
673  *	@nelem: the number of descriptors
674  *	@elem_size: the size of each descriptor
675  *	@sw_size: the size of the SW state associated with each ring element
676  *	@phys: the physical address of the allocated ring
677  *	@metadata: address of the array holding the SW state for the ring
678  *	@stat_size: extra space in HW ring for status information
679  *	@node: preferred node for memory allocations
680  *
681  *	Allocates resources for an SGE descriptor ring, such as Tx queues,
682  *	free buffer lists, or response queues.  Each SGE ring requires
683  *	space for its HW descriptors plus, optionally, space for the SW state
684  *	associated with each HW entry (the metadata).  The function returns
685  *	three values: the virtual address for the HW ring (the return value
686  *	of the function), the bus address of the HW ring, and the address
687  *	of the SW ring.
688  */
689 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
690 			size_t sw_size, dma_addr_t *phys, void *metadata,
691 			size_t stat_size, int node)
692 {
693 	size_t len = nelem * elem_size + stat_size;
694 	void *s = NULL;
695 	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
696 
697 	if (!p)
698 		return NULL;
699 	if (sw_size) {
700 		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
701 
702 		if (!s) {
703 			dma_free_coherent(dev, len, p, *phys);
704 			return NULL;
705 		}
706 	}
707 	if (metadata)
708 		*(void **)metadata = s;
709 	memset(p, 0, len);
710 	return p;
711 }
712 
713 /**
714  *	sgl_len - calculates the size of an SGL of the given capacity
715  *	@n: the number of SGL entries
716  *
717  *	Calculates the number of flits needed for a scatter/gather list that
718  *	can hold the given number of entries.
719  */
720 static inline unsigned int sgl_len(unsigned int n)
721 {
722 	n--;
723 	return (3 * n) / 2 + (n & 1) + 2;
724 }
725 
726 /**
727  *	flits_to_desc - returns the num of Tx descriptors for the given flits
728  *	@n: the number of flits
729  *
730  *	Returns the number of Tx descriptors needed for the supplied number
731  *	of flits.
732  */
733 static inline unsigned int flits_to_desc(unsigned int n)
734 {
735 	BUG_ON(n > SGE_MAX_WR_LEN / 8);
736 	return DIV_ROUND_UP(n, 8);
737 }
738 
739 /**
740  *	is_eth_imm - can an Ethernet packet be sent as immediate data?
741  *	@skb: the packet
742  *
 *	Returns whether an Ethernet packet is small enough to be sent as
 *	immediate data: the needed CPL header length if it fits, 0 otherwise.
745  */
746 static inline int is_eth_imm(const struct sk_buff *skb)
747 {
748 	int hdrlen = skb_shinfo(skb)->gso_size ?
749 			sizeof(struct cpl_tx_pkt_lso_core) : 0;
750 
751 	hdrlen += sizeof(struct cpl_tx_pkt);
752 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
753 		return hdrlen;
754 	return 0;
755 }
756 
757 /**
758  *	calc_tx_flits - calculate the number of flits for a packet Tx WR
759  *	@skb: the packet
760  *
761  *	Returns the number of flits needed for a Tx WR for the given Ethernet
762  *	packet, including the needed WR and CPL headers.
763  */
764 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
765 {
766 	unsigned int flits;
767 	int hdrlen = is_eth_imm(skb);
768 
769 	if (hdrlen)
770 		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
771 
772 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
773 	if (skb_shinfo(skb)->gso_size)
774 		flits += 2;
775 	return flits;
776 }
777 
778 /**
779  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
780  *	@skb: the packet
781  *
782  *	Returns the number of Tx descriptors needed for the given Ethernet
783  *	packet, including the needed WR and CPL headers.
784  */
785 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
786 {
787 	return flits_to_desc(calc_tx_flits(skb));
788 }
789 
790 /**
791  *	write_sgl - populate a scatter/gather list for a packet
792  *	@skb: the packet
793  *	@q: the Tx queue we are writing into
794  *	@sgl: starting location for writing the SGL
795  *	@end: points right after the end of the SGL
796  *	@start: start offset into skb main-body data to include in the SGL
797  *	@addr: the list of bus addresses for the SGL elements
798  *
799  *	Generates a gather list for the buffers that make up a packet.
800  *	The caller must provide adequate space for the SGL that will be written.
801  *	The SGL includes all of the packet's page fragments and the data in its
802  *	main body except for the first @start bytes.  @sgl must be 16-byte
803  *	aligned and within a Tx descriptor with available space.  @end points
804  *	right after the end of the SGL but does not account for any potential
805  *	wrap around, i.e., @end > @sgl.
806  */
807 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
808 		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
809 		      const dma_addr_t *addr)
810 {
811 	unsigned int i, len;
812 	struct ulptx_sge_pair *to;
813 	const struct skb_shared_info *si = skb_shinfo(skb);
814 	unsigned int nfrags = si->nr_frags;
815 	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
816 
817 	len = skb_headlen(skb) - start;
818 	if (likely(len)) {
819 		sgl->len0 = htonl(len);
820 		sgl->addr0 = cpu_to_be64(addr[0] + start);
821 		nfrags++;
822 	} else {
823 		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
824 		sgl->addr0 = cpu_to_be64(addr[1]);
825 	}
826 
827 	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
828 			      ULPTX_NSGE_V(nfrags));
829 	if (likely(--nfrags == 0))
830 		return;
831 	/*
832 	 * Most of the complexity below deals with the possibility we hit the
833 	 * end of the queue in the middle of writing the SGL.  For this case
834 	 * only we create the SGL in a temporary buffer and then copy it.
835 	 */
836 	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
837 
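	/*
	 * addr[0] holds the skb head mapping and fragment j's mapping lives
	 * at addr[j + 1], so the ++i sequencing below keeps the address
	 * index running one slot ahead of the fragment-size index; i starts
	 * at 1 when addr0 above already consumed fragment 0.
	 */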
838 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
839 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
840 		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
841 		to->addr[0] = cpu_to_be64(addr[i]);
842 		to->addr[1] = cpu_to_be64(addr[++i]);
843 	}
844 	if (nfrags) {
845 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
846 		to->len[1] = cpu_to_be32(0);
847 		to->addr[0] = cpu_to_be64(addr[i + 1]);
848 	}
849 	if (unlikely((u8 *)end > (u8 *)q->stat)) {
850 		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
851 
852 		if (likely(part0))
853 			memcpy(sgl->sge, buf, part0);
854 		part1 = (u8 *)end - (u8 *)q->stat;
855 		memcpy(q->desc, (u8 *)buf + part0, part1);
856 		end = (void *)q->desc + part1;
857 	}
858 	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
859 		*end = 0;
860 }
861 
/* This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space.  For a coalesced WR the SGE fetches the data from its FIFO
 * instead of from host memory.
 */
866 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
867 {
868 	int count = 8;
869 
870 	while (count) {
871 		writeq(*src, dst);
872 		src++;
873 		dst++;
874 		count--;
875 	}
876 }
877 
878 /**
879  *	ring_tx_db - check and potentially ring a Tx queue's doorbell
880  *	@adap: the adapter
881  *	@q: the Tx queue
882  *	@n: number of new descriptors to give to HW
883  *
 *	Ring the doorbell for a Tx queue.
885  */
886 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
887 {
888 	wmb();            /* write descriptors before telling HW */
889 
890 	/* If we don't have access to the new User Doorbell (T5+), use the old
891 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
892 	 */
893 	if (unlikely(q->bar2_addr == NULL)) {
894 		u32 val = PIDX_V(n);
895 		unsigned long flags;
896 
897 		/* For T4 we need to participate in the Doorbell Recovery
898 		 * mechanism.
899 		 */
900 		spin_lock_irqsave(&q->db_lock, flags);
901 		if (!q->db_disabled)
902 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
903 				     QID_V(q->cntxt_id) | val);
904 		else
905 			q->db_pidx_inc += n;
906 		q->db_pidx = q->pidx;
907 		spin_unlock_irqrestore(&q->db_lock, flags);
908 	} else {
909 		u32 val = PIDX_T5_V(n);
910 
911 		/* T4 and later chips share the same PIDX field offset within
912 		 * the doorbell, but T5 and later shrank the field in order to
913 		 * gain a bit for Doorbell Priority.  The field was absurdly
914 		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a PIDX increment is too large.
916 		 */
917 		WARN_ON(val & DBPRIO_F);
918 
919 		/* If we're only writing a single TX Descriptor and we can use
920 		 * Inferred QID registers, we can use the Write Combining
921 		 * Gather Buffer; otherwise we use the simple doorbell.
922 		 */
923 		if (n == 1 && q->bar2_qid == 0) {
924 			int index = (q->pidx
925 				     ? (q->pidx - 1)
926 				     : (q->size - 1));
927 			u64 *wr = (u64 *)&q->desc[index];
928 
929 			cxgb_pio_copy((u64 __iomem *)
930 				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
931 				      wr);
932 		} else {
933 			writel(val | QID_V(q->bar2_qid),
934 			       q->bar2_addr + SGE_UDB_KDOORBELL);
935 		}
936 
937 		/* This Write Memory Barrier will force the write to the User
938 		 * Doorbell area to be flushed.  This is needed to prevent
939 		 * writes on different CPUs for the same queue from hitting
940 		 * the adapter out of order.  This is required when some Work
941 		 * Requests take the Write Combine Gather Buffer path (user
942 		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
943 		 * take the traditional path where we simply increment the
944 		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
945 		 * hardware DMA read the actual Work Request.
946 		 */
947 		wmb();
948 	}
949 }
950 
951 /**
952  *	inline_tx_skb - inline a packet's data into Tx descriptors
953  *	@skb: the packet
954  *	@q: the Tx queue where the packet will be inlined
955  *	@pos: starting position in the Tx queue where to inline the packet
956  *
957  *	Inline a packet's contents directly into Tx descriptors, starting at
958  *	the given position within the Tx DMA ring.
959  *	Most of the complexity of this operation is dealing with wrap arounds
960  *	in the middle of the packet we want to inline.
961  */
962 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
963 			  void *pos)
964 {
965 	u64 *p;
966 	int left = (void *)q->stat - pos;
967 
968 	if (likely(skb->len <= left)) {
969 		if (likely(!skb->data_len))
970 			skb_copy_from_linear_data(skb, pos, skb->len);
971 		else
972 			skb_copy_bits(skb, 0, pos, skb->len);
973 		pos += skb->len;
974 	} else {
975 		skb_copy_bits(skb, 0, pos, left);
976 		skb_copy_bits(skb, left, q->desc, skb->len - left);
977 		pos = (void *)q->desc + (skb->len - left);
978 	}
979 
980 	/* 0-pad to multiple of 16 */
981 	p = PTR_ALIGN(pos, 8);
982 	if ((uintptr_t)p & 8)
983 		*p = 0;
984 }
985 
986 /*
987  * Figure out what HW csum a packet wants and return the appropriate control
988  * bits.
989  */
990 static u64 hwcsum(const struct sk_buff *skb)
991 {
992 	int csum_type;
993 	const struct iphdr *iph = ip_hdr(skb);
994 
995 	if (iph->version == 4) {
996 		if (iph->protocol == IPPROTO_TCP)
997 			csum_type = TX_CSUM_TCPIP;
998 		else if (iph->protocol == IPPROTO_UDP)
999 			csum_type = TX_CSUM_UDPIP;
1000 		else {
1001 nocsum:			/*
1002 			 * unknown protocol, disable HW csum
1003 			 * and hope a bad packet is detected
1004 			 */
1005 			return TXPKT_L4CSUM_DIS;
1006 		}
1007 	} else {
1008 		/*
1009 		 * this doesn't work with extension headers
1010 		 */
1011 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1012 
1013 		if (ip6h->nexthdr == IPPROTO_TCP)
1014 			csum_type = TX_CSUM_TCPIP6;
1015 		else if (ip6h->nexthdr == IPPROTO_UDP)
1016 			csum_type = TX_CSUM_UDPIP6;
1017 		else
1018 			goto nocsum;
1019 	}
1020 
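	/*
	 * For the offloaded TCP/UDP checksum types the hardware locates the
	 * L4 header itself from the Ethernet and IP header lengths; for any
	 * other type we must point it at the checksum start and at the
	 * location where the result is to be stored.
	 */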
1021 	if (likely(csum_type >= TX_CSUM_TCPIP))
1022 		return TXPKT_CSUM_TYPE(csum_type) |
1023 			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
1024 			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
1025 	else {
1026 		int start = skb_transport_offset(skb);
1027 
1028 		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
1029 			TXPKT_CSUM_LOC(start + skb->csum_offset);
1030 	}
1031 }
1032 
1033 static void eth_txq_stop(struct sge_eth_txq *q)
1034 {
1035 	netif_tx_stop_queue(q->txq);
1036 	q->q.stops++;
1037 }
1038 
1039 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1040 {
1041 	q->in_use += n;
1042 	q->pidx += n;
1043 	if (q->pidx >= q->size)
1044 		q->pidx -= q->size;
1045 }
1046 
1047 /**
1048  *	t4_eth_xmit - add a packet to an Ethernet Tx queue
1049  *	@skb: the packet
1050  *	@dev: the egress net device
1051  *
1052  *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
1053  */
1054 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1055 {
1056 	int len;
1057 	u32 wr_mid;
1058 	u64 cntrl, *end;
1059 	int qidx, credits;
1060 	unsigned int flits, ndesc;
1061 	struct adapter *adap;
1062 	struct sge_eth_txq *q;
1063 	const struct port_info *pi;
1064 	struct fw_eth_tx_pkt_wr *wr;
1065 	struct cpl_tx_pkt_core *cpl;
1066 	const struct skb_shared_info *ssi;
1067 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1068 	bool immediate = false;
1069 
1070 	/*
	 * The chip's minimum packet length is 10 octets, but we play it safe
	 * and reject anything shorter than an Ethernet header.
1073 	 */
1074 	if (unlikely(skb->len < ETH_HLEN)) {
1075 out_free:	dev_kfree_skb_any(skb);
1076 		return NETDEV_TX_OK;
1077 	}
1078 
1079 	pi = netdev_priv(dev);
1080 	adap = pi->adapter;
1081 	qidx = skb_get_queue_mapping(skb);
1082 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
1083 
1084 	reclaim_completed_tx(adap, &q->q, true);
1085 
1086 	flits = calc_tx_flits(skb);
1087 	ndesc = flits_to_desc(flits);
1088 	credits = txq_avail(&q->q) - ndesc;
1089 
1090 	if (unlikely(credits < 0)) {
1091 		eth_txq_stop(q);
1092 		dev_err(adap->pdev_dev,
1093 			"%s: Tx ring %u full while queue awake!\n",
1094 			dev->name, qidx);
1095 		return NETDEV_TX_BUSY;
1096 	}
1097 
1098 	if (is_eth_imm(skb))
1099 		immediate = true;
1100 
1101 	if (!immediate &&
1102 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1103 		q->mapping_err++;
1104 		goto out_free;
1105 	}
1106 
1107 	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1108 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1109 		eth_txq_stop(q);
1110 		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1111 	}
1112 
1113 	wr = (void *)&q->q.desc[q->q.pidx];
1114 	wr->equiq_to_len16 = htonl(wr_mid);
1115 	wr->r3 = cpu_to_be64(0);
1116 	end = (u64 *)wr + flits;
1117 
1118 	len = immediate ? skb->len : 0;
1119 	ssi = skb_shinfo(skb);
1120 	if (ssi->gso_size) {
1121 		struct cpl_tx_pkt_lso *lso = (void *)wr;
1122 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1123 		int l3hdr_len = skb_network_header_len(skb);
1124 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1125 
1126 		len += sizeof(*lso);
1127 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1128 				       FW_WR_IMMDLEN_V(len));
1129 		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1130 					LSO_FIRST_SLICE | LSO_LAST_SLICE |
1131 					LSO_IPV6(v6) |
1132 					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1133 					LSO_IPHDR_LEN(l3hdr_len / 4) |
1134 					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1135 		lso->c.ipid_ofst = htons(0);
1136 		lso->c.mss = htons(ssi->gso_size);
1137 		lso->c.seqno_offset = htonl(0);
1138 		if (is_t4(adap->params.chip))
1139 			lso->c.len = htonl(skb->len);
1140 		else
1141 			lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
1142 		cpl = (void *)(lso + 1);
1143 		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1144 			TXPKT_IPHDR_LEN(l3hdr_len) |
1145 			TXPKT_ETHHDR_LEN(eth_xtra_len);
1146 		q->tso++;
1147 		q->tx_cso += ssi->gso_segs;
1148 	} else {
1149 		len += sizeof(*cpl);
1150 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1151 				       FW_WR_IMMDLEN_V(len));
1152 		cpl = (void *)(wr + 1);
1153 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1154 			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1155 			q->tx_cso++;
1156 		} else
1157 			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1158 	}
1159 
1160 	if (skb_vlan_tag_present(skb)) {
1161 		q->vlan_ins++;
1162 		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
1163 	}
1164 
1165 	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1166 			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
1167 	cpl->pack = htons(0);
1168 	cpl->len = htons(skb->len);
1169 	cpl->ctrl1 = cpu_to_be64(cntrl);
1170 
1171 	if (immediate) {
1172 		inline_tx_skb(skb, &q->q, cpl + 1);
1173 		dev_consume_skb_any(skb);
1174 	} else {
1175 		int last_desc;
1176 
1177 		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1178 			  addr);
1179 		skb_orphan(skb);
1180 
1181 		last_desc = q->q.pidx + ndesc - 1;
1182 		if (last_desc >= q->q.size)
1183 			last_desc -= q->q.size;
1184 		q->q.sdesc[last_desc].skb = skb;
1185 		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1186 	}
1187 
1188 	txq_advance(&q->q, ndesc);
1189 
1190 	ring_tx_db(adap, &q->q, ndesc);
1191 	return NETDEV_TX_OK;
1192 }
1193 
1194 /**
1195  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1196  *	@q: the SGE control Tx queue
1197  *
1198  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1199  *	that send only immediate data (presently just the control queues) and
1200  *	thus do not have any sk_buffs to release.
1201  */
1202 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1203 {
1204 	int hw_cidx = ntohs(q->stat->cidx);
1205 	int reclaim = hw_cidx - q->cidx;
1206 
1207 	if (reclaim < 0)
1208 		reclaim += q->size;
1209 
1210 	q->in_use -= reclaim;
1211 	q->cidx = hw_cidx;
1212 }
1213 
1214 /**
1215  *	is_imm - check whether a packet can be sent as immediate data
1216  *	@skb: the packet
1217  *
1218  *	Returns true if a packet can be sent as a WR with immediate data.
1219  */
1220 static inline int is_imm(const struct sk_buff *skb)
1221 {
1222 	return skb->len <= MAX_CTRL_WR_LEN;
1223 }
1224 
1225 /**
1226  *	ctrlq_check_stop - check if a control queue is full and should stop
1227  *	@q: the queue
1228  *	@wr: most recent WR written to the queue
1229  *
1230  *	Check if a control queue has become full and should be stopped.
1231  *	We clean up control queue descriptors very lazily, only when we are out.
1232  *	If the queue is still full after reclaiming any completed descriptors
1233  *	we suspend it and have the last WR wake it up.
1234  */
1235 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1236 {
1237 	reclaim_completed_tx_imm(&q->q);
1238 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1239 		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1240 		q->q.stops++;
1241 		q->full = 1;
1242 	}
1243 }
1244 
1245 /**
1246  *	ctrl_xmit - send a packet through an SGE control Tx queue
1247  *	@q: the control queue
1248  *	@skb: the packet
1249  *
1250  *	Send a packet through an SGE control Tx queue.  Packets sent through
1251  *	a control queue must fit entirely as immediate data.
1252  */
1253 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1254 {
1255 	unsigned int ndesc;
1256 	struct fw_wr_hdr *wr;
1257 
1258 	if (unlikely(!is_imm(skb))) {
1259 		WARN_ON(1);
1260 		dev_kfree_skb(skb);
1261 		return NET_XMIT_DROP;
1262 	}
1263 
1264 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1265 	spin_lock(&q->sendq.lock);
1266 
1267 	if (unlikely(q->full)) {
1268 		skb->priority = ndesc;                  /* save for restart */
1269 		__skb_queue_tail(&q->sendq, skb);
1270 		spin_unlock(&q->sendq.lock);
1271 		return NET_XMIT_CN;
1272 	}
1273 
1274 	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1275 	inline_tx_skb(skb, &q->q, wr);
1276 
1277 	txq_advance(&q->q, ndesc);
1278 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1279 		ctrlq_check_stop(q, wr);
1280 
1281 	ring_tx_db(q->adap, &q->q, ndesc);
1282 	spin_unlock(&q->sendq.lock);
1283 
1284 	kfree_skb(skb);
1285 	return NET_XMIT_SUCCESS;
1286 }
1287 
1288 /**
1289  *	restart_ctrlq - restart a suspended control queue
1290  *	@data: the control queue to restart
1291  *
1292  *	Resumes transmission on a suspended Tx control queue.
1293  */
1294 static void restart_ctrlq(unsigned long data)
1295 {
1296 	struct sk_buff *skb;
1297 	unsigned int written = 0;
1298 	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1299 
1300 	spin_lock(&q->sendq.lock);
1301 	reclaim_completed_tx_imm(&q->q);
1302 	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
1303 
1304 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1305 		struct fw_wr_hdr *wr;
1306 		unsigned int ndesc = skb->priority;     /* previously saved */
1307 
1308 		/*
1309 		 * Write descriptors and free skbs outside the lock to limit
1310 		 * wait times.  q->full is still set so new skbs will be queued.
1311 		 */
1312 		spin_unlock(&q->sendq.lock);
1313 
1314 		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1315 		inline_tx_skb(skb, &q->q, wr);
1316 		kfree_skb(skb);
1317 
1318 		written += ndesc;
1319 		txq_advance(&q->q, ndesc);
1320 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1321 			unsigned long old = q->q.stops;
1322 
1323 			ctrlq_check_stop(q, wr);
1324 			if (q->q.stops != old) {          /* suspended anew */
1325 				spin_lock(&q->sendq.lock);
1326 				goto ringdb;
1327 			}
1328 		}
1329 		if (written > 16) {
1330 			ring_tx_db(q->adap, &q->q, written);
1331 			written = 0;
1332 		}
1333 		spin_lock(&q->sendq.lock);
1334 	}
1335 	q->full = 0;
1336 ringdb: if (written)
1337 		ring_tx_db(q->adap, &q->q, written);
1338 	spin_unlock(&q->sendq.lock);
1339 }
1340 
1341 /**
1342  *	t4_mgmt_tx - send a management message
1343  *	@adap: the adapter
1344  *	@skb: the packet containing the management message
1345  *
1346  *	Send a management message through control queue 0.
1347  */
1348 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1349 {
1350 	int ret;
1351 
1352 	local_bh_disable();
1353 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1354 	local_bh_enable();
1355 	return ret;
1356 }
1357 
1358 /**
1359  *	is_ofld_imm - check whether a packet can be sent as immediate data
1360  *	@skb: the packet
1361  *
1362  *	Returns true if a packet can be sent as an offload WR with immediate
1363  *	data.  We currently use the same limit as for Ethernet packets.
1364  */
1365 static inline int is_ofld_imm(const struct sk_buff *skb)
1366 {
1367 	return skb->len <= MAX_IMM_TX_PKT_LEN;
1368 }
1369 
1370 /**
1371  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
1372  *	@skb: the packet
1373  *
1374  *	Returns the number of flits needed for the given offload packet.
1375  *	These packets are already fully constructed and no additional headers
1376  *	will be added.
1377  */
1378 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1379 {
1380 	unsigned int flits, cnt;
1381 
1382 	if (is_ofld_imm(skb))
1383 		return DIV_ROUND_UP(skb->len, 8);
1384 
1385 	flits = skb_transport_offset(skb) / 8U;   /* headers */
1386 	cnt = skb_shinfo(skb)->nr_frags;
1387 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1388 		cnt++;
1389 	return flits + sgl_len(cnt);
1390 }
1391 
1392 /**
1393  *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@q: the queue to stop
1396  *
1397  *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1398  *	inability to map packets.  A periodic timer attempts to restart
1399  *	queues so marked.
1400  */
1401 static void txq_stop_maperr(struct sge_ofld_txq *q)
1402 {
1403 	q->mapping_err++;
1404 	q->q.stops++;
1405 	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1406 		q->adap->sge.txq_maperr);
1407 }
1408 
1409 /**
1410  *	ofldtxq_stop - stop an offload Tx queue that has become full
1411  *	@q: the queue to stop
1412  *	@skb: the packet causing the queue to become full
1413  *
1414  *	Stops an offload Tx queue that has become full and modifies the packet
1415  *	being written to request a wakeup.
1416  */
1417 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1418 {
1419 	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1420 
1421 	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1422 	q->q.stops++;
1423 	q->full = 1;
1424 }
1425 
1426 /**
1427  *	service_ofldq - restart a suspended offload queue
1428  *	@q: the offload queue
1429  *
1430  *	Services an offload Tx queue by moving packets from its packet queue
1431  *	to the HW Tx ring.  The function starts and ends with the queue locked.
1432  */
1433 static void service_ofldq(struct sge_ofld_txq *q)
1434 {
1435 	u64 *pos;
1436 	int credits;
1437 	struct sk_buff *skb;
1438 	unsigned int written = 0;
1439 	unsigned int flits, ndesc;
1440 
1441 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1442 		/*
1443 		 * We drop the lock but leave skb on sendq, thus retaining
1444 		 * exclusive access to the state of the queue.
1445 		 */
1446 		spin_unlock(&q->sendq.lock);
1447 
1448 		reclaim_completed_tx(q->adap, &q->q, false);
1449 
1450 		flits = skb->priority;                /* previously saved */
1451 		ndesc = flits_to_desc(flits);
1452 		credits = txq_avail(&q->q) - ndesc;
1453 		BUG_ON(credits < 0);
1454 		if (unlikely(credits < TXQ_STOP_THRES))
1455 			ofldtxq_stop(q, skb);
1456 
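		/*
		 * Immediate data is copied straight into the descriptor
		 * ring; otherwise map_skb() stashes the DMA addresses in
		 * the skb headroom (skb->head) where the deferred unmap
		 * destructor can later find them.
		 */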
1457 		pos = (u64 *)&q->q.desc[q->q.pidx];
1458 		if (is_ofld_imm(skb))
1459 			inline_tx_skb(skb, &q->q, pos);
1460 		else if (map_skb(q->adap->pdev_dev, skb,
1461 				 (dma_addr_t *)skb->head)) {
1462 			txq_stop_maperr(q);
1463 			spin_lock(&q->sendq.lock);
1464 			break;
1465 		} else {
1466 			int last_desc, hdr_len = skb_transport_offset(skb);
1467 
1468 			memcpy(pos, skb->data, hdr_len);
1469 			write_sgl(skb, &q->q, (void *)pos + hdr_len,
1470 				  pos + flits, hdr_len,
1471 				  (dma_addr_t *)skb->head);
1472 #ifdef CONFIG_NEED_DMA_MAP_STATE
1473 			skb->dev = q->adap->port[0];
1474 			skb->destructor = deferred_unmap_destructor;
1475 #endif
1476 			last_desc = q->q.pidx + ndesc - 1;
1477 			if (last_desc >= q->q.size)
1478 				last_desc -= q->q.size;
1479 			q->q.sdesc[last_desc].skb = skb;
1480 		}
1481 
1482 		txq_advance(&q->q, ndesc);
1483 		written += ndesc;
1484 		if (unlikely(written > 32)) {
1485 			ring_tx_db(q->adap, &q->q, written);
1486 			written = 0;
1487 		}
1488 
1489 		spin_lock(&q->sendq.lock);
1490 		__skb_unlink(skb, &q->sendq);
1491 		if (is_ofld_imm(skb))
1492 			kfree_skb(skb);
1493 	}
1494 	if (likely(written))
1495 		ring_tx_db(q->adap, &q->q, written);
1496 }
1497 
1498 /**
1499  *	ofld_xmit - send a packet through an offload queue
1500  *	@q: the Tx offload queue
1501  *	@skb: the packet
1502  *
1503  *	Send an offload packet through an SGE offload queue.
1504  */
1505 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1506 {
1507 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
1508 	spin_lock(&q->sendq.lock);
1509 	__skb_queue_tail(&q->sendq, skb);
1510 	if (q->sendq.qlen == 1)
1511 		service_ofldq(q);
1512 	spin_unlock(&q->sendq.lock);
1513 	return NET_XMIT_SUCCESS;
1514 }
1515 
1516 /**
1517  *	restart_ofldq - restart a suspended offload queue
1518  *	@data: the offload queue to restart
1519  *
1520  *	Resumes transmission on a suspended Tx offload queue.
1521  */
1522 static void restart_ofldq(unsigned long data)
1523 {
1524 	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1525 
1526 	spin_lock(&q->sendq.lock);
1527 	q->full = 0;            /* the queue actually is completely empty now */
1528 	service_ofldq(q);
1529 	spin_unlock(&q->sendq.lock);
1530 }
1531 
1532 /**
1533  *	skb_txq - return the Tx queue an offload packet should use
1534  *	@skb: the packet
1535  *
1536  *	Returns the Tx queue an offload packet should use as indicated by bits
1537  *	1-15 in the packet's queue_mapping.
1538  */
1539 static inline unsigned int skb_txq(const struct sk_buff *skb)
1540 {
1541 	return skb->queue_mapping >> 1;
1542 }
1543 
1544 /**
1545  *	is_ctrl_pkt - return whether an offload packet is a control packet
1546  *	@skb: the packet
1547  *
1548  *	Returns whether an offload packet should use an OFLD or a CTRL
1549  *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
1550  */
1551 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1552 {
1553 	return skb->queue_mapping & 1;
1554 }
1555 
1556 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1557 {
1558 	unsigned int idx = skb_txq(skb);
1559 
1560 	if (unlikely(is_ctrl_pkt(skb))) {
1561 		/* Single ctrl queue is a requirement for LE workaround path */
1562 		if (adap->tids.nsftids)
1563 			idx = 0;
1564 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1565 	}
1566 	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1567 }
1568 
1569 /**
1570  *	t4_ofld_send - send an offload packet
1571  *	@adap: the adapter
1572  *	@skb: the packet
1573  *
1574  *	Sends an offload packet.  We use the packet queue_mapping to select the
1575  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1576  *	should be sent as regular or control, bits 1-15 select the queue.
1577  */
1578 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1579 {
1580 	int ret;
1581 
1582 	local_bh_disable();
1583 	ret = ofld_send(adap, skb);
1584 	local_bh_enable();
1585 	return ret;
1586 }
1587 
1588 /**
1589  *	cxgb4_ofld_send - send an offload packet
1590  *	@dev: the net device
1591  *	@skb: the packet
1592  *
1593  *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
1594  *	intended for ULDs.
1595  */
1596 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1597 {
1598 	return t4_ofld_send(netdev2adap(dev), skb);
1599 }
1600 EXPORT_SYMBOL(cxgb4_ofld_send);
1601 
1602 static inline void copy_frags(struct sk_buff *skb,
1603 			      const struct pkt_gl *gl, unsigned int offset)
1604 {
1605 	int i;
1606 
1607 	/* usually there's just one frag */
1608 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1609 			     gl->frags[0].offset + offset,
1610 			     gl->frags[0].size - offset);
1611 	skb_shinfo(skb)->nr_frags = gl->nfrags;
1612 	for (i = 1; i < gl->nfrags; i++)
1613 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1614 				     gl->frags[i].offset,
1615 				     gl->frags[i].size);
1616 
1617 	/* get a reference to the last page, we don't own it */
1618 	get_page(gl->frags[gl->nfrags - 1].page);
1619 }
1620 
1621 /**
1622  *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1623  *	@gl: the gather list
1624  *	@skb_len: size of sk_buff main body if it carries fragments
1625  *	@pull_len: amount of data to move to the sk_buff's main body
1626  *
1627  *	Builds an sk_buff from the given packet gather list.  Returns the
1628  *	sk_buff or %NULL if sk_buff allocation failed.
1629  */
1630 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1631 				   unsigned int skb_len, unsigned int pull_len)
1632 {
1633 	struct sk_buff *skb;
1634 
1635 	/*
1636 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1637 	 * size, which is expected since buffers are at least PAGE_SIZEd.
1638 	 * In this case packets up to RX_COPY_THRES have only one fragment.
1639 	 */
1640 	if (gl->tot_len <= RX_COPY_THRES) {
1641 		skb = dev_alloc_skb(gl->tot_len);
1642 		if (unlikely(!skb))
1643 			goto out;
1644 		__skb_put(skb, gl->tot_len);
1645 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1646 	} else {
1647 		skb = dev_alloc_skb(skb_len);
1648 		if (unlikely(!skb))
1649 			goto out;
1650 		__skb_put(skb, pull_len);
1651 		skb_copy_to_linear_data(skb, gl->va, pull_len);
1652 
1653 		copy_frags(skb, gl, pull_len);
1654 		skb->len = gl->tot_len;
1655 		skb->data_len = skb->len - pull_len;
1656 		skb->truesize += skb->data_len;
1657 	}
1658 out:	return skb;
1659 }
1660 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1661 
1662 /**
1663  *	t4_pktgl_free - free a packet gather list
1664  *	@gl: the gather list
1665  *
1666  *	Releases the pages of a packet gather list.  We do not own the last
1667  *	page on the list and do not free it.
1668  */
1669 static void t4_pktgl_free(const struct pkt_gl *gl)
1670 {
1671 	int n;
1672 	const struct page_frag *p;
1673 
1674 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1675 		put_page(p->page);
1676 }
1677 
1678 /*
1679  * Process an MPS trace packet.  Give it an unused protocol number so it won't
1680  * be delivered to anyone and send it to the stack for capture.
1681  */
1682 static noinline int handle_trace_pkt(struct adapter *adap,
1683 				     const struct pkt_gl *gl)
1684 {
1685 	struct sk_buff *skb;
1686 
1687 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1688 	if (unlikely(!skb)) {
1689 		t4_pktgl_free(gl);
1690 		return 0;
1691 	}
1692 
1693 	if (is_t4(adap->params.chip))
1694 		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
1695 	else
1696 		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1697 
1698 	skb_reset_mac_header(skb);
1699 	skb->protocol = htons(0xffff);
1700 	skb->dev = adap->port[0];
1701 	netif_receive_skb(skb);
1702 	return 0;
1703 }
1704 
1705 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1706 		   const struct cpl_rx_pkt *pkt)
1707 {
1708 	struct adapter *adapter = rxq->rspq.adap;
1709 	struct sge *s = &adapter->sge;
1710 	int ret;
1711 	struct sk_buff *skb;
1712 
1713 	skb = napi_get_frags(&rxq->rspq.napi);
1714 	if (unlikely(!skb)) {
1715 		t4_pktgl_free(gl);
1716 		rxq->stats.rx_drops++;
1717 		return;
1718 	}
1719 
1720 	copy_frags(skb, gl, s->pktshift);
1721 	skb->len = gl->tot_len - s->pktshift;
1722 	skb->data_len = skb->len;
1723 	skb->truesize += skb->data_len;
1724 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1725 	skb_record_rx_queue(skb, rxq->rspq.idx);
1726 	skb_mark_napi_id(skb, &rxq->rspq.napi);
1727 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1728 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1729 			     PKT_HASH_TYPE_L3);
1730 
1731 	if (unlikely(pkt->vlan_ex)) {
1732 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1733 		rxq->stats.vlan_ex++;
1734 	}
1735 	ret = napi_gro_frags(&rxq->rspq.napi);
1736 	if (ret == GRO_HELD)
1737 		rxq->stats.lro_pkts++;
1738 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1739 		rxq->stats.lro_merged++;
1740 	rxq->stats.pkts++;
1741 	rxq->stats.rx_cso++;
1742 }
1743 
1744 /**
1745  *	t4_ethrx_handler - process an ingress ethernet packet
1746  *	@q: the response queue that received the packet
1747  *	@rsp: the response queue descriptor holding the RX_PKT message
1748  *	@si: the gather list of packet fragments
1749  *
1750  *	Process an ingress ethernet packet and deliver it to the stack.
1751  */
1752 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1753 		     const struct pkt_gl *si)
1754 {
1755 	bool csum_ok;
1756 	struct sk_buff *skb;
1757 	const struct cpl_rx_pkt *pkt;
1758 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1759 	struct sge *s = &q->adap->sge;
1760 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
1761 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1762 
1763 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1764 		return handle_trace_pkt(q->adap, si);
1765 
1766 	pkt = (const struct cpl_rx_pkt *)rsp;
1767 	csum_ok = pkt->csum_calc && !pkt->err_vec &&
1768 		  (q->netdev->features & NETIF_F_RXCSUM);
1769 	if ((pkt->l2info & htonl(RXF_TCP_F)) &&
1770 	    !(cxgb_poll_busy_polling(q)) &&
1771 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1772 		do_gro(rxq, si, pkt);
1773 		return 0;
1774 	}
1775 
1776 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1777 	if (unlikely(!skb)) {
1778 		t4_pktgl_free(si);
1779 		rxq->stats.rx_drops++;
1780 		return 0;
1781 	}
1782 
1783 	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
1784 	skb->protocol = eth_type_trans(skb, q->netdev);
1785 	skb_record_rx_queue(skb, q->idx);
1786 	if (skb->dev->features & NETIF_F_RXHASH)
1787 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1788 			     PKT_HASH_TYPE_L3);
1789 
1790 	rxq->stats.pkts++;
1791 
1792 	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
1793 		if (!pkt->ip_frag) {
1794 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1795 			rxq->stats.rx_cso++;
1796 		} else if (pkt->l2info & htonl(RXF_IP_F)) {
1797 			__sum16 c = (__force __sum16)pkt->csum;
1798 			skb->csum = csum_unfold(c);
1799 			skb->ip_summed = CHECKSUM_COMPLETE;
1800 			rxq->stats.rx_cso++;
1801 		}
	} else {
		skb_checksum_none_assert(skb);
	}
1804 
1805 	if (unlikely(pkt->vlan_ex)) {
1806 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1807 		rxq->stats.vlan_ex++;
1808 	}
1809 	skb_mark_napi_id(skb, &q->napi);
1810 	netif_receive_skb(skb);
1811 	return 0;
1812 }
1813 
1814 /**
1815  *	restore_rx_bufs - put back a packet's Rx buffers
1816  *	@si: the packet gather list
1817  *	@q: the SGE free list
1818  *	@frags: number of FL buffers to restore
1819  *
1820  *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped; we mark them so
 *	as to prevent further unmapping attempts.
1823  *
1824  *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and we
1826  *	need to come back to it later.  This is a very rare event and there's
1827  *	no effort to make this particularly efficient.
1828  */
1829 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1830 			    int frags)
1831 {
1832 	struct rx_sw_desc *d;
1833 
1834 	while (frags--) {
1835 		if (q->cidx == 0)
1836 			q->cidx = q->size - 1;
1837 		else
1838 			q->cidx--;
1839 		d = &q->sdesc[q->cidx];
1840 		d->page = si->frags[frags].page;
1841 		d->dma_addr |= RX_UNMAPPED_BUF;
1842 		q->avail++;
1843 	}
1844 }
1845 
1846 /**
1847  *	is_new_response - check if a response is newly written
1848  *	@r: the response descriptor
1849  *	@q: the response queue
1850  *
 *	Returns true if the response descriptor contains a yet-unprocessed
 *	response.
1853  */
1854 static inline bool is_new_response(const struct rsp_ctrl *r,
1855 				   const struct sge_rspq *q)
1856 {
1857 	return RSPD_GEN(r->type_gen) == q->gen;
1858 }
1859 
1860 /**
1861  *	rspq_next - advance to the next entry in a response queue
1862  *	@q: the queue
1863  *
1864  *	Updates the state of a response queue to advance it to the next entry.
1865  */
1866 static inline void rspq_next(struct sge_rspq *q)
1867 {
1868 	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1869 	if (unlikely(++q->cidx == q->size)) {
1870 		q->cidx = 0;
1871 		q->gen ^= 1;
1872 		q->cur_desc = q->desc;
1873 	}
1874 }
1875 
1876 /**
1877  *	process_responses - process responses from an SGE response queue
1878  *	@q: the ingress queue to process
1879  *	@budget: how many responses can be processed in this round
1880  *
1881  *	Process responses from an SGE response queue up to the supplied budget.
1882  *	Responses include received packets as well as control messages from FW
1883  *	or HW.
1884  *
 *	Additionally, choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage, use a fairly
 *	long delay to aid recovery.
1888  */
1889 static int process_responses(struct sge_rspq *q, int budget)
1890 {
1891 	int ret, rsp_type;
1892 	int budget_left = budget;
1893 	const struct rsp_ctrl *rc;
1894 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1895 	struct adapter *adapter = q->adap;
1896 	struct sge *s = &adapter->sge;
1897 
1898 	while (likely(budget_left)) {
1899 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1900 		if (!is_new_response(rc, q))
1901 			break;
1902 
1903 		rmb();
1904 		rsp_type = RSPD_TYPE(rc->type_gen);
1905 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1906 			struct page_frag *fp;
1907 			struct pkt_gl si;
1908 			const struct rx_sw_desc *rsd;
1909 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1910 
1911 			if (len & RSPD_NEWBUF) {
1912 				if (likely(q->offset > 0)) {
1913 					free_rx_bufs(q->adap, &rxq->fl, 1);
1914 					q->offset = 0;
1915 				}
1916 				len = RSPD_LEN(len);
1917 			}
1918 			si.tot_len = len;
1919 
1920 			/* gather packet fragments */
1921 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
1922 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1923 				bufsz = get_buf_size(adapter, rsd);
1924 				fp->page = rsd->page;
1925 				fp->offset = q->offset;
1926 				fp->size = min(bufsz, len);
1927 				len -= fp->size;
1928 				if (!len)
1929 					break;
1930 				unmap_rx_buf(q->adap, &rxq->fl);
1931 			}
1932 
1933 			/*
1934 			 * Last buffer remains mapped so explicitly make it
1935 			 * coherent for CPU access.
1936 			 */
1937 			dma_sync_single_for_cpu(q->adap->pdev_dev,
1938 						get_buf_addr(rsd),
1939 						fp->size, DMA_FROM_DEVICE);
1940 
1941 			si.va = page_address(si.frags[0].page) +
1942 				si.frags[0].offset;
1943 			prefetch(si.va);
1944 
1945 			si.nfrags = frags + 1;
1946 			ret = q->handler(q, q->cur_desc, &si);
1947 			if (likely(ret == 0))
1948 				q->offset += ALIGN(fp->size, s->fl_align);
1949 			else
1950 				restore_rx_bufs(&si, &rxq->fl, frags);
1951 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
1952 			ret = q->handler(q, q->cur_desc, NULL);
1953 		} else {
1954 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1955 		}
1956 
1957 		if (unlikely(ret)) {
1958 			/* couldn't process descriptor, back off for recovery */
1959 			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1960 			break;
1961 		}
1962 
1963 		rspq_next(q);
1964 		budget_left--;
1965 	}
1966 
1967 	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1968 		__refill_fl(q->adap, &rxq->fl);
1969 	return budget - budget_left;
1970 }
1971 
1972 #ifdef CONFIG_NET_RX_BUSY_POLL
1973 int cxgb_busy_poll(struct napi_struct *napi)
1974 {
1975 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1976 	unsigned int params, work_done;
1977 	u32 val;
1978 
1979 	if (!cxgb_poll_lock_poll(q))
1980 		return LL_FLUSH_BUSY;
1981 
1982 	work_done = process_responses(q, 4);
1983 	params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
1984 	q->next_intr_params = params;
1985 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
1986 
1987 	/* If we don't have access to the new User GTS (T5+), use the old
1988 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1989 	 */
	if (unlikely(!q->bar2_addr)) {
		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V((u32)q->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
1998 
1999 	cxgb_poll_unlock_poll(q);
2000 	return work_done;
2001 }
2002 #endif /* CONFIG_NET_RX_BUSY_POLL */
2003 
2004 /**
2005  *	napi_rx_handler - the NAPI handler for Rx processing
2006  *	@napi: the napi instance
2007  *	@budget: how many packets we can process in this round
2008  *
2009  *	Handler for new data events when using NAPI.  This does not need any
2010  *	locking or protection from interrupts as data interrupts are off at
2011  *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
2013  *	a separate handler).
2014  */
2015 static int napi_rx_handler(struct napi_struct *napi, int budget)
2016 {
2017 	unsigned int params;
2018 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2019 	int work_done;
2020 	u32 val;
2021 
2022 	if (!cxgb_poll_lock_napi(q))
2023 		return budget;
2024 
2025 	work_done = process_responses(q, budget);
2026 	if (likely(work_done < budget)) {
2027 		int timer_index;
2028 
2029 		napi_complete(napi);
2030 		timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
2031 
2032 		if (q->adaptive_rx) {
2033 			if (work_done > max(timer_pkt_quota[timer_index],
2034 					    MIN_NAPI_WORK))
				timer_index = timer_index + 1;
			else
				timer_index = timer_index - 1;
2038 
2039 			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2040 			q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
2041 							      V_QINTR_CNT_EN;
2042 			params = q->next_intr_params;
2043 		} else {
2044 			params = q->next_intr_params;
2045 			q->next_intr_params = q->intr_params;
2046 		}
	} else {
		params = QINTR_TIMER_IDX(7);
	}
2049 
2050 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
2051 
2052 	/* If we don't have access to the new User GTS (T5+), use the old
2053 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2054 	 */
2055 	if (unlikely(q->bar2_addr == NULL)) {
2056 		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2057 			     val | INGRESSQID_V((u32)q->cntxt_id));
2058 	} else {
2059 		writel(val | INGRESSQID_V(q->bar2_qid),
2060 		       q->bar2_addr + SGE_UDB_GTS);
2061 		wmb();
2062 	}
2063 	cxgb_poll_unlock_napi(q);
2064 	return work_done;
2065 }
2066 
2067 /*
2068  * The MSI-X interrupt handler for an SGE response queue.
2069  */
2070 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2071 {
2072 	struct sge_rspq *q = cookie;
2073 
2074 	napi_schedule(&q->napi);
2075 	return IRQ_HANDLED;
2076 }
2077 
2078 /*
2079  * Process the indirect interrupt entries in the interrupt queue and kick off
2080  * NAPI for each queue that has generated an entry.
2081  */
2082 static unsigned int process_intrq(struct adapter *adap)
2083 {
2084 	unsigned int credits;
2085 	const struct rsp_ctrl *rc;
2086 	struct sge_rspq *q = &adap->sge.intrq;
2087 	u32 val;
2088 
2089 	spin_lock(&adap->sge.intrq_lock);
2090 	for (credits = 0; ; credits++) {
2091 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2092 		if (!is_new_response(rc, q))
2093 			break;
2094 
2095 		rmb();
2096 		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
2097 			unsigned int qid = ntohl(rc->pldbuflen_qid);
2098 
2099 			qid -= adap->sge.ingr_start;
2100 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
2101 		}
2102 
2103 		rspq_next(q);
2104 	}
2105 
	val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2107 
2108 	/* If we don't have access to the new User GTS (T5+), use the old
2109 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2110 	 */
2111 	if (unlikely(q->bar2_addr == NULL)) {
2112 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2113 			     val | INGRESSQID_V(q->cntxt_id));
2114 	} else {
2115 		writel(val | INGRESSQID_V(q->bar2_qid),
2116 		       q->bar2_addr + SGE_UDB_GTS);
2117 		wmb();
2118 	}
2119 	spin_unlock(&adap->sge.intrq_lock);
2120 	return credits;
2121 }
2122 
2123 /*
2124  * The MSI interrupt handler, which handles data events from SGE response queues
2125  * as well as error and other async events as they all use the same MSI vector.
2126  */
2127 static irqreturn_t t4_intr_msi(int irq, void *cookie)
2128 {
2129 	struct adapter *adap = cookie;
2130 
2131 	t4_slow_intr_handler(adap);
2132 	process_intrq(adap);
2133 	return IRQ_HANDLED;
2134 }
2135 
2136 /*
2137  * Interrupt handler for legacy INTx interrupts.
2138  * Handles data events from SGE response queues as well as error and other
 * async events, since they all share the same interrupt line.
2140  */
2141 static irqreturn_t t4_intr_intx(int irq, void *cookie)
2142 {
2143 	struct adapter *adap = cookie;
2144 
2145 	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2146 	if (t4_slow_intr_handler(adap) | process_intrq(adap))
2147 		return IRQ_HANDLED;
2148 	return IRQ_NONE;             /* probably shared interrupt */
2149 }
2150 
2151 /**
2152  *	t4_intr_handler - select the top-level interrupt handler
2153  *	@adap: the adapter
2154  *
2155  *	Selects the top-level interrupt handler based on the type of interrupts
2156  *	(MSI-X, MSI, or INTx).
2157  */
2158 irq_handler_t t4_intr_handler(struct adapter *adap)
2159 {
2160 	if (adap->flags & USING_MSIX)
2161 		return t4_sge_intr_msix;
2162 	if (adap->flags & USING_MSI)
2163 		return t4_intr_msi;
2164 	return t4_intr_intx;
2165 }
2166 
2167 static void sge_rx_timer_cb(unsigned long data)
2168 {
2169 	unsigned long m;
2170 	unsigned int i, idma_same_state_cnt[2];
2171 	struct adapter *adap = (struct adapter *)data;
2172 	struct sge *s = &adap->sge;
2173 
2174 	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
2175 		for (m = s->starving_fl[i]; m; m &= m - 1) {
2176 			struct sge_eth_rxq *rxq;
2177 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2178 			struct sge_fl *fl = s->egr_map[id];
2179 
2180 			clear_bit(id, s->starving_fl);
2181 			smp_mb__after_atomic();
2182 
2183 			if (fl_starving(fl)) {
2184 				rxq = container_of(fl, struct sge_eth_rxq, fl);
2185 				if (napi_reschedule(&rxq->rspq.napi))
2186 					fl->starving++;
2187 				else
2188 					set_bit(id, s->starving_fl);
2189 			}
2190 		}
2191 
2192 	t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
2193 	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
2194 	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2195 
2196 	for (i = 0; i < 2; i++) {
2197 		u32 debug0, debug11;
2198 
2199 		/* If the Ingress DMA Same State Counter ("timer") is less
2200 		 * than 1s, then we can reset our synthesized Stall Timer and
2201 		 * continue.  If we have previously emitted warnings about a
2202 		 * potential stalled Ingress Queue, issue a note indicating
2203 		 * that the Ingress Queue has resumed forward progress.
2204 		 */
2205 		if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
2206 			if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
2207 				CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
2208 					i, s->idma_qid[i],
2209 					s->idma_stalled[i]/HZ);
2210 			s->idma_stalled[i] = 0;
2211 			continue;
2212 		}
2213 
2214 		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
2215 		 * domain.  The first time we get here it'll be because we
2216 		 * passed the 1s Threshold; each additional time it'll be
2217 		 * because the RX Timer Callback is being fired on its regular
2218 		 * schedule.
2219 		 *
2220 		 * If the stall is below our Potential Hung Ingress Queue
2221 		 * Warning Threshold, continue.
2222 		 */
2223 		if (s->idma_stalled[i] == 0)
2224 			s->idma_stalled[i] = HZ;
2225 		else
2226 			s->idma_stalled[i] += RX_QCHECK_PERIOD;
2227 
2228 		if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
2229 			continue;
2230 
2231 		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
2232 		if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
2233 			continue;
2234 
2235 		/* Read and save the SGE IDMA State and Queue ID information.
2236 		 * We do this every time in case it changes across time ...
2237 		 */
2238 		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
2239 		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2240 		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
2241 
2242 		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
2243 		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2244 		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
2245 
2246 		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
2247 			i, s->idma_qid[i], s->idma_state[i],
			s->idma_stalled[i] / HZ, debug0, debug11);
2249 		t4_sge_decode_idma_state(adap, s->idma_state[i]);
2250 	}
2251 
2252 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2253 }
2254 
2255 static void sge_tx_timer_cb(unsigned long data)
2256 {
2257 	unsigned long m;
2258 	unsigned int i, budget;
2259 	struct adapter *adap = (struct adapter *)data;
2260 	struct sge *s = &adap->sge;
2261 
2262 	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
2263 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
2264 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2265 			struct sge_ofld_txq *txq = s->egr_map[id];
2266 
2267 			clear_bit(id, s->txq_maperr);
2268 			tasklet_schedule(&txq->qresume_tsk);
2269 		}
2270 
2271 	budget = MAX_TIMER_TX_RECLAIM;
2272 	i = s->ethtxq_rover;
2273 	do {
2274 		struct sge_eth_txq *q = &s->ethtxq[i];
2275 
2276 		if (q->q.in_use &&
2277 		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2278 		    __netif_tx_trylock(q->txq)) {
2279 			int avail = reclaimable(&q->q);
2280 
2281 			if (avail) {
2282 				if (avail > budget)
2283 					avail = budget;
2284 
2285 				free_tx_desc(adap, &q->q, avail, true);
2286 				q->q.in_use -= avail;
2287 				budget -= avail;
2288 			}
2289 			__netif_tx_unlock(q->txq);
2290 		}
2291 
2292 		if (++i >= s->ethqsets)
2293 			i = 0;
2294 	} while (budget && i != s->ethtxq_rover);
2295 	s->ethtxq_rover = i;
2296 	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2297 }
2298 
2299 /**
2300  *	bar2_address - return the BAR2 address for an SGE Queue's Registers
2301  *	@adapter: the adapter
2302  *	@qid: the SGE Queue ID
2303  *	@qtype: the SGE Queue Type (Egress or Ingress)
2304  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2305  *
2306  *	Returns the BAR2 address for the SGE Queue Registers associated with
2307  *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
2308  *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2309  *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2310  *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
2311  */
2312 static void __iomem *bar2_address(struct adapter *adapter,
2313 				  unsigned int qid,
2314 				  enum t4_bar2_qtype qtype,
2315 				  unsigned int *pbar2_qid)
2316 {
2317 	u64 bar2_qoffset;
2318 	int ret;
2319 
2320 	ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
2321 				&bar2_qoffset, pbar2_qid);
2322 	if (ret)
2323 		return NULL;
2324 
2325 	return adapter->bar2 + bar2_qoffset;
2326 }
2327 
2328 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2329 		     struct net_device *dev, int intr_idx,
2330 		     struct sge_fl *fl, rspq_handler_t hnd)
2331 {
2332 	int ret, flsz = 0;
2333 	struct fw_iq_cmd c;
2334 	struct sge *s = &adap->sge;
2335 	struct port_info *pi = netdev_priv(dev);
2336 
	/* Size needs to be a multiple of 16, including the status entry. */
2338 	iq->size = roundup(iq->size, 16);
2339 
2340 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2341 			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
2342 	if (!iq->desc)
2343 		return -ENOMEM;
2344 
2345 	memset(&c, 0, sizeof(c));
2346 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
2347 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2348 			    FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
2349 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
2350 				 FW_LEN16(c));
2351 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2352 		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
2353 		FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
2354 		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2355 							-intr_idx - 1));
2356 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
2357 		FW_IQ_CMD_IQGTSMODE_F |
2358 		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
2359 		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
2360 	c.iqsize = htons(iq->size);
2361 	c.iqaddr = cpu_to_be64(iq->phys_addr);
2362 
2363 	if (fl) {
2364 		fl->size = roundup(fl->size, 8);
2365 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2366 				      sizeof(struct rx_sw_desc), &fl->addr,
2367 				      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2368 		if (!fl->desc)
2369 			goto fl_nomem;
2370 
2371 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2372 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
2373 					    FW_IQ_CMD_FL0FETCHRO_F |
2374 					    FW_IQ_CMD_FL0DATARO_F |
2375 					    FW_IQ_CMD_FL0PADEN_F);
2376 		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
2377 				FW_IQ_CMD_FL0FBMAX_V(3));
2378 		c.fl0size = htons(flsz);
2379 		c.fl0addr = cpu_to_be64(fl->addr);
2380 	}
2381 
2382 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2383 	if (ret)
2384 		goto err;
2385 
2386 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2387 	napi_hash_add(&iq->napi);
2388 	iq->cur_desc = iq->desc;
2389 	iq->cidx = 0;
2390 	iq->gen = 1;
2391 	iq->next_intr_params = iq->intr_params;
2392 	iq->cntxt_id = ntohs(c.iqid);
2393 	iq->abs_id = ntohs(c.physiqid);
2394 	iq->bar2_addr = bar2_address(adap,
2395 				     iq->cntxt_id,
2396 				     T4_BAR2_QTYPE_INGRESS,
2397 				     &iq->bar2_qid);
2398 	iq->size--;                           /* subtract status entry */
2399 	iq->netdev = dev;
2400 	iq->handler = hnd;
2401 
2402 	/* set offset to -1 to distinguish ingress queues without FL */
2403 	iq->offset = fl ? 0 : -1;
2404 
2405 	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2406 
2407 	if (fl) {
2408 		fl->cntxt_id = ntohs(c.fl0id);
2409 		fl->avail = fl->pend_cred = 0;
2410 		fl->pidx = fl->cidx = 0;
2411 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2412 		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2413 
2414 		/* Note, we must initialize the BAR2 Free List User Doorbell
2415 		 * information before refilling the Free List!
2416 		 */
2417 		fl->bar2_addr = bar2_address(adap,
2418 					     fl->cntxt_id,
2419 					     T4_BAR2_QTYPE_EGRESS,
2420 					     &fl->bar2_qid);
2421 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2422 	}
2423 	return 0;
2424 
2425 fl_nomem:
2426 	ret = -ENOMEM;
2427 err:
2428 	if (iq->desc) {
2429 		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2430 				  iq->desc, iq->phys_addr);
2431 		iq->desc = NULL;
2432 	}
2433 	if (fl && fl->desc) {
2434 		kfree(fl->sdesc);
2435 		fl->sdesc = NULL;
2436 		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2437 				  fl->desc, fl->addr);
2438 		fl->desc = NULL;
2439 	}
2440 	return ret;
2441 }
2442 
2443 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2444 {
2445 	q->cntxt_id = id;
2446 	q->bar2_addr = bar2_address(adap,
2447 				    q->cntxt_id,
2448 				    T4_BAR2_QTYPE_EGRESS,
2449 				    &q->bar2_qid);
2450 	q->in_use = 0;
2451 	q->cidx = q->pidx = 0;
2452 	q->stops = q->restarts = 0;
2453 	q->stat = (void *)&q->desc[q->size];
2454 	spin_lock_init(&q->db_lock);
2455 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
2456 }
2457 
2458 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2459 			 struct net_device *dev, struct netdev_queue *netdevq,
2460 			 unsigned int iqid)
2461 {
2462 	int ret, nentries;
2463 	struct fw_eq_eth_cmd c;
2464 	struct sge *s = &adap->sge;
2465 	struct port_info *pi = netdev_priv(dev);
2466 
2467 	/* Add status entries */
2468 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2469 
2470 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2471 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2472 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2473 			netdev_queue_numa_node_read(netdevq));
2474 	if (!txq->q.desc)
2475 		return -ENOMEM;
2476 
2477 	memset(&c, 0, sizeof(c));
2478 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
2479 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2480 			    FW_EQ_ETH_CMD_PFN_V(adap->fn) |
2481 			    FW_EQ_ETH_CMD_VFN_V(0));
2482 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
2483 				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
2484 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2485 			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
2486 	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
2487 				   FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
2488 				   FW_EQ_ETH_CMD_FETCHRO_V(1) |
2489 				   FW_EQ_ETH_CMD_IQID_V(iqid));
2490 	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
2491 				  FW_EQ_ETH_CMD_FBMAX_V(3) |
2492 				  FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
2493 				  FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2494 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2495 
2496 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2497 	if (ret) {
2498 		kfree(txq->q.sdesc);
2499 		txq->q.sdesc = NULL;
2500 		dma_free_coherent(adap->pdev_dev,
2501 				  nentries * sizeof(struct tx_desc),
2502 				  txq->q.desc, txq->q.phys_addr);
2503 		txq->q.desc = NULL;
2504 		return ret;
2505 	}
2506 
2507 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
2508 	txq->txq = netdevq;
2509 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2510 	txq->mapping_err = 0;
2511 	return 0;
2512 }
2513 
2514 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2515 			  struct net_device *dev, unsigned int iqid,
2516 			  unsigned int cmplqid)
2517 {
2518 	int ret, nentries;
2519 	struct fw_eq_ctrl_cmd c;
2520 	struct sge *s = &adap->sge;
2521 	struct port_info *pi = netdev_priv(dev);
2522 
2523 	/* Add status entries */
2524 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2525 
2526 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2527 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2528 				 NULL, 0, NUMA_NO_NODE);
2529 	if (!txq->q.desc)
2530 		return -ENOMEM;
2531 
2532 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
2533 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2534 			    FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
2535 			    FW_EQ_CTRL_CMD_VFN_V(0));
2536 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
2537 				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
2538 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
2539 	c.physeqid_pkd = htonl(0);
2540 	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
2541 				   FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
2542 				   FW_EQ_CTRL_CMD_FETCHRO_F |
2543 				   FW_EQ_CTRL_CMD_IQID_V(iqid));
2544 	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
2545 				  FW_EQ_CTRL_CMD_FBMAX_V(3) |
2546 				  FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
2547 				  FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
2548 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2549 
2550 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2551 	if (ret) {
2552 		dma_free_coherent(adap->pdev_dev,
2553 				  nentries * sizeof(struct tx_desc),
2554 				  txq->q.desc, txq->q.phys_addr);
2555 		txq->q.desc = NULL;
2556 		return ret;
2557 	}
2558 
2559 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
2560 	txq->adap = adap;
2561 	skb_queue_head_init(&txq->sendq);
2562 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2563 	txq->full = 0;
2564 	return 0;
2565 }
2566 
2567 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2568 			  struct net_device *dev, unsigned int iqid)
2569 {
2570 	int ret, nentries;
2571 	struct fw_eq_ofld_cmd c;
2572 	struct sge *s = &adap->sge;
2573 	struct port_info *pi = netdev_priv(dev);
2574 
2575 	/* Add status entries */
2576 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2577 
2578 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2579 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2580 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2581 			NUMA_NO_NODE);
2582 	if (!txq->q.desc)
2583 		return -ENOMEM;
2584 
2585 	memset(&c, 0, sizeof(c));
2586 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
2587 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2588 			    FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
2589 			    FW_EQ_OFLD_CMD_VFN_V(0));
2590 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
2591 				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
2592 	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
2593 				   FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
2594 				   FW_EQ_OFLD_CMD_FETCHRO_F |
2595 				   FW_EQ_OFLD_CMD_IQID_V(iqid));
2596 	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
2597 				  FW_EQ_OFLD_CMD_FBMAX_V(3) |
2598 				  FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
2599 				  FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
2600 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2601 
2602 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2603 	if (ret) {
2604 		kfree(txq->q.sdesc);
2605 		txq->q.sdesc = NULL;
2606 		dma_free_coherent(adap->pdev_dev,
2607 				  nentries * sizeof(struct tx_desc),
2608 				  txq->q.desc, txq->q.phys_addr);
2609 		txq->q.desc = NULL;
2610 		return ret;
2611 	}
2612 
2613 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
2614 	txq->adap = adap;
2615 	skb_queue_head_init(&txq->sendq);
2616 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2617 	txq->full = 0;
2618 	txq->mapping_err = 0;
2619 	return 0;
2620 }
2621 
2622 static void free_txq(struct adapter *adap, struct sge_txq *q)
2623 {
2624 	struct sge *s = &adap->sge;
2625 
2626 	dma_free_coherent(adap->pdev_dev,
2627 			  q->size * sizeof(struct tx_desc) + s->stat_len,
2628 			  q->desc, q->phys_addr);
2629 	q->cntxt_id = 0;
2630 	q->sdesc = NULL;
2631 	q->desc = NULL;
2632 }
2633 
2634 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2635 			 struct sge_fl *fl)
2636 {
2637 	struct sge *s = &adap->sge;
2638 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2639 
2640 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2641 	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2642 		   rq->cntxt_id, fl_id, 0xffff);
2643 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2644 			  rq->desc, rq->phys_addr);
2645 	napi_hash_del(&rq->napi);
2646 	netif_napi_del(&rq->napi);
2647 	rq->netdev = NULL;
2648 	rq->cntxt_id = rq->abs_id = 0;
2649 	rq->desc = NULL;
2650 
2651 	if (fl) {
2652 		free_rx_bufs(adap, fl, fl->avail);
2653 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2654 				  fl->desc, fl->addr);
2655 		kfree(fl->sdesc);
2656 		fl->sdesc = NULL;
2657 		fl->cntxt_id = 0;
2658 		fl->desc = NULL;
2659 	}
2660 }
2661 
2662 /**
2663  *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
2664  *      @adap: the adapter
2665  *      @n: number of queues
2666  *      @q: pointer to first queue
2667  *
2668  *      Release the resources of a consecutive block of offload Rx queues.
2669  */
2670 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
2671 {
2672 	for ( ; n; n--, q++)
2673 		if (q->rspq.desc)
2674 			free_rspq_fl(adap, &q->rspq,
2675 				     q->fl.size ? &q->fl : NULL);
2676 }
2677 
2678 /**
2679  *	t4_free_sge_resources - free SGE resources
2680  *	@adap: the adapter
2681  *
2682  *	Frees resources used by the SGE queue sets.
2683  */
2684 void t4_free_sge_resources(struct adapter *adap)
2685 {
2686 	int i;
2687 	struct sge_eth_rxq *eq = adap->sge.ethrxq;
2688 	struct sge_eth_txq *etq = adap->sge.ethtxq;
2689 
2690 	/* clean up Ethernet Tx/Rx queues */
2691 	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2692 		if (eq->rspq.desc)
2693 			free_rspq_fl(adap, &eq->rspq,
2694 				     eq->fl.size ? &eq->fl : NULL);
2695 		if (etq->q.desc) {
2696 			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2697 				       etq->q.cntxt_id);
2698 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2699 			kfree(etq->q.sdesc);
2700 			free_txq(adap, &etq->q);
2701 		}
2702 	}
2703 
2704 	/* clean up RDMA and iSCSI Rx queues */
2705 	t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
2706 	t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
2707 	t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
2708 
2709 	/* clean up offload Tx queues */
2710 	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2711 		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2712 
2713 		if (q->q.desc) {
2714 			tasklet_kill(&q->qresume_tsk);
2715 			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2716 					q->q.cntxt_id);
2717 			free_tx_desc(adap, &q->q, q->q.in_use, false);
2718 			kfree(q->q.sdesc);
2719 			__skb_queue_purge(&q->sendq);
2720 			free_txq(adap, &q->q);
2721 		}
2722 	}
2723 
2724 	/* clean up control Tx queues */
2725 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2726 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2727 
2728 		if (cq->q.desc) {
2729 			tasklet_kill(&cq->qresume_tsk);
2730 			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2731 					cq->q.cntxt_id);
2732 			__skb_queue_purge(&cq->sendq);
2733 			free_txq(adap, &cq->q);
2734 		}
2735 	}
2736 
2737 	if (adap->sge.fw_evtq.desc)
2738 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2739 
2740 	if (adap->sge.intrq.desc)
2741 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
2742 
2743 	/* clear the reverse egress queue map */
2744 	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2745 }
2746 
2747 void t4_sge_start(struct adapter *adap)
2748 {
2749 	adap->sge.ethtxq_rover = 0;
2750 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2751 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2752 }
2753 
2754 /**
2755  *	t4_sge_stop - disable SGE operation
2756  *	@adap: the adapter
2757  *
2758  *	Stop tasklets and timers associated with the DMA engine.  Note that
2759  *	this is effective only if measures have been taken to disable any HW
2760  *	events that may restart them.
2761  */
2762 void t4_sge_stop(struct adapter *adap)
2763 {
2764 	int i;
2765 	struct sge *s = &adap->sge;
2766 
2767 	if (in_interrupt())  /* actions below require waiting */
2768 		return;
2769 
2770 	if (s->rx_timer.function)
2771 		del_timer_sync(&s->rx_timer);
2772 	if (s->tx_timer.function)
2773 		del_timer_sync(&s->tx_timer);
2774 
2775 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2776 		struct sge_ofld_txq *q = &s->ofldtxq[i];
2777 
2778 		if (q->q.desc)
2779 			tasklet_kill(&q->qresume_tsk);
2780 	}
2781 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2782 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
2783 
2784 		if (cq->q.desc)
2785 			tasklet_kill(&cq->qresume_tsk);
2786 	}
2787 }
2788 
2789 /**
2790  *	t4_sge_init_soft - grab core SGE values needed by SGE code
2791  *	@adap: the adapter
2792  *
 *	Grab the SGE operating parameters that we depend on to do our job
 *	and verify that we can live with them.
2795  */
2796 
2797 static int t4_sge_init_soft(struct adapter *adap)
2798 {
2799 	struct sge *s = &adap->sge;
2800 	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2801 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2802 	u32 ingress_rx_threshold;
2803 
2804 	/*
2805 	 * Verify that CPL messages are going to the Ingress Queue for
2806 	 * process_responses() and that only packet data is going to the
2807 	 * Free Lists.
2808 	 */
2809 	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
2810 	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2811 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2812 		return -EINVAL;
2813 	}
2814 
2815 	/*
2816 	 * Validate the Host Buffer Register Array indices that we want to
2817 	 * use ...
2818 	 *
2819 	 * XXX Note that we should really read through the Host Buffer Size
2820 	 * XXX register array and find the indices of the Buffer Sizes which
2821 	 * XXX meet our needs!
2822 	 */
2823 	#define READ_FL_BUF(x) \
2824 		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
2825 
2826 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2827 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2828 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2829 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2830 
2831 	/* We only bother using the Large Page logic if the Large Page Buffer
2832 	 * is larger than our Page Size Buffer.
2833 	 */
2834 	if (fl_large_pg <= fl_small_pg)
2835 		fl_large_pg = 0;
2836 
2837 	#undef READ_FL_BUF
2838 
2839 	/* The Page Size Buffer must be exactly equal to our Page Size and the
2840 	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2841 	 */
2842 	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
2844 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2845 			fl_small_pg, fl_large_pg);
2846 		return -EINVAL;
2847 	}
2848 	if (fl_large_pg)
2849 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2850 
2851 	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
2852 	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2853 		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
2854 			fl_small_mtu, fl_large_mtu);
2855 		return -EINVAL;
2856 	}
2857 
2858 	/*
2859 	 * Retrieve our RX interrupt holdoff timer values and counter
2860 	 * threshold values from the SGE parameters.
2861 	 */
2862 	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
2863 	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
2864 	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
2865 	s->timer_val[0] = core_ticks_to_us(adap,
2866 		TIMERVALUE0_G(timer_value_0_and_1));
2867 	s->timer_val[1] = core_ticks_to_us(adap,
2868 		TIMERVALUE1_G(timer_value_0_and_1));
2869 	s->timer_val[2] = core_ticks_to_us(adap,
2870 		TIMERVALUE2_G(timer_value_2_and_3));
2871 	s->timer_val[3] = core_ticks_to_us(adap,
2872 		TIMERVALUE3_G(timer_value_2_and_3));
2873 	s->timer_val[4] = core_ticks_to_us(adap,
2874 		TIMERVALUE4_G(timer_value_4_and_5));
2875 	s->timer_val[5] = core_ticks_to_us(adap,
2876 		TIMERVALUE5_G(timer_value_4_and_5));
2877 
2878 	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
2879 	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
2880 	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
2881 	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
2882 	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
2883 
2884 	return 0;
2885 }
2886 
2887 /**
2888  *     t4_sge_init - initialize SGE
2889  *     @adap: the adapter
2890  *
2891  *     Perform low-level SGE code initialization needed every time after a
2892  *     chip reset.
2893  */
2894 int t4_sge_init(struct adapter *adap)
2895 {
2896 	struct sge *s = &adap->sge;
2897 	u32 sge_control, sge_control2, sge_conm_ctrl;
2898 	unsigned int ingpadboundary, ingpackboundary;
2899 	int ret, egress_threshold;
2900 
2901 	/*
2902 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
2903 	 * t4_fixup_host_params().
2904 	 */
2905 	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
2906 	s->pktshift = PKTSHIFT_G(sge_control);
2907 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
2908 
2909 	/* T4 uses a single control field to specify both the PCIe Padding and
2910 	 * Packing Boundary.  T5 introduced the ability to specify these
2911 	 * separately.  The actual Ingress Packet Data alignment boundary
2912 	 * within Packed Buffer Mode is the maximum of these two
2913 	 * specifications.
2914 	 */
2915 	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
2916 			       INGPADBOUNDARY_SHIFT_X);
2917 	if (is_t4(adap->params.chip)) {
2918 		s->fl_align = ingpadboundary;
2919 	} else {
2920 		/* T5 has a different interpretation of one of the PCIe Packing
2921 		 * Boundary values.
2922 		 */
2923 		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
2924 		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
2925 		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
2926 			ingpackboundary = 16;
2927 		else
2928 			ingpackboundary = 1 << (ingpackboundary +
2929 						INGPACKBOUNDARY_SHIFT_X);
2930 
2931 		s->fl_align = max(ingpadboundary, ingpackboundary);
2932 	}
2933 
2934 	ret = t4_sge_init_soft(adap);
2935 	if (ret < 0)
2936 		return ret;
2937 
2938 	/*
2939 	 * A FL with <= fl_starve_thres buffers is starving and a periodic
2940 	 * timer will attempt to refill it.  This needs to be larger than the
2941 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
2942 	 * stuck waiting for new packets while the SGE is waiting for us to
2943 	 * give it more Free List entries.  (Note that the SGE's Egress
2944 	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
2945 	 * there was only a single field to control this.  For T5 there's the
2946 	 * original field which now only applies to Unpacked Mode Free List
2947 	 * buffers and a new field which only applies to Packed Mode Free List
2948 	 * buffers.
2949 	 */
2950 	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
2951 	if (is_t4(adap->params.chip))
2952 		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
2953 	else
2954 		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
	s->fl_starve_thres = 2 * egress_threshold + 1;
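	/* E.g. an Egress Congestion Threshold of 16 covers 2 * 16 == 32
	 * Free List pointers, so the starvation cutoff computed above
	 * becomes 2 * 16 + 1 == 33 buffers.
	 */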
2956 
2957 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2958 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2959 	s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
2960 	s->idma_stalled[0] = 0;
2961 	s->idma_stalled[1] = 0;
2962 	spin_lock_init(&s->intrq_lock);
2963 
2964 	return 0;
2965 }
2966