1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
39 #include <linux/ip.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
44 #include <net/ipv6.h>
45 #include <net/tcp.h>
46 #include <net/busy_poll.h>
47 #ifdef CONFIG_CHELSIO_T4_FCOE
48 #include <scsi/fc/fc_fcoe.h>
49 #endif /* CONFIG_CHELSIO_T4_FCOE */
50 #include "cxgb4.h"
51 #include "t4_regs.h"
52 #include "t4_values.h"
53 #include "t4_msg.h"
54 #include "t4fw_api.h"
55 
56 /*
57  * Rx buffer size.  We use largish buffers if possible but settle for single
58  * pages under memory shortage.
59  */
60 #if PAGE_SHIFT >= 16
61 # define FL_PG_ORDER 0
62 #else
63 # define FL_PG_ORDER (16 - PAGE_SHIFT)
64 #endif
65 
66 /* RX_PULL_LEN should be <= RX_COPY_THRES */
67 #define RX_COPY_THRES    256
68 #define RX_PULL_LEN      128
69 
70 /*
71  * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
72  * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
73  */
74 #define RX_PKT_SKB_LEN   512
75 
76 /*
77  * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  As long as we
 * free packets faster than they arrive, we eventually catch up and keep the
 * amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
81  */
82 #define MAX_TX_RECLAIM 16
83 
84 /*
85  * Max number of Rx buffers we replenish at a time.  Again keep this modest,
86  * allocating buffers isn't cheap either.
87  */
88 #define MAX_RX_REFILL 16U
89 
90 /*
91  * Period of the Rx queue check timer.  This timer is infrequent as it has
92  * something to do only when the system experiences severe memory shortage.
93  */
94 #define RX_QCHECK_PERIOD (HZ / 2)
95 
96 /*
97  * Period of the Tx queue check timer.
98  */
99 #define TX_QCHECK_PERIOD (HZ / 2)
100 
101 /*
102  * Max number of Tx descriptors to be reclaimed by the Tx timer.
103  */
104 #define MAX_TIMER_TX_RECLAIM 100
105 
106 /*
107  * Timer index used when backing off due to memory shortage.
108  */
109 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
110 
111 /*
112  * Suspend an Ethernet Tx queue with fewer available descriptors than this.
113  * This is the same as calc_tx_descs() for a TSO packet with
114  * nr_frags == MAX_SKB_FRAGS.
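 *
 * For example, assuming MAX_SKB_FRAGS == 17 (the typical value with 4KB
 * pages), this works out to 1 + DIV_ROUND_UP(3 * 17 / 2 + 1, 8) == 5
 * descriptors.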
115  */
116 #define ETHTXQ_STOP_THRES \
117 	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
118 
119 /*
120  * Suspension threshold for non-Ethernet Tx queues.  We require enough room
121  * for a full sized WR.
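 * With the usual 512-byte SGE_MAX_WR_LEN and 64-byte Tx descriptors this
 * works out to 8 descriptors.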
122  */
123 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
124 
125 /*
126  * Max Tx descriptor space we allow for an Ethernet packet to be inlined
127  * into a WR.
128  */
129 #define MAX_IMM_TX_PKT_LEN 256
130 
131 /*
132  * Max size of a WR sent through a control Tx queue.
133  */
134 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
135 
136 struct tx_sw_desc {                /* SW state per Tx descriptor */
137 	struct sk_buff *skb;
138 	struct ulptx_sgl *sgl;
139 };
140 
141 struct rx_sw_desc {                /* SW state per Rx descriptor */
142 	struct page *page;
143 	dma_addr_t dma_addr;
144 };
145 
146 /*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
149  * We could easily support more but there doesn't seem to be much need for
150  * that ...
151  */
152 #define FL_MTU_SMALL 1500
153 #define FL_MTU_LARGE 9000
154 
155 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
156 					  unsigned int mtu)
157 {
158 	struct sge *s = &adapter->sge;
159 
160 	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
161 }
162 
163 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
164 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
165 
166 /*
167  * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
168  * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4 to mark a buffer that has been
 * unmapped for DMA; this is of course never sent to the hardware and is only
 * used to prevent double unmappings.  All of the above requires that the Free
 * List Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * aligned to 32 bytes or a larger power of 2.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
175  */
176 enum {
177 	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
179 	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
180 
181 	/*
182 	 * XXX We shouldn't depend on being able to use these indices.
183 	 * XXX Especially when some other Master PF has initialized the
184 	 * XXX adapter or we use the Firmware Configuration File.  We
185 	 * XXX should really search through the Host Buffer Size register
186 	 * XXX array for the appropriately sized buffer indices.
187 	 */
188 	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */
190 
191 	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
192 	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
193 };
194 
195 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
196 #define MIN_NAPI_WORK  1
197 
198 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
199 {
200 	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
201 }
202 
203 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
204 {
205 	return !(d->dma_addr & RX_UNMAPPED_BUF);
206 }
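
/*
 * For example, refill_fl() below records a freshly mapped large-page buffer
 * roughly as
 *
 *	sd->dma_addr = dma_map_page(...) | RX_LARGE_PG_BUF;
 *
 * get_buf_addr() masks the low flag bits back off before the address is
 * handed to dma_unmap_page(), and is_buf_mapped() simply checks that
 * RX_UNMAPPED_BUF hasn't been set.
 */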
207 
208 /**
209  *	txq_avail - return the number of available slots in a Tx queue
210  *	@q: the Tx queue
211  *
212  *	Returns the number of descriptors in a Tx queue available to write new
213  *	packets.
214  */
215 static inline unsigned int txq_avail(const struct sge_txq *q)
216 {
217 	return q->size - 1 - q->in_use;
218 }
219 
220 /**
221  *	fl_cap - return the capacity of a free-buffer list
222  *	@fl: the FL
223  *
224  *	Returns the capacity of a free-buffer list.  The capacity is less than
225  *	the size because one descriptor needs to be left unpopulated, otherwise
226  *	HW will think the FL is empty.
227  */
228 static inline unsigned int fl_cap(const struct sge_fl *fl)
229 {
230 	return fl->size - 8;   /* 1 descriptor = 8 buffers */
231 }
232 
233 /**
234  *	fl_starving - return whether a Free List is starving.
235  *	@adapter: pointer to the adapter
236  *	@fl: the Free List
237  *
238  *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
240  *	threshold.
241  */
242 static inline bool fl_starving(const struct adapter *adapter,
243 			       const struct sge_fl *fl)
244 {
245 	const struct sge *s = &adapter->sge;
246 
247 	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
248 }
249 
250 static int map_skb(struct device *dev, const struct sk_buff *skb,
251 		   dma_addr_t *addr)
252 {
253 	const skb_frag_t *fp, *end;
254 	const struct skb_shared_info *si;
255 
256 	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
257 	if (dma_mapping_error(dev, *addr))
258 		goto out_err;
259 
260 	si = skb_shinfo(skb);
261 	end = &si->frags[si->nr_frags];
262 
263 	for (fp = si->frags; fp < end; fp++) {
264 		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
265 					   DMA_TO_DEVICE);
266 		if (dma_mapping_error(dev, *addr))
267 			goto unwind;
268 	}
269 	return 0;
270 
271 unwind:
272 	while (fp-- > si->frags)
273 		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
274 
275 	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
276 out_err:
277 	return -ENOMEM;
278 }
279 
280 #ifdef CONFIG_NEED_DMA_MAP_STATE
281 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
282 		      const dma_addr_t *addr)
283 {
284 	const skb_frag_t *fp, *end;
285 	const struct skb_shared_info *si;
286 
287 	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
288 
289 	si = skb_shinfo(skb);
290 	end = &si->frags[si->nr_frags];
291 	for (fp = si->frags; fp < end; fp++)
292 		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
293 }
294 
295 /**
296  *	deferred_unmap_destructor - unmap a packet when it is freed
297  *	@skb: the packet
298  *
299  *	This is the packet destructor used for Tx packets that need to remain
300  *	mapped until they are freed rather than until their Tx descriptors are
301  *	freed.
302  */
303 static void deferred_unmap_destructor(struct sk_buff *skb)
304 {
305 	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
306 }
307 #endif
308 
309 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
310 		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
311 {
312 	const struct ulptx_sge_pair *p;
313 	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
314 
315 	if (likely(skb_headlen(skb)))
316 		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
317 				 DMA_TO_DEVICE);
318 	else {
319 		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
320 			       DMA_TO_DEVICE);
321 		nfrags--;
322 	}
323 
324 	/*
325 	 * the complexity below is because of the possibility of a wrap-around
326 	 * in the middle of an SGL
327 	 */
328 	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
329 		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
330 unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
331 				       ntohl(p->len[0]), DMA_TO_DEVICE);
332 			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
333 				       ntohl(p->len[1]), DMA_TO_DEVICE);
334 			p++;
335 		} else if ((u8 *)p == (u8 *)q->stat) {
336 			p = (const struct ulptx_sge_pair *)q->desc;
337 			goto unmap;
338 		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
339 			const __be64 *addr = (const __be64 *)q->desc;
340 
341 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
342 				       ntohl(p->len[0]), DMA_TO_DEVICE);
343 			dma_unmap_page(dev, be64_to_cpu(addr[1]),
344 				       ntohl(p->len[1]), DMA_TO_DEVICE);
345 			p = (const struct ulptx_sge_pair *)&addr[2];
346 		} else {
347 			const __be64 *addr = (const __be64 *)q->desc;
348 
349 			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
350 				       ntohl(p->len[0]), DMA_TO_DEVICE);
351 			dma_unmap_page(dev, be64_to_cpu(addr[0]),
352 				       ntohl(p->len[1]), DMA_TO_DEVICE);
353 			p = (const struct ulptx_sge_pair *)&addr[1];
354 		}
355 	}
356 	if (nfrags) {
357 		__be64 addr;
358 
359 		if ((u8 *)p == (u8 *)q->stat)
360 			p = (const struct ulptx_sge_pair *)q->desc;
361 		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
362 						       *(const __be64 *)q->desc;
363 		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
364 			       DMA_TO_DEVICE);
365 	}
366 }
367 
368 /**
369  *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
371  *	@q: the Tx queue to reclaim descriptors from
372  *	@n: the number of descriptors to reclaim
373  *	@unmap: whether the buffers should be unmapped for DMA
374  *
375  *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
376  *	Tx buffers.  Called with the Tx queue lock held.
377  */
378 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
379 		  unsigned int n, bool unmap)
380 {
381 	struct tx_sw_desc *d;
382 	unsigned int cidx = q->cidx;
383 	struct device *dev = adap->pdev_dev;
384 
385 	d = &q->sdesc[cidx];
386 	while (n--) {
387 		if (d->skb) {                       /* an SGL is present */
388 			if (unmap)
389 				unmap_sgl(dev, d->skb, d->sgl, q);
390 			dev_consume_skb_any(d->skb);
391 			d->skb = NULL;
392 		}
393 		++d;
394 		if (++cidx == q->size) {
395 			cidx = 0;
396 			d = q->sdesc;
397 		}
398 	}
399 	q->cidx = cidx;
400 }
401 
402 /*
403  * Return the number of reclaimable descriptors in a Tx queue.
404  */
405 static inline int reclaimable(const struct sge_txq *q)
406 {
407 	int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
408 	hw_cidx -= q->cidx;
409 	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
410 }
411 
412 /**
413  *	reclaim_completed_tx - reclaims completed Tx descriptors
414  *	@adap: the adapter
415  *	@q: the Tx queue to reclaim completed descriptors from
416  *	@unmap: whether the buffers should be unmapped for DMA
417  *
418  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
419  *	and frees the associated buffers if possible.  Called with the Tx
420  *	queue locked.
421  */
422 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
423 					bool unmap)
424 {
425 	int avail = reclaimable(q);
426 
427 	if (avail) {
428 		/*
429 		 * Limit the amount of clean up work we do at a time to keep
430 		 * the Tx lock hold time O(1).
431 		 */
432 		if (avail > MAX_TX_RECLAIM)
433 			avail = MAX_TX_RECLAIM;
434 
435 		free_tx_desc(adap, q, avail, unmap);
436 		q->in_use -= avail;
437 	}
438 }
439 
440 static inline int get_buf_size(struct adapter *adapter,
441 			       const struct rx_sw_desc *d)
442 {
443 	struct sge *s = &adapter->sge;
444 	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
445 	int buf_size;
446 
447 	switch (rx_buf_size_idx) {
448 	case RX_SMALL_PG_BUF:
449 		buf_size = PAGE_SIZE;
450 		break;
451 
452 	case RX_LARGE_PG_BUF:
453 		buf_size = PAGE_SIZE << s->fl_pg_order;
454 		break;
455 
456 	case RX_SMALL_MTU_BUF:
457 		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
458 		break;
459 
460 	case RX_LARGE_MTU_BUF:
461 		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
462 		break;
463 
464 	default:
465 		BUG_ON(1);
466 	}
467 
468 	return buf_size;
469 }
470 
471 /**
472  *	free_rx_bufs - free the Rx buffers on an SGE free list
473  *	@adap: the adapter
474  *	@q: the SGE free list to free buffers from
475  *	@n: how many buffers to free
476  *
477  *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
478  *	buffers must be made inaccessible to HW before calling this function.
479  */
480 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
481 {
482 	while (n--) {
483 		struct rx_sw_desc *d = &q->sdesc[q->cidx];
484 
485 		if (is_buf_mapped(d))
486 			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
487 				       get_buf_size(adap, d),
488 				       PCI_DMA_FROMDEVICE);
489 		put_page(d->page);
490 		d->page = NULL;
491 		if (++q->cidx == q->size)
492 			q->cidx = 0;
493 		q->avail--;
494 	}
495 }
496 
497 /**
498  *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
499  *	@adap: the adapter
500  *	@q: the SGE free list
501  *
502  *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
503  *	buffer must be made inaccessible to HW before calling this function.
504  *
505  *	This is similar to @free_rx_bufs above but does not free the buffer.
506  *	Do note that the FL still loses any further access to the buffer.
507  */
508 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
509 {
510 	struct rx_sw_desc *d = &q->sdesc[q->cidx];
511 
512 	if (is_buf_mapped(d))
513 		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
514 			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
515 	d->page = NULL;
516 	if (++q->cidx == q->size)
517 		q->cidx = 0;
518 	q->avail--;
519 }
520 
521 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
522 {
523 	if (q->pend_cred >= 8) {
524 		u32 val = adap->params.arch.sge_fl_db;
525 
526 		if (is_t4(adap->params.chip))
527 			val |= PIDX_V(q->pend_cred / 8);
528 		else
529 			val |= PIDX_T5_V(q->pend_cred / 8);
530 
531 		/* Make sure all memory writes to the Free List queue are
532 		 * committed before we tell the hardware about them.
533 		 */
534 		wmb();
535 
536 		/* If we don't have access to the new User Doorbell (T5+), use
537 		 * the old doorbell mechanism; otherwise use the new BAR2
538 		 * mechanism.
539 		 */
540 		if (unlikely(q->bar2_addr == NULL)) {
541 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
542 				     val | QID_V(q->cntxt_id));
543 		} else {
544 			writel(val | QID_V(q->bar2_qid),
545 			       q->bar2_addr + SGE_UDB_KDOORBELL);
546 
547 			/* This Write memory Barrier will force the write to
548 			 * the User Doorbell area to be flushed.
549 			 */
550 			wmb();
551 		}
552 		q->pend_cred &= 7;
553 	}
554 }
555 
556 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
557 				  dma_addr_t mapping)
558 {
559 	sd->page = pg;
560 	sd->dma_addr = mapping;      /* includes size low bits */
561 }
562 
563 /**
564  *	refill_fl - refill an SGE Rx buffer ring
565  *	@adap: the adapter
566  *	@q: the ring to refill
567  *	@n: the number of new buffers to allocate
568  *	@gfp: the gfp flags for the allocations
569  *
570  *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
571  *	allocated with the supplied gfp flags.  The caller must assure that
572  *	@n does not exceed the queue's capacity.  If afterwards the queue is
573  *	found critically low mark it as starving in the bitmap of starving FLs.
574  *
575  *	Returns the number of buffers allocated.
576  */
577 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
578 			      gfp_t gfp)
579 {
580 	struct sge *s = &adap->sge;
581 	struct page *pg;
582 	dma_addr_t mapping;
583 	unsigned int cred = q->avail;
584 	__be64 *d = &q->desc[q->pidx];
585 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
586 	int node;
587 
588 #ifdef CONFIG_DEBUG_FS
589 	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
590 		goto out;
591 #endif
592 
593 	gfp |= __GFP_NOWARN;
594 	node = dev_to_node(adap->pdev_dev);
595 
596 	if (s->fl_pg_order == 0)
597 		goto alloc_small_pages;
598 
599 	/*
600 	 * Prefer large buffers
601 	 */
602 	while (n) {
603 		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
604 		if (unlikely(!pg)) {
605 			q->large_alloc_failed++;
606 			break;       /* fall back to single pages */
607 		}
608 
609 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
610 				       PAGE_SIZE << s->fl_pg_order,
611 				       PCI_DMA_FROMDEVICE);
612 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
613 			__free_pages(pg, s->fl_pg_order);
614 			q->mapping_err++;
615 			goto out;   /* do not try small pages for this error */
616 		}
617 		mapping |= RX_LARGE_PG_BUF;
618 		*d++ = cpu_to_be64(mapping);
619 
620 		set_rx_sw_desc(sd, pg, mapping);
621 		sd++;
622 
623 		q->avail++;
624 		if (++q->pidx == q->size) {
625 			q->pidx = 0;
626 			sd = q->sdesc;
627 			d = q->desc;
628 		}
629 		n--;
630 	}
631 
632 alloc_small_pages:
633 	while (n--) {
634 		pg = alloc_pages_node(node, gfp, 0);
635 		if (unlikely(!pg)) {
636 			q->alloc_failed++;
637 			break;
638 		}
639 
640 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
641 				       PCI_DMA_FROMDEVICE);
642 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
643 			put_page(pg);
644 			q->mapping_err++;
645 			goto out;
646 		}
647 		*d++ = cpu_to_be64(mapping);
648 
649 		set_rx_sw_desc(sd, pg, mapping);
650 		sd++;
651 
652 		q->avail++;
653 		if (++q->pidx == q->size) {
654 			q->pidx = 0;
655 			sd = q->sdesc;
656 			d = q->desc;
657 		}
658 	}
659 
660 out:	cred = q->avail - cred;
661 	q->pend_cred += cred;
662 	ring_fl_db(adap, q);
663 
664 	if (unlikely(fl_starving(adap, q))) {
665 		smp_wmb();
666 		q->low++;
667 		set_bit(q->cntxt_id - adap->sge.egr_start,
668 			adap->sge.starving_fl);
669 	}
670 
671 	return cred;
672 }
673 
674 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
675 {
676 	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
677 		  GFP_ATOMIC);
678 }
679 
680 /**
681  *	alloc_ring - allocate resources for an SGE descriptor ring
682  *	@dev: the PCI device's core device
683  *	@nelem: the number of descriptors
684  *	@elem_size: the size of each descriptor
685  *	@sw_size: the size of the SW state associated with each ring element
686  *	@phys: the physical address of the allocated ring
687  *	@metadata: address of the array holding the SW state for the ring
688  *	@stat_size: extra space in HW ring for status information
689  *	@node: preferred node for memory allocations
690  *
691  *	Allocates resources for an SGE descriptor ring, such as Tx queues,
692  *	free buffer lists, or response queues.  Each SGE ring requires
693  *	space for its HW descriptors plus, optionally, space for the SW state
694  *	associated with each HW entry (the metadata).  The function returns
695  *	three values: the virtual address for the HW ring (the return value
696  *	of the function), the bus address of the HW ring, and the address
697  *	of the SW ring.
698  */
699 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
700 			size_t sw_size, dma_addr_t *phys, void *metadata,
701 			size_t stat_size, int node)
702 {
703 	size_t len = nelem * elem_size + stat_size;
704 	void *s = NULL;
705 	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
706 
707 	if (!p)
708 		return NULL;
709 	if (sw_size) {
710 		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
711 
712 		if (!s) {
713 			dma_free_coherent(dev, len, p, *phys);
714 			return NULL;
715 		}
716 	}
717 	if (metadata)
718 		*(void **)metadata = s;
719 	memset(p, 0, len);
720 	return p;
721 }
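
/*
 * A typical caller, e.g. one of the queue constructors later in the driver,
 * uses it along these lines (a sketch, not a verbatim call):
 *
 *	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
 *				 sizeof(struct tx_desc),
 *				 sizeof(struct tx_sw_desc), &txq->q.phys_addr,
 *				 &txq->q.sdesc, s->stat_len, node);
 *
 * i.e. the HW ring's virtual address is returned, the bus address lands in
 * phys_addr and the SW descriptor array pointer in sdesc.
 */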
722 
723 /**
724  *	sgl_len - calculates the size of an SGL of the given capacity
725  *	@n: the number of SGL entries
726  *
727  *	Calculates the number of flits needed for a scatter/gather list that
728  *	can hold the given number of entries.
729  */
730 static inline unsigned int sgl_len(unsigned int n)
731 {
732 	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
733 	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
734 	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
735 	 * repeated sequences of { Length[i], Length[i+1], Address[i],
736 	 * Address[i+1] } (this ensures that all addresses are on 64-bit
737 	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
738 	 * Address[N+1] is omitted.
739 	 *
740 	 * The following calculation incorporates all of the above.  It's
741 	 * somewhat hard to follow but, briefly: the "+2" accounts for the
742 	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits per pair of the remaining n-1 entries) plus 1 if (n-1) is odd; and
745 	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
746 	 * (n-1) is odd ...
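	 *
	 * For example, with n == 3: after n--, (3 * 2) / 2 + (2 & 1) + 2 == 5
	 * flits -- 2 flits for the ULPTX header/Length0/Address0 plus one
	 * 3-flit pair covering the remaining two entries.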
747 	 */
748 	n--;
749 	return (3 * n) / 2 + (n & 1) + 2;
750 }
751 
752 /**
753  *	flits_to_desc - returns the num of Tx descriptors for the given flits
754  *	@n: the number of flits
755  *
756  *	Returns the number of Tx descriptors needed for the supplied number
757  *	of flits.
758  */
759 static inline unsigned int flits_to_desc(unsigned int n)
760 {
761 	BUG_ON(n > SGE_MAX_WR_LEN / 8);
762 	return DIV_ROUND_UP(n, 8);
763 }
764 
765 /**
766  *	is_eth_imm - can an Ethernet packet be sent as immediate data?
767  *	@skb: the packet
768  *
769  *	Returns whether an Ethernet packet is small enough to fit as
770  *	immediate data. Return value corresponds to headroom required.
771  */
772 static inline int is_eth_imm(const struct sk_buff *skb)
773 {
774 	int hdrlen = skb_shinfo(skb)->gso_size ?
775 			sizeof(struct cpl_tx_pkt_lso_core) : 0;
776 
777 	hdrlen += sizeof(struct cpl_tx_pkt);
778 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
779 		return hdrlen;
780 	return 0;
781 }
782 
783 /**
784  *	calc_tx_flits - calculate the number of flits for a packet Tx WR
785  *	@skb: the packet
786  *
787  *	Returns the number of flits needed for a Tx WR for the given Ethernet
788  *	packet, including the needed WR and CPL headers.
789  */
790 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
791 {
792 	unsigned int flits;
793 	int hdrlen = is_eth_imm(skb);
794 
795 	/* If the skb is small enough, we can pump it out as a work request
796 	 * with only immediate data.  In that case we just have to have the
797 	 * TX Packet header plus the skb data in the Work Request.
798 	 */
799 
800 	if (hdrlen)
801 		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
802 
803 	/* Otherwise, we're going to have to construct a Scatter gather list
804 	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Work Request header (incorporated as part of the cpl_tx_pkt_lso and
807 	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
808 	 * message or, if we're doing a Large Send Offload, an LSO CPL message
809 	 * with an embedded TX Packet Write CPL message.
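	 *
	 * For example (assuming the usual 16-byte, i.e. 2-flit, Work Request
	 * and CPL headers), a non-GSO packet with a linear head and two page
	 * fragments needs sgl_len(3) == 5 flits for the SGL plus 4 flits of
	 * headers, 9 flits in all.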
810 	 */
811 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
812 	if (skb_shinfo(skb)->gso_size)
813 		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
814 			  sizeof(struct cpl_tx_pkt_lso_core) +
815 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
816 	else
817 		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
818 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
819 	return flits;
820 }
821 
822 /**
823  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
824  *	@skb: the packet
825  *
826  *	Returns the number of Tx descriptors needed for the given Ethernet
827  *	packet, including the needed WR and CPL headers.
828  */
829 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
830 {
831 	return flits_to_desc(calc_tx_flits(skb));
832 }
833 
834 /**
835  *	write_sgl - populate a scatter/gather list for a packet
836  *	@skb: the packet
837  *	@q: the Tx queue we are writing into
838  *	@sgl: starting location for writing the SGL
839  *	@end: points right after the end of the SGL
840  *	@start: start offset into skb main-body data to include in the SGL
841  *	@addr: the list of bus addresses for the SGL elements
842  *
843  *	Generates a gather list for the buffers that make up a packet.
844  *	The caller must provide adequate space for the SGL that will be written.
845  *	The SGL includes all of the packet's page fragments and the data in its
846  *	main body except for the first @start bytes.  @sgl must be 16-byte
847  *	aligned and within a Tx descriptor with available space.  @end points
848  *	right after the end of the SGL but does not account for any potential
849  *	wrap around, i.e., @end > @sgl.
850  */
851 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
852 		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
853 		      const dma_addr_t *addr)
854 {
855 	unsigned int i, len;
856 	struct ulptx_sge_pair *to;
857 	const struct skb_shared_info *si = skb_shinfo(skb);
858 	unsigned int nfrags = si->nr_frags;
859 	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
860 
861 	len = skb_headlen(skb) - start;
862 	if (likely(len)) {
863 		sgl->len0 = htonl(len);
864 		sgl->addr0 = cpu_to_be64(addr[0] + start);
865 		nfrags++;
866 	} else {
867 		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
868 		sgl->addr0 = cpu_to_be64(addr[1]);
869 	}
870 
871 	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
872 			      ULPTX_NSGE_V(nfrags));
873 	if (likely(--nfrags == 0))
874 		return;
875 	/*
876 	 * Most of the complexity below deals with the possibility we hit the
877 	 * end of the queue in the middle of writing the SGL.  For this case
878 	 * only we create the SGL in a temporary buffer and then copy it.
879 	 */
880 	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
881 
882 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
883 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
884 		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
885 		to->addr[0] = cpu_to_be64(addr[i]);
886 		to->addr[1] = cpu_to_be64(addr[++i]);
887 	}
888 	if (nfrags) {
889 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
890 		to->len[1] = cpu_to_be32(0);
891 		to->addr[0] = cpu_to_be64(addr[i + 1]);
892 	}
893 	if (unlikely((u8 *)end > (u8 *)q->stat)) {
894 		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
895 
896 		if (likely(part0))
897 			memcpy(sgl->sge, buf, part0);
898 		part1 = (u8 *)end - (u8 *)q->stat;
899 		memcpy(q->desc, (u8 *)buf + part0, part1);
900 		end = (void *)q->desc + part1;
901 	}
902 	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
903 		*end = 0;
904 }
905 
/* This function copies a 64-byte coalesced Work Request to memory-mapped
 * BAR2 space.  For a coalesced WR, the SGE fetches the data from its FIFO
 * instead of from host memory.
909  */
910 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
911 {
912 	int count = 8;
913 
914 	while (count) {
915 		writeq(*src, dst);
916 		src++;
917 		dst++;
918 		count--;
919 	}
920 }
921 
922 /**
923  *	ring_tx_db - check and potentially ring a Tx queue's doorbell
924  *	@adap: the adapter
925  *	@q: the Tx queue
926  *	@n: number of new descriptors to give to HW
927  *
 *	Ring the doorbell for a Tx queue.
929  */
930 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
931 {
932 	/* Make sure that all writes to the TX Descriptors are committed
933 	 * before we tell the hardware about them.
934 	 */
935 	wmb();
936 
937 	/* If we don't have access to the new User Doorbell (T5+), use the old
938 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
939 	 */
940 	if (unlikely(q->bar2_addr == NULL)) {
941 		u32 val = PIDX_V(n);
942 		unsigned long flags;
943 
944 		/* For T4 we need to participate in the Doorbell Recovery
945 		 * mechanism.
946 		 */
947 		spin_lock_irqsave(&q->db_lock, flags);
948 		if (!q->db_disabled)
949 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
950 				     QID_V(q->cntxt_id) | val);
951 		else
952 			q->db_pidx_inc += n;
953 		q->db_pidx = q->pidx;
954 		spin_unlock_irqrestore(&q->db_lock, flags);
955 	} else {
956 		u32 val = PIDX_T5_V(n);
957 
958 		/* T4 and later chips share the same PIDX field offset within
959 		 * the doorbell, but T5 and later shrank the field in order to
960 		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if the PIDX increment is too large.
963 		 */
964 		WARN_ON(val & DBPRIO_F);
965 
966 		/* If we're only writing a single TX Descriptor and we can use
967 		 * Inferred QID registers, we can use the Write Combining
968 		 * Gather Buffer; otherwise we use the simple doorbell.
969 		 */
970 		if (n == 1 && q->bar2_qid == 0) {
971 			int index = (q->pidx
972 				     ? (q->pidx - 1)
973 				     : (q->size - 1));
974 			u64 *wr = (u64 *)&q->desc[index];
975 
976 			cxgb_pio_copy((u64 __iomem *)
977 				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
978 				      wr);
979 		} else {
980 			writel(val | QID_V(q->bar2_qid),
981 			       q->bar2_addr + SGE_UDB_KDOORBELL);
982 		}
983 
984 		/* This Write Memory Barrier will force the write to the User
985 		 * Doorbell area to be flushed.  This is needed to prevent
986 		 * writes on different CPUs for the same queue from hitting
987 		 * the adapter out of order.  This is required when some Work
988 		 * Requests take the Write Combine Gather Buffer path (user
989 		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
990 		 * take the traditional path where we simply increment the
991 		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
992 		 * hardware DMA read the actual Work Request.
993 		 */
994 		wmb();
995 	}
996 }
997 
998 /**
999  *	inline_tx_skb - inline a packet's data into Tx descriptors
1000  *	@skb: the packet
1001  *	@q: the Tx queue where the packet will be inlined
1002  *	@pos: starting position in the Tx queue where to inline the packet
1003  *
1004  *	Inline a packet's contents directly into Tx descriptors, starting at
1005  *	the given position within the Tx DMA ring.
1006  *	Most of the complexity of this operation is dealing with wrap arounds
1007  *	in the middle of the packet we want to inline.
1008  */
1009 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
1010 			  void *pos)
1011 {
1012 	u64 *p;
1013 	int left = (void *)q->stat - pos;
1014 
1015 	if (likely(skb->len <= left)) {
1016 		if (likely(!skb->data_len))
1017 			skb_copy_from_linear_data(skb, pos, skb->len);
1018 		else
1019 			skb_copy_bits(skb, 0, pos, skb->len);
1020 		pos += skb->len;
1021 	} else {
1022 		skb_copy_bits(skb, 0, pos, left);
1023 		skb_copy_bits(skb, left, q->desc, skb->len - left);
1024 		pos = (void *)q->desc + (skb->len - left);
1025 	}
1026 
1027 	/* 0-pad to multiple of 16 */
1028 	p = PTR_ALIGN(pos, 8);
1029 	if ((uintptr_t)p & 8)
1030 		*p = 0;
1031 }
1032 
1033 static void *inline_tx_skb_header(const struct sk_buff *skb,
1034 				  const struct sge_txq *q,  void *pos,
1035 				  int length)
1036 {
1037 	u64 *p;
1038 	int left = (void *)q->stat - pos;
1039 
1040 	if (likely(length <= left)) {
1041 		memcpy(pos, skb->data, length);
1042 		pos += length;
1043 	} else {
1044 		memcpy(pos, skb->data, left);
1045 		memcpy(q->desc, skb->data + left, length - left);
1046 		pos = (void *)q->desc + (length - left);
1047 	}
1048 	/* 0-pad to multiple of 16 */
1049 	p = PTR_ALIGN(pos, 8);
1050 	if ((uintptr_t)p & 8) {
1051 		*p = 0;
1052 		return p + 1;
1053 	}
1054 	return p;
1055 }
1056 
1057 /*
1058  * Figure out what HW csum a packet wants and return the appropriate control
1059  * bits.
1060  */
1061 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1062 {
1063 	int csum_type;
1064 	const struct iphdr *iph = ip_hdr(skb);
1065 
1066 	if (iph->version == 4) {
1067 		if (iph->protocol == IPPROTO_TCP)
1068 			csum_type = TX_CSUM_TCPIP;
1069 		else if (iph->protocol == IPPROTO_UDP)
1070 			csum_type = TX_CSUM_UDPIP;
1071 		else {
1072 nocsum:			/*
1073 			 * unknown protocol, disable HW csum
1074 			 * and hope a bad packet is detected
1075 			 */
1076 			return TXPKT_L4CSUM_DIS_F;
1077 		}
1078 	} else {
1079 		/*
1080 		 * this doesn't work with extension headers
1081 		 */
1082 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1083 
1084 		if (ip6h->nexthdr == IPPROTO_TCP)
1085 			csum_type = TX_CSUM_TCPIP6;
1086 		else if (ip6h->nexthdr == IPPROTO_UDP)
1087 			csum_type = TX_CSUM_UDPIP6;
1088 		else
1089 			goto nocsum;
1090 	}
1091 
1092 	if (likely(csum_type >= TX_CSUM_TCPIP)) {
1093 		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
1094 		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1095 
1096 		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1097 			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1098 		else
1099 			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1100 		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1101 	} else {
1102 		int start = skb_transport_offset(skb);
1103 
1104 		return TXPKT_CSUM_TYPE_V(csum_type) |
1105 			TXPKT_CSUM_START_V(start) |
1106 			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1107 	}
1108 }
1109 
1110 static void eth_txq_stop(struct sge_eth_txq *q)
1111 {
1112 	netif_tx_stop_queue(q->txq);
1113 	q->q.stops++;
1114 }
1115 
1116 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1117 {
1118 	q->in_use += n;
1119 	q->pidx += n;
1120 	if (q->pidx >= q->size)
1121 		q->pidx -= q->size;
1122 }
1123 
1124 #ifdef CONFIG_CHELSIO_T4_FCOE
1125 static inline int
1126 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1127 		  const struct port_info *pi, u64 *cntrl)
1128 {
1129 	const struct cxgb_fcoe *fcoe = &pi->fcoe;
1130 
1131 	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1132 		return 0;
1133 
1134 	if (skb->protocol != htons(ETH_P_FCOE))
1135 		return 0;
1136 
1137 	skb_reset_mac_header(skb);
1138 	skb->mac_len = sizeof(struct ethhdr);
1139 
1140 	skb_set_network_header(skb, skb->mac_len);
1141 	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1142 
1143 	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1144 		return -ENOTSUPP;
1145 
1146 	/* FC CRC offload */
1147 	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1148 		     TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1149 		     TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1150 		     TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1151 		     TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1152 	return 0;
1153 }
1154 #endif /* CONFIG_CHELSIO_T4_FCOE */
1155 
1156 /**
1157  *	t4_eth_xmit - add a packet to an Ethernet Tx queue
1158  *	@skb: the packet
1159  *	@dev: the egress net device
1160  *
1161  *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
1162  */
1163 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1164 {
1165 	u32 wr_mid, ctrl0;
1166 	u64 cntrl, *end;
1167 	int qidx, credits;
1168 	unsigned int flits, ndesc;
1169 	struct adapter *adap;
1170 	struct sge_eth_txq *q;
1171 	const struct port_info *pi;
1172 	struct fw_eth_tx_pkt_wr *wr;
1173 	struct cpl_tx_pkt_core *cpl;
1174 	const struct skb_shared_info *ssi;
1175 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1176 	bool immediate = false;
1177 	int len, max_pkt_len;
1178 #ifdef CONFIG_CHELSIO_T4_FCOE
1179 	int err;
1180 #endif /* CONFIG_CHELSIO_T4_FCOE */
1181 
1182 	/*
	 * The chip's minimum packet length is 10 octets, but we play it safe
	 * and reject anything shorter than an Ethernet header.
1185 	 */
1186 	if (unlikely(skb->len < ETH_HLEN)) {
1187 out_free:	dev_kfree_skb_any(skb);
1188 		return NETDEV_TX_OK;
1189 	}
1190 
1191 	/* Discard the packet if the length is greater than mtu */
1192 	max_pkt_len = ETH_HLEN + dev->mtu;
1193 	if (skb_vlan_tagged(skb))
1194 		max_pkt_len += VLAN_HLEN;
1195 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1196 		goto out_free;
1197 
1198 	pi = netdev_priv(dev);
1199 	adap = pi->adapter;
1200 	qidx = skb_get_queue_mapping(skb);
1201 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
1202 
1203 	reclaim_completed_tx(adap, &q->q, true);
1204 	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1205 
1206 #ifdef CONFIG_CHELSIO_T4_FCOE
1207 	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1208 	if (unlikely(err == -ENOTSUPP))
1209 		goto out_free;
1210 #endif /* CONFIG_CHELSIO_T4_FCOE */
1211 
1212 	flits = calc_tx_flits(skb);
1213 	ndesc = flits_to_desc(flits);
1214 	credits = txq_avail(&q->q) - ndesc;
1215 
1216 	if (unlikely(credits < 0)) {
1217 		eth_txq_stop(q);
1218 		dev_err(adap->pdev_dev,
1219 			"%s: Tx ring %u full while queue awake!\n",
1220 			dev->name, qidx);
1221 		return NETDEV_TX_BUSY;
1222 	}
1223 
1224 	if (is_eth_imm(skb))
1225 		immediate = true;
1226 
1227 	if (!immediate &&
1228 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1229 		q->mapping_err++;
1230 		goto out_free;
1231 	}
1232 
1233 	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1234 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1235 		eth_txq_stop(q);
1236 		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1237 	}
1238 
1239 	wr = (void *)&q->q.desc[q->q.pidx];
1240 	wr->equiq_to_len16 = htonl(wr_mid);
1241 	wr->r3 = cpu_to_be64(0);
1242 	end = (u64 *)wr + flits;
1243 
1244 	len = immediate ? skb->len : 0;
1245 	ssi = skb_shinfo(skb);
1246 	if (ssi->gso_size) {
1247 		struct cpl_tx_pkt_lso *lso = (void *)wr;
1248 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1249 		int l3hdr_len = skb_network_header_len(skb);
1250 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1251 
1252 		len += sizeof(*lso);
1253 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1254 				       FW_WR_IMMDLEN_V(len));
1255 		lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1256 					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1257 					LSO_IPV6_V(v6) |
1258 					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1259 					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1260 					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1261 		lso->c.ipid_ofst = htons(0);
1262 		lso->c.mss = htons(ssi->gso_size);
1263 		lso->c.seqno_offset = htonl(0);
1264 		if (is_t4(adap->params.chip))
1265 			lso->c.len = htonl(skb->len);
1266 		else
1267 			lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1268 		cpl = (void *)(lso + 1);
1269 
1270 		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1271 			cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1272 		else
1273 			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1274 
1275 		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1276 					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1277 			 TXPKT_IPHDR_LEN_V(l3hdr_len);
1278 		q->tso++;
1279 		q->tx_cso += ssi->gso_segs;
1280 	} else {
1281 		len += sizeof(*cpl);
1282 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1283 				       FW_WR_IMMDLEN_V(len));
1284 		cpl = (void *)(wr + 1);
1285 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1286 			cntrl = hwcsum(adap->params.chip, skb) |
1287 				TXPKT_IPCSUM_DIS_F;
1288 			q->tx_cso++;
1289 		}
1290 	}
1291 
1292 	if (skb_vlan_tag_present(skb)) {
1293 		q->vlan_ins++;
1294 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1295 #ifdef CONFIG_CHELSIO_T4_FCOE
1296 		if (skb->protocol == htons(ETH_P_FCOE))
1297 			cntrl |= TXPKT_VLAN_V(
1298 				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1299 #endif /* CONFIG_CHELSIO_T4_FCOE */
1300 	}
1301 
1302 	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1303 		TXPKT_PF_V(adap->pf);
1304 #ifdef CONFIG_CHELSIO_T4_DCB
1305 	if (is_t4(adap->params.chip))
1306 		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1307 	else
1308 		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1309 #endif
1310 	cpl->ctrl0 = htonl(ctrl0);
1311 	cpl->pack = htons(0);
1312 	cpl->len = htons(skb->len);
1313 	cpl->ctrl1 = cpu_to_be64(cntrl);
1314 
1315 	if (immediate) {
1316 		inline_tx_skb(skb, &q->q, cpl + 1);
1317 		dev_consume_skb_any(skb);
1318 	} else {
1319 		int last_desc;
1320 
1321 		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1322 			  addr);
1323 		skb_orphan(skb);
1324 
1325 		last_desc = q->q.pidx + ndesc - 1;
1326 		if (last_desc >= q->q.size)
1327 			last_desc -= q->q.size;
1328 		q->q.sdesc[last_desc].skb = skb;
1329 		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1330 	}
1331 
1332 	txq_advance(&q->q, ndesc);
1333 
1334 	ring_tx_db(adap, &q->q, ndesc);
1335 	return NETDEV_TX_OK;
1336 }
1337 
1338 /**
1339  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1340  *	@q: the SGE control Tx queue
1341  *
1342  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1343  *	that send only immediate data (presently just the control queues) and
1344  *	thus do not have any sk_buffs to release.
1345  */
1346 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1347 {
1348 	int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
1349 	int reclaim = hw_cidx - q->cidx;
1350 
1351 	if (reclaim < 0)
1352 		reclaim += q->size;
1353 
1354 	q->in_use -= reclaim;
1355 	q->cidx = hw_cidx;
1356 }
1357 
1358 /**
1359  *	is_imm - check whether a packet can be sent as immediate data
1360  *	@skb: the packet
1361  *
1362  *	Returns true if a packet can be sent as a WR with immediate data.
1363  */
1364 static inline int is_imm(const struct sk_buff *skb)
1365 {
1366 	return skb->len <= MAX_CTRL_WR_LEN;
1367 }
1368 
1369 /**
1370  *	ctrlq_check_stop - check if a control queue is full and should stop
1371  *	@q: the queue
1372  *	@wr: most recent WR written to the queue
1373  *
1374  *	Check if a control queue has become full and should be stopped.
1375  *	We clean up control queue descriptors very lazily, only when we are out.
1376  *	If the queue is still full after reclaiming any completed descriptors
1377  *	we suspend it and have the last WR wake it up.
1378  */
1379 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1380 {
1381 	reclaim_completed_tx_imm(&q->q);
1382 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1383 		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1384 		q->q.stops++;
1385 		q->full = 1;
1386 	}
1387 }
1388 
1389 /**
1390  *	ctrl_xmit - send a packet through an SGE control Tx queue
1391  *	@q: the control queue
1392  *	@skb: the packet
1393  *
1394  *	Send a packet through an SGE control Tx queue.  Packets sent through
1395  *	a control queue must fit entirely as immediate data.
1396  */
1397 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1398 {
1399 	unsigned int ndesc;
1400 	struct fw_wr_hdr *wr;
1401 
1402 	if (unlikely(!is_imm(skb))) {
1403 		WARN_ON(1);
1404 		dev_kfree_skb(skb);
1405 		return NET_XMIT_DROP;
1406 	}
1407 
1408 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1409 	spin_lock(&q->sendq.lock);
1410 
1411 	if (unlikely(q->full)) {
1412 		skb->priority = ndesc;                  /* save for restart */
1413 		__skb_queue_tail(&q->sendq, skb);
1414 		spin_unlock(&q->sendq.lock);
1415 		return NET_XMIT_CN;
1416 	}
1417 
1418 	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1419 	inline_tx_skb(skb, &q->q, wr);
1420 
1421 	txq_advance(&q->q, ndesc);
1422 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1423 		ctrlq_check_stop(q, wr);
1424 
1425 	ring_tx_db(q->adap, &q->q, ndesc);
1426 	spin_unlock(&q->sendq.lock);
1427 
1428 	kfree_skb(skb);
1429 	return NET_XMIT_SUCCESS;
1430 }
1431 
1432 /**
1433  *	restart_ctrlq - restart a suspended control queue
1434  *	@data: the control queue to restart
1435  *
1436  *	Resumes transmission on a suspended Tx control queue.
1437  */
1438 static void restart_ctrlq(unsigned long data)
1439 {
1440 	struct sk_buff *skb;
1441 	unsigned int written = 0;
1442 	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1443 
1444 	spin_lock(&q->sendq.lock);
1445 	reclaim_completed_tx_imm(&q->q);
1446 	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
1447 
1448 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1449 		struct fw_wr_hdr *wr;
1450 		unsigned int ndesc = skb->priority;     /* previously saved */
1451 
1452 		written += ndesc;
1453 		/* Write descriptors and free skbs outside the lock to limit
1454 		 * wait times.  q->full is still set so new skbs will be queued.
1455 		 */
1456 		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1457 		txq_advance(&q->q, ndesc);
1458 		spin_unlock(&q->sendq.lock);
1459 
1460 		inline_tx_skb(skb, &q->q, wr);
1461 		kfree_skb(skb);
1462 
1463 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1464 			unsigned long old = q->q.stops;
1465 
1466 			ctrlq_check_stop(q, wr);
1467 			if (q->q.stops != old) {          /* suspended anew */
1468 				spin_lock(&q->sendq.lock);
1469 				goto ringdb;
1470 			}
1471 		}
1472 		if (written > 16) {
1473 			ring_tx_db(q->adap, &q->q, written);
1474 			written = 0;
1475 		}
1476 		spin_lock(&q->sendq.lock);
1477 	}
1478 	q->full = 0;
1479 ringdb: if (written)
1480 		ring_tx_db(q->adap, &q->q, written);
1481 	spin_unlock(&q->sendq.lock);
1482 }
1483 
1484 /**
1485  *	t4_mgmt_tx - send a management message
1486  *	@adap: the adapter
1487  *	@skb: the packet containing the management message
1488  *
1489  *	Send a management message through control queue 0.
1490  */
1491 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1492 {
1493 	int ret;
1494 
1495 	local_bh_disable();
1496 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1497 	local_bh_enable();
1498 	return ret;
1499 }
1500 
1501 /**
1502  *	is_ofld_imm - check whether a packet can be sent as immediate data
1503  *	@skb: the packet
1504  *
1505  *	Returns true if a packet can be sent as an offload WR with immediate
1506  *	data.  We currently use the same limit as for Ethernet packets.
1507  */
1508 static inline int is_ofld_imm(const struct sk_buff *skb)
1509 {
1510 	return skb->len <= MAX_IMM_TX_PKT_LEN;
1511 }
1512 
1513 /**
1514  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
1515  *	@skb: the packet
1516  *
1517  *	Returns the number of flits needed for the given offload packet.
1518  *	These packets are already fully constructed and no additional headers
1519  *	will be added.
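 *
 *	For example, a non-immediate packet with 40 bytes of headers and three
 *	SGL entries needs 40/8 + sgl_len(3) == 5 + 5 == 10 flits.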
1520  */
1521 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1522 {
1523 	unsigned int flits, cnt;
1524 
1525 	if (is_ofld_imm(skb))
1526 		return DIV_ROUND_UP(skb->len, 8);
1527 
1528 	flits = skb_transport_offset(skb) / 8U;   /* headers */
1529 	cnt = skb_shinfo(skb)->nr_frags;
1530 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1531 		cnt++;
1532 	return flits + sgl_len(cnt);
1533 }
1534 
1535 /**
1536  *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1538  *	@q: the queue to stop
1539  *
1540  *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1541  *	inability to map packets.  A periodic timer attempts to restart
1542  *	queues so marked.
1543  */
1544 static void txq_stop_maperr(struct sge_uld_txq *q)
1545 {
1546 	q->mapping_err++;
1547 	q->q.stops++;
1548 	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1549 		q->adap->sge.txq_maperr);
1550 }
1551 
1552 /**
1553  *	ofldtxq_stop - stop an offload Tx queue that has become full
1554  *	@q: the queue to stop
1555  *	@skb: the packet causing the queue to become full
1556  *
1557  *	Stops an offload Tx queue that has become full and modifies the packet
1558  *	being written to request a wakeup.
1559  */
1560 static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb)
1561 {
1562 	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1563 
1564 	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1565 	q->q.stops++;
1566 	q->full = 1;
1567 }
1568 
1569 /**
1570  *	service_ofldq - service/restart a suspended offload queue
1571  *	@q: the offload queue
1572  *
1573  *	Services an offload Tx queue by moving packets from its Pending Send
1574  *	Queue to the Hardware TX ring.  The function starts and ends with the
1575  *	Send Queue locked, but drops the lock while putting the skb at the
1576  *	head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
1577  *	allows more skbs to be added to the Send Queue by other threads.
1578  *	The packet being processed at the head of the Pending Send Queue is
1579  *	left on the queue in case we experience DMA Mapping errors, etc.
1580  *	and need to give up and restart later.
1581  *
1582  *	service_ofldq() can be thought of as a task which opportunistically
1583  *	uses other threads execution contexts.  We use the Offload Queue
1584  *	boolean "service_ofldq_running" to make sure that only one instance
1585  *	is ever running at a time ...
1586  */
1587 static void service_ofldq(struct sge_uld_txq *q)
1588 {
1589 	u64 *pos, *before, *end;
1590 	int credits;
1591 	struct sk_buff *skb;
1592 	struct sge_txq *txq;
1593 	unsigned int left;
1594 	unsigned int written = 0;
1595 	unsigned int flits, ndesc;
1596 
1597 	/* If another thread is currently in service_ofldq() processing the
1598 	 * Pending Send Queue then there's nothing to do. Otherwise, flag
1599 	 * that we're doing the work and continue.  Examining/modifying
1600 	 * the Offload Queue boolean "service_ofldq_running" must be done
1601 	 * while holding the Pending Send Queue Lock.
1602 	 */
1603 	if (q->service_ofldq_running)
1604 		return;
1605 	q->service_ofldq_running = true;
1606 
1607 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1608 		/* We drop the lock while we're working with the skb at the
1609 		 * head of the Pending Send Queue.  This allows more skbs to
1610 		 * be added to the Pending Send Queue while we're working on
1611 		 * this one.  We don't need to lock to guard the TX Ring
1612 		 * updates because only one thread of execution is ever
1613 		 * allowed into service_ofldq() at a time.
1614 		 */
1615 		spin_unlock(&q->sendq.lock);
1616 
1617 		reclaim_completed_tx(q->adap, &q->q, false);
1618 
1619 		flits = skb->priority;                /* previously saved */
1620 		ndesc = flits_to_desc(flits);
1621 		credits = txq_avail(&q->q) - ndesc;
1622 		BUG_ON(credits < 0);
1623 		if (unlikely(credits < TXQ_STOP_THRES))
1624 			ofldtxq_stop(q, skb);
1625 
1626 		pos = (u64 *)&q->q.desc[q->q.pidx];
1627 		if (is_ofld_imm(skb))
1628 			inline_tx_skb(skb, &q->q, pos);
1629 		else if (map_skb(q->adap->pdev_dev, skb,
1630 				 (dma_addr_t *)skb->head)) {
1631 			txq_stop_maperr(q);
1632 			spin_lock(&q->sendq.lock);
1633 			break;
1634 		} else {
1635 			int last_desc, hdr_len = skb_transport_offset(skb);
1636 
			/* The WR headers may not fit within one descriptor.
1638 			 * So we need to deal with wrap-around here.
1639 			 */
1640 			before = (u64 *)pos;
1641 			end = (u64 *)pos + flits;
1642 			txq = &q->q;
1643 			pos = (void *)inline_tx_skb_header(skb, &q->q,
1644 							   (void *)pos,
1645 							   hdr_len);
1646 			if (before > (u64 *)pos) {
1647 				left = (u8 *)end - (u8 *)txq->stat;
1648 				end = (void *)txq->desc + left;
1649 			}
1650 
1651 			/* If current position is already at the end of the
1652 			 * ofld queue, reset the current to point to
1653 			 * start of the queue and update the end ptr as well.
1654 			 */
1655 			if (pos == (u64 *)txq->stat) {
1656 				left = (u8 *)end - (u8 *)txq->stat;
1657 				end = (void *)txq->desc + left;
1658 				pos = (void *)txq->desc;
1659 			}
1660 
1661 			write_sgl(skb, &q->q, (void *)pos,
1662 				  end, hdr_len,
1663 				  (dma_addr_t *)skb->head);
1664 #ifdef CONFIG_NEED_DMA_MAP_STATE
1665 			skb->dev = q->adap->port[0];
1666 			skb->destructor = deferred_unmap_destructor;
1667 #endif
1668 			last_desc = q->q.pidx + ndesc - 1;
1669 			if (last_desc >= q->q.size)
1670 				last_desc -= q->q.size;
1671 			q->q.sdesc[last_desc].skb = skb;
1672 		}
1673 
1674 		txq_advance(&q->q, ndesc);
1675 		written += ndesc;
1676 		if (unlikely(written > 32)) {
1677 			ring_tx_db(q->adap, &q->q, written);
1678 			written = 0;
1679 		}
1680 
1681 		/* Reacquire the Pending Send Queue Lock so we can unlink the
1682 		 * skb we've just successfully transferred to the TX Ring and
1683 		 * loop for the next skb which may be at the head of the
1684 		 * Pending Send Queue.
1685 		 */
1686 		spin_lock(&q->sendq.lock);
1687 		__skb_unlink(skb, &q->sendq);
1688 		if (is_ofld_imm(skb))
1689 			kfree_skb(skb);
1690 	}
1691 	if (likely(written))
1692 		ring_tx_db(q->adap, &q->q, written);
1693 
	/* Indicate that no thread is processing the Pending Send Queue
1695 	 * currently.
1696 	 */
1697 	q->service_ofldq_running = false;
1698 }
1699 
1700 /**
1701  *	ofld_xmit - send a packet through an offload queue
1702  *	@q: the Tx offload queue
1703  *	@skb: the packet
1704  *
1705  *	Send an offload packet through an SGE offload queue.
1706  */
1707 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
1708 {
1709 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
1710 	spin_lock(&q->sendq.lock);
1711 
1712 	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
1713 	 * that results in this new skb being the only one on the queue, start
1714 	 * servicing it.  If there are other skbs already on the list, then
1715 	 * either the queue is currently being processed or it's been stopped
1716 	 * for some reason and it'll be restarted at a later time.  Restart
1717 	 * paths are triggered by events like experiencing a DMA Mapping Error
1718 	 * or filling the Hardware TX Ring.
1719 	 */
1720 	__skb_queue_tail(&q->sendq, skb);
1721 	if (q->sendq.qlen == 1)
1722 		service_ofldq(q);
1723 
1724 	spin_unlock(&q->sendq.lock);
1725 	return NET_XMIT_SUCCESS;
1726 }
1727 
1728 /**
1729  *	restart_ofldq - restart a suspended offload queue
1730  *	@data: the offload queue to restart
1731  *
1732  *	Resumes transmission on a suspended Tx offload queue.
1733  */
1734 static void restart_ofldq(unsigned long data)
1735 {
1736 	struct sge_uld_txq *q = (struct sge_uld_txq *)data;
1737 
1738 	spin_lock(&q->sendq.lock);
1739 	q->full = 0;            /* the queue actually is completely empty now */
1740 	service_ofldq(q);
1741 	spin_unlock(&q->sendq.lock);
1742 }
1743 
1744 /**
1745  *	skb_txq - return the Tx queue an offload packet should use
1746  *	@skb: the packet
1747  *
1748  *	Returns the Tx queue an offload packet should use as indicated by bits
1749  *	1-15 in the packet's queue_mapping.
1750  */
1751 static inline unsigned int skb_txq(const struct sk_buff *skb)
1752 {
1753 	return skb->queue_mapping >> 1;
1754 }
1755 
1756 /**
1757  *	is_ctrl_pkt - return whether an offload packet is a control packet
1758  *	@skb: the packet
1759  *
1760  *	Returns whether an offload packet should use an OFLD or a CTRL
1761  *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
1762  */
1763 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1764 {
1765 	return skb->queue_mapping & 1;
1766 }
1767 
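/*
 * Dispatch an offload packet to the appropriate Tx queue.  Bit 0 of the
 * packet's queue_mapping selects a control (CTRL) vs. an offload/crypto
 * queue, and bits 1-15 select the queue index within that set.
 */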
1768 static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
1769 			   unsigned int tx_uld_type)
1770 {
1771 	struct sge_uld_txq_info *txq_info;
1772 	struct sge_uld_txq *txq;
1773 	unsigned int idx = skb_txq(skb);
1774 
1775 	if (unlikely(is_ctrl_pkt(skb))) {
1776 		/* Single ctrl queue is a requirement for LE workaround path */
1777 		if (adap->tids.nsftids)
1778 			idx = 0;
1779 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1780 	}
1781 
1782 	txq_info = adap->sge.uld_txq_info[tx_uld_type];
1783 	if (unlikely(!txq_info)) {
1784 		WARN_ON(true);
1785 		return NET_XMIT_DROP;
1786 	}
1787 
1788 	txq = &txq_info->uldtxq[idx];
1789 	return ofld_xmit(txq, skb);
1790 }
1791 
1792 /**
1793  *	t4_ofld_send - send an offload packet
1794  *	@adap: the adapter
1795  *	@skb: the packet
1796  *
1797  *	Sends an offload packet.  We use the packet queue_mapping to select the
1798  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1799  *	should be sent as regular or control, bits 1-15 select the queue.
1800  */
1801 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1802 {
1803 	int ret;
1804 
1805 	local_bh_disable();
1806 	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
1807 	local_bh_enable();
1808 	return ret;
1809 }
1810 
1811 /**
1812  *	cxgb4_ofld_send - send an offload packet
1813  *	@dev: the net device
1814  *	@skb: the packet
1815  *
 *	Sends an offload packet.  This is an exported version of t4_ofld_send(),
1817  *	intended for ULDs.
1818  */
1819 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1820 {
1821 	return t4_ofld_send(netdev2adap(dev), skb);
1822 }
1823 EXPORT_SYMBOL(cxgb4_ofld_send);
1824 
1825 /**
1826  *	t4_crypto_send - send crypto packet
1827  *	@adap: the adapter
1828  *	@skb: the packet
1829  *
 *	Sends a crypto packet.  We use the packet queue_mapping to select the
1831  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1832  *	should be sent as regular or control, bits 1-15 select the queue.
1833  */
1834 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
1835 {
1836 	int ret;
1837 
1838 	local_bh_disable();
1839 	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
1840 	local_bh_enable();
1841 	return ret;
1842 }
1843 
1844 /**
1845  *	cxgb4_crypto_send - send crypto packet
1846  *	@dev: the net device
1847  *	@skb: the packet
1848  *
 *	Sends a crypto packet.  This is an exported version of t4_crypto_send(),
1850  *	intended for ULDs.
1851  */
1852 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
1853 {
1854 	return t4_crypto_send(netdev2adap(dev), skb);
1855 }
1856 EXPORT_SYMBOL(cxgb4_crypto_send);
1857 
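/*
 * Attach the page fragments of a packet gather list to an skb, skipping the
 * first @offset bytes of the initial fragment.  A reference is taken on the
 * last page because it is still owned by the Free List.
 */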
1858 static inline void copy_frags(struct sk_buff *skb,
1859 			      const struct pkt_gl *gl, unsigned int offset)
1860 {
1861 	int i;
1862 
1863 	/* usually there's just one frag */
1864 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1865 			     gl->frags[0].offset + offset,
1866 			     gl->frags[0].size - offset);
1867 	skb_shinfo(skb)->nr_frags = gl->nfrags;
1868 	for (i = 1; i < gl->nfrags; i++)
1869 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1870 				     gl->frags[i].offset,
1871 				     gl->frags[i].size);
1872 
1873 	/* get a reference to the last page, we don't own it */
1874 	get_page(gl->frags[gl->nfrags - 1].page);
1875 }
1876 
1877 /**
1878  *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1879  *	@gl: the gather list
1880  *	@skb_len: size of sk_buff main body if it carries fragments
1881  *	@pull_len: amount of data to move to the sk_buff's main body
1882  *
1883  *	Builds an sk_buff from the given packet gather list.  Returns the
1884  *	sk_buff or %NULL if sk_buff allocation failed.
1885  */
1886 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1887 				   unsigned int skb_len, unsigned int pull_len)
1888 {
1889 	struct sk_buff *skb;
1890 
1891 	/*
1892 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
	 * size, which is expected since Rx buffers are at least a page in size.
1894 	 * In this case packets up to RX_COPY_THRES have only one fragment.
1895 	 */
1896 	if (gl->tot_len <= RX_COPY_THRES) {
1897 		skb = dev_alloc_skb(gl->tot_len);
1898 		if (unlikely(!skb))
1899 			goto out;
1900 		__skb_put(skb, gl->tot_len);
1901 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1902 	} else {
1903 		skb = dev_alloc_skb(skb_len);
1904 		if (unlikely(!skb))
1905 			goto out;
1906 		__skb_put(skb, pull_len);
1907 		skb_copy_to_linear_data(skb, gl->va, pull_len);
1908 
1909 		copy_frags(skb, gl, pull_len);
1910 		skb->len = gl->tot_len;
1911 		skb->data_len = skb->len - pull_len;
1912 		skb->truesize += skb->data_len;
1913 	}
1914 out:	return skb;
1915 }
1916 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1917 
1918 /**
1919  *	t4_pktgl_free - free a packet gather list
1920  *	@gl: the gather list
1921  *
1922  *	Releases the pages of a packet gather list.  We do not own the last
1923  *	page on the list and do not free it.
1924  */
1925 static void t4_pktgl_free(const struct pkt_gl *gl)
1926 {
1927 	int n;
1928 	const struct page_frag *p;
1929 
1930 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1931 		put_page(p->page);
1932 }
1933 
1934 /*
1935  * Process an MPS trace packet.  Give it an unused protocol number so it won't
1936  * be delivered to anyone and send it to the stack for capture.
1937  */
1938 static noinline int handle_trace_pkt(struct adapter *adap,
1939 				     const struct pkt_gl *gl)
1940 {
1941 	struct sk_buff *skb;
1942 
1943 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1944 	if (unlikely(!skb)) {
1945 		t4_pktgl_free(gl);
1946 		return 0;
1947 	}
1948 
1949 	if (is_t4(adap->params.chip))
1950 		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
1951 	else
1952 		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1953 
1954 	skb_reset_mac_header(skb);
1955 	skb->protocol = htons(0xffff);
1956 	skb->dev = adap->port[0];
1957 	netif_receive_skb(skb);
1958 	return 0;
1959 }
1960 
1961 /**
1962  * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
1963  * @adap: the adapter
1964  * @hwtstamps: time stamp structure to update
 * @sgetstamp: 60-bit IQE timestamp
 *
 * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks.
 * Convert it to a ktime_t and store it in @hwtstamps.
 */
1970 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
1971 				     struct skb_shared_hwtstamps *hwtstamps,
1972 				     u64 sgetstamp)
1973 {
1974 	u64 ns;
1975 	u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
1976 
1977 	ns = div_u64(tmp, adap->params.vpd.cclk);
1978 
1979 	memset(hwtstamps, 0, sizeof(*hwtstamps));
1980 	hwtstamps->hwtstamp = ns_to_ktime(ns);
1981 }
1982 
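/*
 * Deliver a received packet to the stack via GRO.  The gather list pages are
 * attached to an skb obtained from napi_get_frags() and handed to
 * napi_gro_frags(); the LRO/GRO statistics are updated based on the result.
 */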
1983 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1984 		   const struct cpl_rx_pkt *pkt)
1985 {
1986 	struct adapter *adapter = rxq->rspq.adap;
1987 	struct sge *s = &adapter->sge;
1988 	struct port_info *pi;
1989 	int ret;
1990 	struct sk_buff *skb;
1991 
1992 	skb = napi_get_frags(&rxq->rspq.napi);
1993 	if (unlikely(!skb)) {
1994 		t4_pktgl_free(gl);
1995 		rxq->stats.rx_drops++;
1996 		return;
1997 	}
1998 
1999 	copy_frags(skb, gl, s->pktshift);
2000 	skb->len = gl->tot_len - s->pktshift;
2001 	skb->data_len = skb->len;
2002 	skb->truesize += skb->data_len;
2003 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2004 	skb_record_rx_queue(skb, rxq->rspq.idx);
2005 	pi = netdev_priv(skb->dev);
2006 	if (pi->rxtstamp)
2007 		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
2008 					 gl->sgetstamp);
2009 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
2010 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2011 			     PKT_HASH_TYPE_L3);
2012 
2013 	if (unlikely(pkt->vlan_ex)) {
2014 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2015 		rxq->stats.vlan_ex++;
2016 	}
2017 	ret = napi_gro_frags(&rxq->rspq.napi);
2018 	if (ret == GRO_HELD)
2019 		rxq->stats.lro_pkts++;
2020 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
2021 		rxq->stats.lro_merged++;
2022 	rxq->stats.pkts++;
2023 	rxq->stats.rx_cso++;
2024 }
2025 
2026 /**
2027  *	t4_ethrx_handler - process an ingress ethernet packet
2028  *	@q: the response queue that received the packet
2029  *	@rsp: the response queue descriptor holding the RX_PKT message
2030  *	@si: the gather list of packet fragments
2031  *
2032  *	Process an ingress ethernet packet and deliver it to the stack.
2033  */
2034 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
2035 		     const struct pkt_gl *si)
2036 {
2037 	bool csum_ok;
2038 	struct sk_buff *skb;
2039 	const struct cpl_rx_pkt *pkt;
2040 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2041 	struct sge *s = &q->adap->sge;
2042 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
2043 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
2044 	u16 err_vec;
2045 	struct port_info *pi;
2046 
2047 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
2048 		return handle_trace_pkt(q->adap, si);
2049 
2050 	pkt = (const struct cpl_rx_pkt *)rsp;
2051 	/* Compressed error vector is enabled for T6 only */
2052 	if (q->adap->params.tp.rx_pkt_encap)
2053 		err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
2054 	else
2055 		err_vec = be16_to_cpu(pkt->err_vec);
2056 
2057 	csum_ok = pkt->csum_calc && !err_vec &&
2058 		  (q->netdev->features & NETIF_F_RXCSUM);
2059 	if ((pkt->l2info & htonl(RXF_TCP_F)) &&
2060 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
2061 		do_gro(rxq, si, pkt);
2062 		return 0;
2063 	}
2064 
2065 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
2066 	if (unlikely(!skb)) {
2067 		t4_pktgl_free(si);
2068 		rxq->stats.rx_drops++;
2069 		return 0;
2070 	}
2071 
2072 	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
2073 	skb->protocol = eth_type_trans(skb, q->netdev);
2074 	skb_record_rx_queue(skb, q->idx);
2075 	if (skb->dev->features & NETIF_F_RXHASH)
2076 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2077 			     PKT_HASH_TYPE_L3);
2078 
2079 	rxq->stats.pkts++;
2080 
2081 	pi = netdev_priv(skb->dev);
2082 	if (pi->rxtstamp)
2083 		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
2084 					 si->sgetstamp);
2085 	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
2086 		if (!pkt->ip_frag) {
2087 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2088 			rxq->stats.rx_cso++;
2089 		} else if (pkt->l2info & htonl(RXF_IP_F)) {
2090 			__sum16 c = (__force __sum16)pkt->csum;
2091 			skb->csum = csum_unfold(c);
2092 			skb->ip_summed = CHECKSUM_COMPLETE;
2093 			rxq->stats.rx_cso++;
2094 		}
2095 	} else {
2096 		skb_checksum_none_assert(skb);
2097 #ifdef CONFIG_CHELSIO_T4_FCOE
2098 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
2099 			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
2100 
2101 		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
2102 			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
2103 			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
2104 				if (q->adap->params.tp.rx_pkt_encap)
2105 					csum_ok = err_vec &
2106 						  T6_COMPR_RXERR_SUM_F;
2107 				else
2108 					csum_ok = err_vec & RXERR_CSUM_F;
2109 				if (!csum_ok)
2110 					skb->ip_summed = CHECKSUM_UNNECESSARY;
2111 			}
2112 		}
2113 
2114 #undef CPL_RX_PKT_FLAGS
2115 #endif /* CONFIG_CHELSIO_T4_FCOE */
2116 	}
2117 
2118 	if (unlikely(pkt->vlan_ex)) {
2119 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2120 		rxq->stats.vlan_ex++;
2121 	}
2122 	skb_mark_napi_id(skb, &q->napi);
2123 	netif_receive_skb(skb);
2124 	return 0;
2125 }
2126 
2127 /**
2128  *	restore_rx_bufs - put back a packet's Rx buffers
2129  *	@si: the packet gather list
2130  *	@q: the SGE free list
2131  *	@frags: number of FL buffers to restore
2132  *
2133  *	Puts back on an FL the Rx buffers associated with @si.  The buffers
2134  *	have already been unmapped and are left unmapped, we mark them so to
2135  *	prevent further unmapping attempts.
2136  *
2137  *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and we
2139  *	need to come back to it later.  This is a very rare event and there's
2140  *	no effort to make this particularly efficient.
2141  */
2142 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
2143 			    int frags)
2144 {
2145 	struct rx_sw_desc *d;
2146 
2147 	while (frags--) {
2148 		if (q->cidx == 0)
2149 			q->cidx = q->size - 1;
2150 		else
2151 			q->cidx--;
2152 		d = &q->sdesc[q->cidx];
2153 		d->page = si->frags[frags].page;
2154 		d->dma_addr |= RX_UNMAPPED_BUF;
2155 		q->avail++;
2156 	}
2157 }
2158 
2159 /**
2160  *	is_new_response - check if a response is newly written
2161  *	@r: the response descriptor
2162  *	@q: the response queue
2163  *
2164  *	Returns true if a response descriptor contains a yet unprocessed
2165  *	response.
2166  */
2167 static inline bool is_new_response(const struct rsp_ctrl *r,
2168 				   const struct sge_rspq *q)
2169 {
2170 	return (r->type_gen >> RSPD_GEN_S) == q->gen;
2171 }
2172 
2173 /**
2174  *	rspq_next - advance to the next entry in a response queue
2175  *	@q: the queue
2176  *
2177  *	Updates the state of a response queue to advance it to the next entry.
2178  */
2179 static inline void rspq_next(struct sge_rspq *q)
2180 {
2181 	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
2182 	if (unlikely(++q->cidx == q->size)) {
2183 		q->cidx = 0;
2184 		q->gen ^= 1;
2185 		q->cur_desc = q->desc;
2186 	}
2187 }
2188 
2189 /**
2190  *	process_responses - process responses from an SGE response queue
2191  *	@q: the ingress queue to process
2192  *	@budget: how many responses can be processed in this round
2193  *
2194  *	Process responses from an SGE response queue up to the supplied budget.
2195  *	Responses include received packets as well as control messages from FW
2196  *	or HW.
2197  *
2198  *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage, use a fairly
2200  *	long delay to help recovery.
2201  */
2202 static int process_responses(struct sge_rspq *q, int budget)
2203 {
2204 	int ret, rsp_type;
2205 	int budget_left = budget;
2206 	const struct rsp_ctrl *rc;
2207 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2208 	struct adapter *adapter = q->adap;
2209 	struct sge *s = &adapter->sge;
2210 
2211 	while (likely(budget_left)) {
2212 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2213 		if (!is_new_response(rc, q)) {
2214 			if (q->flush_handler)
2215 				q->flush_handler(q);
2216 			break;
2217 		}
2218 
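		/* Ensure we read the response contents only after we have
		 * observed the new Generation bit above.
		 */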
2219 		dma_rmb();
2220 		rsp_type = RSPD_TYPE_G(rc->type_gen);
2221 		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
2222 			struct page_frag *fp;
2223 			struct pkt_gl si;
2224 			const struct rx_sw_desc *rsd;
2225 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
2226 
2227 			if (len & RSPD_NEWBUF_F) {
2228 				if (likely(q->offset > 0)) {
2229 					free_rx_bufs(q->adap, &rxq->fl, 1);
2230 					q->offset = 0;
2231 				}
2232 				len = RSPD_LEN_G(len);
2233 			}
2234 			si.tot_len = len;
2235 
2236 			/* gather packet fragments */
2237 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
2238 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
2239 				bufsz = get_buf_size(adapter, rsd);
2240 				fp->page = rsd->page;
2241 				fp->offset = q->offset;
2242 				fp->size = min(bufsz, len);
2243 				len -= fp->size;
2244 				if (!len)
2245 					break;
2246 				unmap_rx_buf(q->adap, &rxq->fl);
2247 			}
2248 
2249 			si.sgetstamp = SGE_TIMESTAMP_G(
2250 					be64_to_cpu(rc->last_flit));
2251 			/*
2252 			 * Last buffer remains mapped so explicitly make it
2253 			 * coherent for CPU access.
2254 			 */
2255 			dma_sync_single_for_cpu(q->adap->pdev_dev,
2256 						get_buf_addr(rsd),
2257 						fp->size, DMA_FROM_DEVICE);
2258 
2259 			si.va = page_address(si.frags[0].page) +
2260 				si.frags[0].offset;
2261 			prefetch(si.va);
2262 
2263 			si.nfrags = frags + 1;
2264 			ret = q->handler(q, q->cur_desc, &si);
2265 			if (likely(ret == 0))
2266 				q->offset += ALIGN(fp->size, s->fl_align);
2267 			else
2268 				restore_rx_bufs(&si, &rxq->fl, frags);
2269 		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
2270 			ret = q->handler(q, q->cur_desc, NULL);
2271 		} else {
2272 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
2273 		}
2274 
2275 		if (unlikely(ret)) {
2276 			/* couldn't process descriptor, back off for recovery */
2277 			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
2278 			break;
2279 		}
2280 
2281 		rspq_next(q);
2282 		budget_left--;
2283 	}
2284 
2285 	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
2286 		__refill_fl(q->adap, &rxq->fl);
2287 	return budget - budget_left;
2288 }
2289 
2290 /**
2291  *	napi_rx_handler - the NAPI handler for Rx processing
2292  *	@napi: the napi instance
2293  *	@budget: how many packets we can process in this round
2294  *
2295  *	Handler for new data events when using NAPI.  This does not need any
2296  *	locking or protection from interrupts as data interrupts are off at
2297  *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
2299  *	a separate handler).
2300  */
2301 static int napi_rx_handler(struct napi_struct *napi, int budget)
2302 {
2303 	unsigned int params;
2304 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2305 	int work_done;
2306 	u32 val;
2307 
2308 	work_done = process_responses(q, budget);
2309 	if (likely(work_done < budget)) {
2310 		int timer_index;
2311 
2312 		napi_complete_done(napi, work_done);
2313 		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
2314 
2315 		if (q->adaptive_rx) {
2316 			if (work_done > max(timer_pkt_quota[timer_index],
2317 					    MIN_NAPI_WORK))
2318 				timer_index = (timer_index + 1);
2319 			else
2320 				timer_index = timer_index - 1;
2321 
2322 			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2323 			q->next_intr_params =
2324 					QINTR_TIMER_IDX_V(timer_index) |
2325 					QINTR_CNT_EN_V(0);
2326 			params = q->next_intr_params;
2327 		} else {
2328 			params = q->next_intr_params;
2329 			q->next_intr_params = q->intr_params;
2330 		}
	} else {
		params = QINTR_TIMER_IDX_V(7);
	}
2333 
2334 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
2335 
2336 	/* If we don't have access to the new User GTS (T5+), use the old
2337 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2338 	 */
2339 	if (unlikely(q->bar2_addr == NULL)) {
2340 		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2341 			     val | INGRESSQID_V((u32)q->cntxt_id));
2342 	} else {
2343 		writel(val | INGRESSQID_V(q->bar2_qid),
2344 		       q->bar2_addr + SGE_UDB_GTS);
2345 		wmb();
2346 	}
2347 	return work_done;
2348 }
2349 
2350 /*
2351  * The MSI-X interrupt handler for an SGE response queue.
2352  */
2353 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2354 {
2355 	struct sge_rspq *q = cookie;
2356 
2357 	napi_schedule(&q->napi);
2358 	return IRQ_HANDLED;
2359 }
2360 
2361 /*
2362  * Process the indirect interrupt entries in the interrupt queue and kick off
2363  * NAPI for each queue that has generated an entry.
2364  */
2365 static unsigned int process_intrq(struct adapter *adap)
2366 {
2367 	unsigned int credits;
2368 	const struct rsp_ctrl *rc;
2369 	struct sge_rspq *q = &adap->sge.intrq;
2370 	u32 val;
2371 
2372 	spin_lock(&adap->sge.intrq_lock);
2373 	for (credits = 0; ; credits++) {
2374 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2375 		if (!is_new_response(rc, q))
2376 			break;
2377 
2378 		dma_rmb();
2379 		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
2380 			unsigned int qid = ntohl(rc->pldbuflen_qid);
2381 
2382 			qid -= adap->sge.ingr_start;
2383 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
2384 		}
2385 
2386 		rspq_next(q);
2387 	}
2388 
2389 	val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2390 
2391 	/* If we don't have access to the new User GTS (T5+), use the old
2392 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2393 	 */
2394 	if (unlikely(q->bar2_addr == NULL)) {
2395 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2396 			     val | INGRESSQID_V(q->cntxt_id));
2397 	} else {
2398 		writel(val | INGRESSQID_V(q->bar2_qid),
2399 		       q->bar2_addr + SGE_UDB_GTS);
2400 		wmb();
2401 	}
2402 	spin_unlock(&adap->sge.intrq_lock);
2403 	return credits;
2404 }
2405 
2406 /*
2407  * The MSI interrupt handler, which handles data events from SGE response queues
2408  * as well as error and other async events as they all use the same MSI vector.
2409  */
2410 static irqreturn_t t4_intr_msi(int irq, void *cookie)
2411 {
2412 	struct adapter *adap = cookie;
2413 
2414 	if (adap->flags & MASTER_PF)
2415 		t4_slow_intr_handler(adap);
2416 	process_intrq(adap);
2417 	return IRQ_HANDLED;
2418 }
2419 
2420 /*
2421  * Interrupt handler for legacy INTx interrupts.
2422  * Handles data events from SGE response queues as well as error and other
2423  * async events as they all use the same interrupt line.
2424  */
2425 static irqreturn_t t4_intr_intx(int irq, void *cookie)
2426 {
2427 	struct adapter *adap = cookie;
2428 
2429 	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2430 	if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
2431 	    process_intrq(adap))
2432 		return IRQ_HANDLED;
2433 	return IRQ_NONE;             /* probably shared interrupt */
2434 }
2435 
2436 /**
2437  *	t4_intr_handler - select the top-level interrupt handler
2438  *	@adap: the adapter
2439  *
2440  *	Selects the top-level interrupt handler based on the type of interrupts
2441  *	(MSI-X, MSI, or INTx).
2442  */
2443 irq_handler_t t4_intr_handler(struct adapter *adap)
2444 {
2445 	if (adap->flags & USING_MSIX)
2446 		return t4_sge_intr_msix;
2447 	if (adap->flags & USING_MSI)
2448 		return t4_intr_msi;
2449 	return t4_intr_intx;
2450 }
2451 
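/*
 * Periodic Rx queue check.  Scan the starving Free List bitmap and reschedule
 * NAPI for any Free List that is still starving so it can be replenished; on
 * the Master PF also run the SGE Ingress DMA monitor.
 */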
2452 static void sge_rx_timer_cb(unsigned long data)
2453 {
2454 	unsigned long m;
2455 	unsigned int i;
2456 	struct adapter *adap = (struct adapter *)data;
2457 	struct sge *s = &adap->sge;
2458 
2459 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2460 		for (m = s->starving_fl[i]; m; m &= m - 1) {
2461 			struct sge_eth_rxq *rxq;
2462 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2463 			struct sge_fl *fl = s->egr_map[id];
2464 
2465 			clear_bit(id, s->starving_fl);
2466 			smp_mb__after_atomic();
2467 
2468 			if (fl_starving(adap, fl)) {
2469 				rxq = container_of(fl, struct sge_eth_rxq, fl);
2470 				if (napi_reschedule(&rxq->rspq.napi))
2471 					fl->starving++;
2472 				else
2473 					set_bit(id, s->starving_fl);
2474 			}
2475 		}
2476 	/* The remainder of the SGE RX Timer Callback routine is dedicated to
2477 	 * global Master PF activities like checking for chip ingress stalls,
2478 	 * etc.
2479 	 */
2480 	if (!(adap->flags & MASTER_PF))
2481 		goto done;
2482 
2483 	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
2484 
2485 done:
2486 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2487 }
2488 
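/*
 * Periodic Tx queue check.  Resume offload Tx queues that were suspended by a
 * DMA mapping error and reclaim up to MAX_TIMER_TX_RECLAIM completed
 * descriptors from the Ethernet Tx queues, rotating through them round-robin.
 */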
2489 static void sge_tx_timer_cb(unsigned long data)
2490 {
2491 	unsigned long m;
2492 	unsigned int i, budget;
2493 	struct adapter *adap = (struct adapter *)data;
2494 	struct sge *s = &adap->sge;
2495 
2496 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2497 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
2498 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2499 			struct sge_uld_txq *txq = s->egr_map[id];
2500 
2501 			clear_bit(id, s->txq_maperr);
2502 			tasklet_schedule(&txq->qresume_tsk);
2503 		}
2504 
2505 	budget = MAX_TIMER_TX_RECLAIM;
2506 	i = s->ethtxq_rover;
2507 	do {
2508 		struct sge_eth_txq *q = &s->ethtxq[i];
2509 
2510 		if (q->q.in_use &&
2511 		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2512 		    __netif_tx_trylock(q->txq)) {
2513 			int avail = reclaimable(&q->q);
2514 
2515 			if (avail) {
2516 				if (avail > budget)
2517 					avail = budget;
2518 
2519 				free_tx_desc(adap, &q->q, avail, true);
2520 				q->q.in_use -= avail;
2521 				budget -= avail;
2522 			}
2523 			__netif_tx_unlock(q->txq);
2524 		}
2525 
2526 		if (++i >= s->ethqsets)
2527 			i = 0;
2528 	} while (budget && i != s->ethtxq_rover);
2529 	s->ethtxq_rover = i;
2530 	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2531 }
2532 
2533 /**
2534  *	bar2_address - return the BAR2 address for an SGE Queue's Registers
2535  *	@adapter: the adapter
2536  *	@qid: the SGE Queue ID
2537  *	@qtype: the SGE Queue Type (Egress or Ingress)
2538  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2539  *
2540  *	Returns the BAR2 address for the SGE Queue Registers associated with
2541  *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
2542  *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2543  *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2544  *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
2545  */
2546 static void __iomem *bar2_address(struct adapter *adapter,
2547 				  unsigned int qid,
2548 				  enum t4_bar2_qtype qtype,
2549 				  unsigned int *pbar2_qid)
2550 {
2551 	u64 bar2_qoffset;
2552 	int ret;
2553 
2554 	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
2555 				&bar2_qoffset, pbar2_qid);
2556 	if (ret)
2557 		return NULL;
2558 
2559 	return adapter->bar2 + bar2_qoffset;
2560 }
2561 
2562 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
2563  * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
2564  */
2565 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2566 		     struct net_device *dev, int intr_idx,
2567 		     struct sge_fl *fl, rspq_handler_t hnd,
2568 		     rspq_flush_handler_t flush_hnd, int cong)
2569 {
2570 	int ret, flsz = 0;
2571 	struct fw_iq_cmd c;
2572 	struct sge *s = &adap->sge;
2573 	struct port_info *pi = netdev_priv(dev);
2574 
2575 	/* Size needs to be multiple of 16, including status entry. */
2576 	iq->size = roundup(iq->size, 16);
2577 
2578 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2579 			      &iq->phys_addr, NULL, 0,
2580 			      dev_to_node(adap->pdev_dev));
2581 	if (!iq->desc)
2582 		return -ENOMEM;
2583 
2584 	memset(&c, 0, sizeof(c));
2585 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
2586 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2587 			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
2588 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
2589 				 FW_LEN16(c));
2590 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2591 		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
2592 		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
2593 		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
2594 		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2595 							-intr_idx - 1));
2596 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
2597 		FW_IQ_CMD_IQGTSMODE_F |
2598 		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
2599 		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
2600 	c.iqsize = htons(iq->size);
2601 	c.iqaddr = cpu_to_be64(iq->phys_addr);
2602 	if (cong >= 0)
2603 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
2604 
2605 	if (fl) {
2606 		enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
2607 
2608 		/* Allocate the ring for the hardware free list (with space
2609 		 * for its status page) along with the associated software
2610 		 * descriptor ring.  The free list size needs to be a multiple
2611 		 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
2613 		 * (fl_starve_thres - 1).
2614 		 */
2615 		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
2616 			fl->size = s->fl_starve_thres - 1 + 2 * 8;
2617 		fl->size = roundup(fl->size, 8);
2618 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2619 				      sizeof(struct rx_sw_desc), &fl->addr,
2620 				      &fl->sdesc, s->stat_len,
2621 				      dev_to_node(adap->pdev_dev));
2622 		if (!fl->desc)
2623 			goto fl_nomem;
2624 
2625 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2626 		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
2627 					     FW_IQ_CMD_FL0FETCHRO_F |
2628 					     FW_IQ_CMD_FL0DATARO_F |
2629 					     FW_IQ_CMD_FL0PADEN_F);
2630 		if (cong >= 0)
2631 			c.iqns_to_fl0congen |=
2632 				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
2633 				      FW_IQ_CMD_FL0CONGCIF_F |
2634 				      FW_IQ_CMD_FL0CONGEN_F);
2635 		/* In T6, for egress queue type FL there is internal overhead
		 * of 16B for the header going into the FLM module.  Hence the maximum
2637 		 * allowed burst size is 448 bytes.  For T4/T5, the hardware
2638 		 * doesn't coalesce fetch requests if more than 64 bytes of
2639 		 * Free List pointers are provided, so we use a 128-byte Fetch
2640 		 * Burst Minimum there (T6 implements coalescing so we can use
2641 		 * the smaller 64-byte value there).
2642 		 */
2643 		c.fl0dcaen_to_fl0cidxfthresh =
2644 			htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
2645 						   FETCHBURSTMIN_128B_X :
2646 						   FETCHBURSTMIN_64B_X) |
2647 			      FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
2648 						   FETCHBURSTMAX_512B_X :
2649 						   FETCHBURSTMAX_256B_X));
2650 		c.fl0size = htons(flsz);
2651 		c.fl0addr = cpu_to_be64(fl->addr);
2652 	}
2653 
2654 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2655 	if (ret)
2656 		goto err;
2657 
2658 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2659 	iq->cur_desc = iq->desc;
2660 	iq->cidx = 0;
2661 	iq->gen = 1;
2662 	iq->next_intr_params = iq->intr_params;
2663 	iq->cntxt_id = ntohs(c.iqid);
2664 	iq->abs_id = ntohs(c.physiqid);
2665 	iq->bar2_addr = bar2_address(adap,
2666 				     iq->cntxt_id,
2667 				     T4_BAR2_QTYPE_INGRESS,
2668 				     &iq->bar2_qid);
2669 	iq->size--;                           /* subtract status entry */
2670 	iq->netdev = dev;
2671 	iq->handler = hnd;
2672 	iq->flush_handler = flush_hnd;
2673 
2674 	memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
2675 	skb_queue_head_init(&iq->lro_mgr.lroq);
2676 
2677 	/* set offset to -1 to distinguish ingress queues without FL */
2678 	iq->offset = fl ? 0 : -1;
2679 
2680 	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2681 
2682 	if (fl) {
2683 		fl->cntxt_id = ntohs(c.fl0id);
2684 		fl->avail = fl->pend_cred = 0;
2685 		fl->pidx = fl->cidx = 0;
2686 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2687 		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2688 
2689 		/* Note, we must initialize the BAR2 Free List User Doorbell
2690 		 * information before refilling the Free List!
2691 		 */
2692 		fl->bar2_addr = bar2_address(adap,
2693 					     fl->cntxt_id,
2694 					     T4_BAR2_QTYPE_EGRESS,
2695 					     &fl->bar2_qid);
2696 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2697 	}
2698 
2699 	/* For T5 and later we attempt to set up the Congestion Manager values
2700 	 * of the new RX Ethernet Queue.  This should really be handled by
2701 	 * firmware because it's more complex than any host driver wants to
2702 	 * get involved with and it's different per chip and this is almost
2703 	 * certainly wrong.  Firmware would be wrong as well, but it would be
2704 	 * a lot easier to fix in one place ...  For now we do something very
2705 	 * simple (and hopefully less wrong).
2706 	 */
2707 	if (!is_t4(adap->params.chip) && cong >= 0) {
2708 		u32 param, val, ch_map = 0;
2709 		int i;
2710 		u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
2711 
2712 		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2713 			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
2714 			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
2715 		if (cong == 0) {
2716 			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
2717 		} else {
2718 			val =
2719 			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
2720 			for (i = 0; i < 4; i++) {
2721 				if (cong & (1 << i))
2722 					ch_map |= 1 << (i << cng_ch_bits_log);
2723 			}
2724 			val |= CONMCTXT_CNGCHMAP_V(ch_map);
2725 		}
2726 		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
2727 				    &param, &val);
2728 		if (ret)
			dev_warn(adap->pdev_dev,
				 "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
				 iq->cntxt_id, -ret);
2732 	}
2733 
2734 	return 0;
2735 
2736 fl_nomem:
2737 	ret = -ENOMEM;
2738 err:
2739 	if (iq->desc) {
2740 		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2741 				  iq->desc, iq->phys_addr);
2742 		iq->desc = NULL;
2743 	}
2744 	if (fl && fl->desc) {
2745 		kfree(fl->sdesc);
2746 		fl->sdesc = NULL;
2747 		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2748 				  fl->desc, fl->addr);
2749 		fl->desc = NULL;
2750 	}
2751 	return ret;
2752 }
2753 
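/*
 * Initialize the common software state of a Tx queue and enter it into the
 * reverse egress queue map.
 */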
2754 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2755 {
2756 	q->cntxt_id = id;
2757 	q->bar2_addr = bar2_address(adap,
2758 				    q->cntxt_id,
2759 				    T4_BAR2_QTYPE_EGRESS,
2760 				    &q->bar2_qid);
2761 	q->in_use = 0;
2762 	q->cidx = q->pidx = 0;
2763 	q->stops = q->restarts = 0;
2764 	q->stat = (void *)&q->desc[q->size];
2765 	spin_lock_init(&q->db_lock);
2766 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
2767 }
2768 
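/**
 *	t4_sge_alloc_eth_txq - allocate an Ethernet Tx queue
 *	@adap: the adapter
 *	@txq: the queue structure to initialize
 *	@dev: the network device the queue serves
 *	@netdevq: the netdev Tx queue associated with @txq
 *	@iqid: the ID of the ingress queue associated with this egress queue
 *
 *	Allocates the descriptor ring and asks the firmware to create the
 *	corresponding hardware egress queue.  Returns 0 on success or a
 *	negative errno.
 */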
2769 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2770 			 struct net_device *dev, struct netdev_queue *netdevq,
2771 			 unsigned int iqid)
2772 {
2773 	int ret, nentries;
2774 	struct fw_eq_eth_cmd c;
2775 	struct sge *s = &adap->sge;
2776 	struct port_info *pi = netdev_priv(dev);
2777 
2778 	/* Add status entries */
2779 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2780 
2781 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2782 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2783 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2784 			netdev_queue_numa_node_read(netdevq));
2785 	if (!txq->q.desc)
2786 		return -ENOMEM;
2787 
2788 	memset(&c, 0, sizeof(c));
2789 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
2790 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2791 			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
2792 			    FW_EQ_ETH_CMD_VFN_V(0));
2793 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
2794 				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
2795 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2796 			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
2797 	c.fetchszm_to_iqid =
2798 		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2799 		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
2800 		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
2801 	c.dcaen_to_eqsize =
2802 		htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2803 		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2804 		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2805 		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2806 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2807 
2808 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2809 	if (ret) {
2810 		kfree(txq->q.sdesc);
2811 		txq->q.sdesc = NULL;
2812 		dma_free_coherent(adap->pdev_dev,
2813 				  nentries * sizeof(struct tx_desc),
2814 				  txq->q.desc, txq->q.phys_addr);
2815 		txq->q.desc = NULL;
2816 		return ret;
2817 	}
2818 
2819 	txq->q.q_type = CXGB4_TXQ_ETH;
2820 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
2821 	txq->txq = netdevq;
2822 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2823 	txq->mapping_err = 0;
2824 	return 0;
2825 }
2826 
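/**
 *	t4_sge_alloc_ctrl_txq - allocate a control Tx queue
 *	@adap: the adapter
 *	@txq: the queue structure to initialize
 *	@dev: the network device the queue serves
 *	@iqid: the ID of the ingress queue associated with this egress queue
 *	@cmplqid: the completion ingress queue ID
 *
 *	Allocates the descriptor ring and asks the firmware to create the
 *	hardware control egress queue.  Returns 0 on success or a negative
 *	errno.
 */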
2827 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2828 			  struct net_device *dev, unsigned int iqid,
2829 			  unsigned int cmplqid)
2830 {
2831 	int ret, nentries;
2832 	struct fw_eq_ctrl_cmd c;
2833 	struct sge *s = &adap->sge;
2834 	struct port_info *pi = netdev_priv(dev);
2835 
2836 	/* Add status entries */
2837 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2838 
2839 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2840 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2841 				 NULL, 0, dev_to_node(adap->pdev_dev));
2842 	if (!txq->q.desc)
2843 		return -ENOMEM;
2844 
2845 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
2846 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2847 			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
2848 			    FW_EQ_CTRL_CMD_VFN_V(0));
2849 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
2850 				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
2851 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
2852 	c.physeqid_pkd = htonl(0);
2853 	c.fetchszm_to_iqid =
2854 		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2855 		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
2856 		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
2857 	c.dcaen_to_eqsize =
2858 		htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2859 		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2860 		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2861 		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
2862 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2863 
2864 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2865 	if (ret) {
2866 		dma_free_coherent(adap->pdev_dev,
2867 				  nentries * sizeof(struct tx_desc),
2868 				  txq->q.desc, txq->q.phys_addr);
2869 		txq->q.desc = NULL;
2870 		return ret;
2871 	}
2872 
2873 	txq->q.q_type = CXGB4_TXQ_CTRL;
2874 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
2875 	txq->adap = adap;
2876 	skb_queue_head_init(&txq->sendq);
2877 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2878 	txq->full = 0;
2879 	return 0;
2880 }
2881 
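/**
 *	t4_sge_mod_ctrl_txq - change the completion queue of a control Tx queue
 *	@adap: the adapter
 *	@eqid: the control egress queue to modify
 *	@cmplqid: the new completion ingress queue ID
 *
 *	Rebinds an existing control egress queue to a new completion ingress
 *	queue via the DMAQ_EQ_CMPLIQID_CTRL firmware parameter.
 */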
2882 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
2883 			unsigned int cmplqid)
2884 {
2885 	u32 param, val;
2886 
2887 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2888 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
2889 		 FW_PARAMS_PARAM_YZ_V(eqid));
2890 	val = cmplqid;
2891 	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
2892 }
2893 
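/**
 *	t4_sge_alloc_uld_txq - allocate an offload (ULD) Tx queue
 *	@adap: the adapter
 *	@txq: the queue structure to initialize
 *	@dev: the network device the queue serves
 *	@iqid: the ID of the ingress queue associated with this egress queue
 *	@uld_type: the ULD type (CXGB4_TX_OFLD or CXGB4_TX_CRYPTO)
 *
 *	Allocates the descriptor ring and asks the firmware to create the
 *	hardware egress queue, using FW_EQ_CTRL_CMD for crypto queues and
 *	FW_EQ_OFLD_CMD otherwise.  Returns 0 on success or a negative errno.
 */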
2894 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
2895 			 struct net_device *dev, unsigned int iqid,
2896 			 unsigned int uld_type)
2897 {
2898 	int ret, nentries;
2899 	struct fw_eq_ofld_cmd c;
2900 	struct sge *s = &adap->sge;
2901 	struct port_info *pi = netdev_priv(dev);
2902 	int cmd = FW_EQ_OFLD_CMD;
2903 
2904 	/* Add status entries */
2905 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2906 
2907 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2908 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2909 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2910 			NUMA_NO_NODE);
2911 	if (!txq->q.desc)
2912 		return -ENOMEM;
2913 
2914 	memset(&c, 0, sizeof(c));
2915 	if (unlikely(uld_type == CXGB4_TX_CRYPTO))
2916 		cmd = FW_EQ_CTRL_CMD;
2917 	c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
2918 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2919 			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
2920 			    FW_EQ_OFLD_CMD_VFN_V(0));
2921 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
2922 				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
2923 	c.fetchszm_to_iqid =
2924 		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2925 		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
2926 		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
2927 	c.dcaen_to_eqsize =
2928 		htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2929 		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2930 		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2931 		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
2932 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2933 
2934 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2935 	if (ret) {
2936 		kfree(txq->q.sdesc);
2937 		txq->q.sdesc = NULL;
2938 		dma_free_coherent(adap->pdev_dev,
2939 				  nentries * sizeof(struct tx_desc),
2940 				  txq->q.desc, txq->q.phys_addr);
2941 		txq->q.desc = NULL;
2942 		return ret;
2943 	}
2944 
2945 	txq->q.q_type = CXGB4_TXQ_ULD;
2946 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
2947 	txq->adap = adap;
2948 	skb_queue_head_init(&txq->sendq);
2949 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2950 	txq->full = 0;
2951 	txq->mapping_err = 0;
2952 	return 0;
2953 }
2954 
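/**
 *	free_txq - free the resources of a Tx queue
 *	@adap: the adapter
 *	@q: the Tx queue to free
 *
 *	Releases the descriptor ring of a Tx queue.  The caller is responsible
 *	for freeing the hardware egress queue and any software descriptors.
 */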
2955 void free_txq(struct adapter *adap, struct sge_txq *q)
2956 {
2957 	struct sge *s = &adap->sge;
2958 
2959 	dma_free_coherent(adap->pdev_dev,
2960 			  q->size * sizeof(struct tx_desc) + s->stat_len,
2961 			  q->desc, q->phys_addr);
2962 	q->cntxt_id = 0;
2963 	q->sdesc = NULL;
2964 	q->desc = NULL;
2965 }
2966 
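/**
 *	free_rspq_fl - free a response queue and its associated free list
 *	@adap: the adapter
 *	@rq: the response queue to free
 *	@fl: the associated free list, or %NULL if there is none
 *
 *	Releases the hardware ingress queue (and free list), any buffers still
 *	held by the free list, and the DMA rings backing both queues.
 */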
2967 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2968 		  struct sge_fl *fl)
2969 {
2970 	struct sge *s = &adap->sge;
2971 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2972 
2973 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2974 	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
2975 		   rq->cntxt_id, fl_id, 0xffff);
2976 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2977 			  rq->desc, rq->phys_addr);
2978 	netif_napi_del(&rq->napi);
2979 	rq->netdev = NULL;
2980 	rq->cntxt_id = rq->abs_id = 0;
2981 	rq->desc = NULL;
2982 
2983 	if (fl) {
2984 		free_rx_bufs(adap, fl, fl->avail);
2985 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2986 				  fl->desc, fl->addr);
2987 		kfree(fl->sdesc);
2988 		fl->sdesc = NULL;
2989 		fl->cntxt_id = 0;
2990 		fl->desc = NULL;
2991 	}
2992 }
2993 
2994 /**
2995  *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
2996  *      @adap: the adapter
2997  *      @n: number of queues
2998  *      @q: pointer to first queue
2999  *
3000  *      Release the resources of a consecutive block of offload Rx queues.
3001  */
3002 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
3003 {
3004 	for ( ; n; n--, q++)
3005 		if (q->rspq.desc)
3006 			free_rspq_fl(adap, &q->rspq,
3007 				     q->fl.size ? &q->fl : NULL);
3008 }
3009 
3010 /**
3011  *	t4_free_sge_resources - free SGE resources
3012  *	@adap: the adapter
3013  *
3014  *	Frees resources used by the SGE queue sets.
3015  */
3016 void t4_free_sge_resources(struct adapter *adap)
3017 {
3018 	int i;
3019 	struct sge_eth_rxq *eq;
3020 	struct sge_eth_txq *etq;
3021 
3022 	/* stop all Rx queues in order to start them draining */
3023 	for (i = 0; i < adap->sge.ethqsets; i++) {
3024 		eq = &adap->sge.ethrxq[i];
3025 		if (eq->rspq.desc)
3026 			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
3027 				   FW_IQ_TYPE_FL_INT_CAP,
3028 				   eq->rspq.cntxt_id,
3029 				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
3030 				   0xffff);
3031 	}
3032 
3033 	/* clean up Ethernet Tx/Rx queues */
3034 	for (i = 0; i < adap->sge.ethqsets; i++) {
3035 		eq = &adap->sge.ethrxq[i];
3036 		if (eq->rspq.desc)
3037 			free_rspq_fl(adap, &eq->rspq,
3038 				     eq->fl.size ? &eq->fl : NULL);
3039 
3040 		etq = &adap->sge.ethtxq[i];
3041 		if (etq->q.desc) {
3042 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3043 				       etq->q.cntxt_id);
3044 			__netif_tx_lock_bh(etq->txq);
3045 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3046 			__netif_tx_unlock_bh(etq->txq);
3047 			kfree(etq->q.sdesc);
3048 			free_txq(adap, &etq->q);
3049 		}
3050 	}
3051 
3052 	/* clean up control Tx queues */
3053 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
3054 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
3055 
3056 		if (cq->q.desc) {
3057 			tasklet_kill(&cq->qresume_tsk);
3058 			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
3059 					cq->q.cntxt_id);
3060 			__skb_queue_purge(&cq->sendq);
3061 			free_txq(adap, &cq->q);
3062 		}
3063 	}
3064 
3065 	if (adap->sge.fw_evtq.desc)
3066 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
3067 
3068 	if (adap->sge.intrq.desc)
3069 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
3070 
3071 	/* clear the reverse egress queue map */
3072 	memset(adap->sge.egr_map, 0,
3073 	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
3074 }
3075 
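/**
 *	t4_sge_start - enable SGE operation
 *	@adap: the adapter
 *
 *	Starts the periodic Rx and Tx queue check timers.
 */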
3076 void t4_sge_start(struct adapter *adap)
3077 {
3078 	adap->sge.ethtxq_rover = 0;
3079 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
3080 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
3081 }
3082 
3083 /**
3084  *	t4_sge_stop - disable SGE operation
3085  *	@adap: the adapter
3086  *
3087  *	Stop tasklets and timers associated with the DMA engine.  Note that
3088  *	this is effective only if measures have been taken to disable any HW
3089  *	events that may restart them.
3090  */
3091 void t4_sge_stop(struct adapter *adap)
3092 {
3093 	int i;
3094 	struct sge *s = &adap->sge;
3095 
3096 	if (in_interrupt())  /* actions below require waiting */
3097 		return;
3098 
3099 	if (s->rx_timer.function)
3100 		del_timer_sync(&s->rx_timer);
3101 	if (s->tx_timer.function)
3102 		del_timer_sync(&s->tx_timer);
3103 
3104 	if (is_offload(adap)) {
3105 		struct sge_uld_txq_info *txq_info;
3106 
3107 		txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3108 		if (txq_info) {
3109 			struct sge_uld_txq *txq = txq_info->uldtxq;
3110 
3111 			for_each_ofldtxq(&adap->sge, i) {
				if (txq[i].q.desc)
					tasklet_kill(&txq[i].qresume_tsk);
3114 			}
3115 		}
3116 	}
3117 
3118 	if (is_pci_uld(adap)) {
3119 		struct sge_uld_txq_info *txq_info;
3120 
3121 		txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
3122 		if (txq_info) {
3123 			struct sge_uld_txq *txq = txq_info->uldtxq;
3124 
3125 			for_each_ofldtxq(&adap->sge, i) {
				if (txq[i].q.desc)
					tasklet_kill(&txq[i].qresume_tsk);
3128 			}
3129 		}
3130 	}
3131 
3132 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
3133 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
3134 
3135 		if (cq->q.desc)
3136 			tasklet_kill(&cq->qresume_tsk);
3137 	}
3138 }
3139 
3140 /**
3141  *	t4_sge_init_soft - grab core SGE values needed by SGE code
3142  *	@adap: the adapter
3143  *
 *	Read the SGE operating parameters the driver depends on and verify
 *	that we can live with them.
3146  */
3147 
3148 static int t4_sge_init_soft(struct adapter *adap)
3149 {
3150 	struct sge *s = &adap->sge;
3151 	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
3152 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
3153 	u32 ingress_rx_threshold;
3154 
3155 	/*
3156 	 * Verify that CPL messages are going to the Ingress Queue for
3157 	 * process_responses() and that only packet data is going to the
3158 	 * Free Lists.
3159 	 */
3160 	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
3161 	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
3162 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
3163 		return -EINVAL;
3164 	}
3165 
3166 	/*
3167 	 * Validate the Host Buffer Register Array indices that we want to
3168 	 * use ...
3169 	 *
3170 	 * XXX Note that we should really read through the Host Buffer Size
3171 	 * XXX register array and find the indices of the Buffer Sizes which
3172 	 * XXX meet our needs!
3173 	 */
3174 	#define READ_FL_BUF(x) \
3175 		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
3176 
3177 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
3178 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
3179 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
3180 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
3181 
3182 	/* We only bother using the Large Page logic if the Large Page Buffer
3183 	 * is larger than our Page Size Buffer.
3184 	 */
3185 	if (fl_large_pg <= fl_small_pg)
3186 		fl_large_pg = 0;
3187 
3188 	#undef READ_FL_BUF
3189 
3190 	/* The Page Size Buffer must be exactly equal to our Page Size and the
3191 	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
3192 	 */
3193 	if (fl_small_pg != PAGE_SIZE ||
3194 	    (fl_large_pg & (fl_large_pg-1)) != 0) {
3195 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
3196 			fl_small_pg, fl_large_pg);
3197 		return -EINVAL;
3198 	}
3199 	if (fl_large_pg)
3200 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
3201 
3202 	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
3203 	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
3204 		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
3205 			fl_small_mtu, fl_large_mtu);
3206 		return -EINVAL;
3207 	}
3208 
3209 	/*
3210 	 * Retrieve our RX interrupt holdoff timer values and counter
3211 	 * threshold values from the SGE parameters.
3212 	 */
3213 	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
3214 	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
3215 	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
3216 	s->timer_val[0] = core_ticks_to_us(adap,
3217 		TIMERVALUE0_G(timer_value_0_and_1));
3218 	s->timer_val[1] = core_ticks_to_us(adap,
3219 		TIMERVALUE1_G(timer_value_0_and_1));
3220 	s->timer_val[2] = core_ticks_to_us(adap,
3221 		TIMERVALUE2_G(timer_value_2_and_3));
3222 	s->timer_val[3] = core_ticks_to_us(adap,
3223 		TIMERVALUE3_G(timer_value_2_and_3));
3224 	s->timer_val[4] = core_ticks_to_us(adap,
3225 		TIMERVALUE4_G(timer_value_4_and_5));
3226 	s->timer_val[5] = core_ticks_to_us(adap,
3227 		TIMERVALUE5_G(timer_value_4_and_5));
3228 
3229 	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
3230 	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
3231 	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
3232 	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
3233 	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
3234 
3235 	return 0;
3236 }
3237 
3238 /**
3239  *     t4_sge_init - initialize SGE
3240  *     @adap: the adapter
3241  *
3242  *     Perform low-level SGE code initialization needed every time after a
3243  *     chip reset.
3244  */
3245 int t4_sge_init(struct adapter *adap)
3246 {
3247 	struct sge *s = &adap->sge;
3248 	u32 sge_control, sge_conm_ctrl;
3249 	int ret, egress_threshold;
3250 
3251 	/*
3252 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
3253 	 * t4_fixup_host_params().
3254 	 */
3255 	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
3256 	s->pktshift = PKTSHIFT_G(sge_control);
3257 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
3258 
3259 	s->fl_align = t4_fl_pkt_align(adap);
3260 	ret = t4_sge_init_soft(adap);
3261 	if (ret < 0)
3262 		return ret;
3263 
3264 	/*
3265 	 * A FL with <= fl_starve_thres buffers is starving and a periodic
3266 	 * timer will attempt to refill it.  This needs to be larger than the
3267 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
3268 	 * stuck waiting for new packets while the SGE is waiting for us to
3269 	 * give it more Free List entries.  (Note that the SGE's Egress
3270 	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
3271 	 * there was only a single field to control this.  For T5 there's the
3272 	 * original field which now only applies to Unpacked Mode Free List
3273 	 * buffers and a new field which only applies to Packed Mode Free List
3274 	 * buffers.
3275 	 */
3276 	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
3277 	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
3278 	case CHELSIO_T4:
3279 		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
3280 		break;
3281 	case CHELSIO_T5:
3282 		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3283 		break;
3284 	case CHELSIO_T6:
3285 		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3286 		break;
3287 	default:
3288 		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
3289 			CHELSIO_CHIP_VERSION(adap->params.chip));
3290 		return -EINVAL;
3291 	}
3292 	s->fl_starve_thres = 2*egress_threshold + 1;
3293 
3294 	t4_idma_monitor_init(adap, &s->idma_monitor);
3295 
	/* Set up timers used for recurring callbacks to process RX and TX
3297 	 * administrative tasks.
3298 	 */
3299 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
3300 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
3301 
3302 	spin_lock_init(&s->intrq_lock);
3303 
3304 	return 0;
3305 }
3306