/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
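
/*
 * Illustration (assuming 4 KB pages): FL0 carves each order-0 page into two
 * 2 KB chunks, while FL1 allocates order-1 (8 KB) pages and hands them out
 * as single 8 KB chunks.  With 64 KB pages both lists use order-0
 * allocations and FL1 chunks grow to 16 KB.
 */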

#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
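
/*
 * Worked example: with SGE_NUM_GENBITS == 2 the table above maps 1-15 flits
 * to 1 descriptor, 16-29 flits to 2, and so on, matching
 * 1 + (flits - 2) / 14, i.e. WR_FLITS - 1 == 14 in that configuration.
 */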

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The compiler
 *	optimizes away unnecessary code if this returns true.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, the information necessary to unmap an
 *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 *	descriptors (the physical addresses of the various data buffers), and
 *	the SW descriptor state (assorted indices).  The send functions
 *	initialize the indices for the first packet descriptor so we can unmap
 *	the buffers held in the first Tx descriptor here, and we have enough
 *	information at this point to set the state for the next Tx descriptor.
 *
 *	Note that it is possible to clean up the first descriptor of a packet
 *	before the send routines have written the next descriptors, but this
 *	race does not cause any problem.  We just end up writing the unmapping
 *	info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), DMA_TO_DEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]),
			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
			       DMA_TO_DEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}
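
/*
 * SGL layout note (see also write_sgl() below): each struct sg_ent packs two
 * address/length pairs, so @j above toggles between the two slots of the
 * current entry and @sgp advances to the next entry only every other buffer,
 * consistent with sgl_len() charging three flits per pair of buffers.
 */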

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop) {
				dev_consume_skb_any(d->skb);
				d->skb = NULL;
			}
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@chunk: maximum number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
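
/*
 * In other words: counting descriptors that are still pending reclaim
 * (q->processed - q->cleaned) as free, the queue may be restarted once fewer
 * than half of its descriptors remain in use.
 */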

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
				       q->alloc_size, DMA_FROM_DEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, DMA_FROM_DEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va:  buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		return -ENOMEM;

	dma_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}
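
/*
 * In both descriptor writers above, the dma_wmb() orders the buffer address
 * words ahead of len_gen/gen2: the generation bits are what mark the
 * descriptor as valid to the SGE, so they must not become visible to the
 * device before the address they describe.
 */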

static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
				       0, q->alloc_size, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
			__free_pages(q->pg_chunk.page, order);
			q->pg_chunk.page = NULL;
			return -EIO;
		}
		q->pg_chunk.mapping = mapping;
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}
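
/*
 * Page-chunk accounting, as set up above: the last SGE_PG_RSVD bytes of each
 * allocation hold a driver-private counter (p_cnt) of the chunks handed out
 * from it, separate from the page refcount taken via get_page().  The Rx
 * paths defer dma_unmap_page() until this counter drops to zero, so the
 * single DMA mapping covering the whole allocation stays valid while any of
 * its chunks is still outstanding.
 */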

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}
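
/*
 * Note that the free-list doorbell is batched: it is only rung once the
 * credits pending return reach a quarter of the credits currently on the
 * list, which limits the MMIO write rate during steady-state refill.
 */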

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			dma_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			dma_sync_single_for_device(&adap->pdev->dev, mapping,
						   q->buf_size - SGE_PG_RSVD,
						   DMA_FROM_DEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adap: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	dma_wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	return p;
}

/**
 *	t3_reset_qset - reset an SGE qset
 *	@q: the queue set
 *
 *	Reset the qset structure.  The NAPI structure is preserved in the
 *	event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}


/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}
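
/*
 * Examples: sgl_len(1) = 2, sgl_len(2) = 3, sgl_len(3) = 5 -- a full sg_ent
 * (two address/length pairs) occupies three flits, and an odd trailing entry
 * needs two (its length flit plus one address flit).
 */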

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			dma_sync_single_for_cpu(&adap->pdev->dev,
						dma_unmap_addr(sd, dma_addr),
						len, DMA_FROM_DEVICE);
			memcpy(skb->data, sd->skb->data, len);
			dma_sync_single_for_device(&adap->pdev->dev,
						   dma_unmap_addr(sd, dma_addr),
						   len, DMA_FROM_DEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, DMA_FROM_DEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@q: the queue
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise we attach the original buffer as a page fragment to a fresh
 *	sk_buff.  If a positive drop threshold is supplied packets are dropped
 *	and their buffers recycled if (a) the number of remaining buffers is
 *	under the threshold and the packet is too big to copy, or (b) there's
 *	no system memory.
 *
 * 	Note: this function is similar to @get_packet but deals with Rx buffers
 * 	that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);

	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
						len, DMA_FROM_DEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
						   len, DMA_FROM_DEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
				DMA_FROM_DEVICE);
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
			       fl->alloc_size, DMA_FROM_DEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 * 	Returns the number of Tx descriptors needed for the given Ethernet
 * 	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}
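
/*
 * Worked example: a non-TSO packet with linear data and two page fragments
 * uses three SGL buffers, so flits = sgl_len(3) + 2 = 7, which
 * flits_to_desc() maps to a single Tx descriptor in either SGE_NUM_GENBITS
 * configuration.
 */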

/*	map_skb - map a packet main body and its page fragments
 *	@pdev: the PCI device
 *	@skb: the packet
 *	@addr: placeholder to save the mapped addresses
 *
 *	map the main body of an sk_buff and its page fragments, if any.
 */
static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	if (skb_headlen(skb)) {
		*addr = dma_map_single(&pdev->dev, skb->data,
				       skb_headlen(skb), DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *addr))
			goto out_err;
		addr++;
	}

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
					 DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *addr))
			goto unwind;
		addr++;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
			       DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb),
			 DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@addr: the list of the mapped addresses
 *
 *	Copies the scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int write_sgl(const struct sk_buff *skb,
				     struct sg_ent *sgp, unsigned char *start,
				     unsigned int len, const dma_addr_t *addr)
{
	unsigned int i, j = 0, k = 0, nfrags;

	if (len) {
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[j++] = cpu_to_be64(addr[k++]);
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(addr[k++]);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}
/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
 *	here: the HW may go to sleep just after we check, but in that case
 *	the interrupt handler will detect the outstanding Tx packet and ring
 *	the doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
1047f7917c00SJeff Kirsher static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1048f7917c00SJeff Kirsher {
1049f7917c00SJeff Kirsher #if USE_GTS
1050f7917c00SJeff Kirsher 	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1051f7917c00SJeff Kirsher 	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1052f7917c00SJeff Kirsher 		set_bit(TXQ_LAST_PKT_DB, &q->flags);
1053f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_KDOORBELL,
1054f7917c00SJeff Kirsher 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1055f7917c00SJeff Kirsher 	}
1056f7917c00SJeff Kirsher #else
1057f7917c00SJeff Kirsher 	wmb();			/* write descriptors before telling HW */
1058f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_KDOORBELL,
1059f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1060f7917c00SJeff Kirsher #endif
1061f7917c00SJeff Kirsher }
1062f7917c00SJeff Kirsher 
1063f7917c00SJeff Kirsher static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1064f7917c00SJeff Kirsher {
1065f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 2
1066f7917c00SJeff Kirsher 	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1067f7917c00SJeff Kirsher #endif
1068f7917c00SJeff Kirsher }
1069f7917c00SJeff Kirsher 
1070f7917c00SJeff Kirsher /**
1071f7917c00SJeff Kirsher  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1072f7917c00SJeff Kirsher  *	@ndesc: number of Tx descriptors spanned by the SGL
1073f7917c00SJeff Kirsher  *	@skb: the packet corresponding to the WR
1074f7917c00SJeff Kirsher  *	@d: first Tx descriptor to be written
1075f7917c00SJeff Kirsher  *	@pidx: index within the Tx queue of the first descriptor above
1076f7917c00SJeff Kirsher  *	@q: the SGE Tx queue
1077f7917c00SJeff Kirsher  *	@sgl: the SGL
1078f7917c00SJeff Kirsher  *	@flits: number of flits to the start of the SGL in the first descriptor
1079f7917c00SJeff Kirsher  *	@sgl_flits: the SGL size in flits
1080f7917c00SJeff Kirsher  *	@gen: the Tx descriptor generation
1081f7917c00SJeff Kirsher  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1082f7917c00SJeff Kirsher  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1083f7917c00SJeff Kirsher  *
1084f7917c00SJeff Kirsher  *	Write a work request header and an associated SGL.  If the SGL is
1085f7917c00SJeff Kirsher  *	small enough to fit into one Tx descriptor it has already been written
1086f7917c00SJeff Kirsher  *	and we just need to write the WR header.  Otherwise we distribute the
1087f7917c00SJeff Kirsher  *	SGL across the number of descriptors it spans.
1088f7917c00SJeff Kirsher  */
1089f7917c00SJeff Kirsher static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1090f7917c00SJeff Kirsher 			     struct tx_desc *d, unsigned int pidx,
1091f7917c00SJeff Kirsher 			     const struct sge_txq *q,
1092f7917c00SJeff Kirsher 			     const struct sg_ent *sgl,
1093f7917c00SJeff Kirsher 			     unsigned int flits, unsigned int sgl_flits,
1094f7917c00SJeff Kirsher 			     unsigned int gen, __be32 wr_hi,
1095f7917c00SJeff Kirsher 			     __be32 wr_lo)
1096f7917c00SJeff Kirsher {
1097f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1098f7917c00SJeff Kirsher 	struct tx_sw_desc *sd = &q->sdesc[pidx];
1099f7917c00SJeff Kirsher 
1100f7917c00SJeff Kirsher 	sd->skb = skb;
1101f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1102f7917c00SJeff Kirsher 		sd->fragidx = 0;
1103f7917c00SJeff Kirsher 		sd->addr_idx = 0;
1104f7917c00SJeff Kirsher 		sd->sflit = flits;
1105f7917c00SJeff Kirsher 	}
1106f7917c00SJeff Kirsher 
1107f7917c00SJeff Kirsher 	if (likely(ndesc == 1)) {
1108f7917c00SJeff Kirsher 		sd->eop = 1;
1109f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1110f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1111019be1cfSAlexander Duyck 		dma_wmb();
1112f7917c00SJeff Kirsher 		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1113f7917c00SJeff Kirsher 				   V_WR_GEN(gen)) | wr_lo;
1114f7917c00SJeff Kirsher 		wr_gen2(d, gen);
1115f7917c00SJeff Kirsher 	} else {
1116f7917c00SJeff Kirsher 		unsigned int ogen = gen;
1117f7917c00SJeff Kirsher 		const u64 *fp = (const u64 *)sgl;
1118f7917c00SJeff Kirsher 		struct work_request_hdr *wp = wrp;
1119f7917c00SJeff Kirsher 
1120f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1121f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1122f7917c00SJeff Kirsher 
1123f7917c00SJeff Kirsher 		while (sgl_flits) {
1124f7917c00SJeff Kirsher 			unsigned int avail = WR_FLITS - flits;
1125f7917c00SJeff Kirsher 
1126f7917c00SJeff Kirsher 			if (avail > sgl_flits)
1127f7917c00SJeff Kirsher 				avail = sgl_flits;
1128f7917c00SJeff Kirsher 			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1129f7917c00SJeff Kirsher 			sgl_flits -= avail;
1130f7917c00SJeff Kirsher 			ndesc--;
1131f7917c00SJeff Kirsher 			if (!sgl_flits)
1132f7917c00SJeff Kirsher 				break;
1133f7917c00SJeff Kirsher 
1134f7917c00SJeff Kirsher 			fp += avail;
1135f7917c00SJeff Kirsher 			d++;
1136f7917c00SJeff Kirsher 			sd->eop = 0;
1137f7917c00SJeff Kirsher 			sd++;
1138f7917c00SJeff Kirsher 			if (++pidx == q->size) {
1139f7917c00SJeff Kirsher 				pidx = 0;
1140f7917c00SJeff Kirsher 				gen ^= 1;
1141f7917c00SJeff Kirsher 				d = q->desc;
1142f7917c00SJeff Kirsher 				sd = q->sdesc;
1143f7917c00SJeff Kirsher 			}
1144f7917c00SJeff Kirsher 
1145f7917c00SJeff Kirsher 			sd->skb = skb;
1146f7917c00SJeff Kirsher 			wrp = (struct work_request_hdr *)d;
1147f7917c00SJeff Kirsher 			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1148f7917c00SJeff Kirsher 					   V_WR_SGLSFLT(1)) | wr_hi;
1149f7917c00SJeff Kirsher 			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1150f7917c00SJeff Kirsher 							sgl_flits + 1)) |
1151f7917c00SJeff Kirsher 					   V_WR_GEN(gen)) | wr_lo;
1152f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1153f7917c00SJeff Kirsher 			flits = 1;
1154f7917c00SJeff Kirsher 		}
1155f7917c00SJeff Kirsher 		sd->eop = 1;
1156f7917c00SJeff Kirsher 		wrp->wr_hi |= htonl(F_WR_EOP);
1157019be1cfSAlexander Duyck 		dma_wmb();
1158f7917c00SJeff Kirsher 		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1159f7917c00SJeff Kirsher 		wr_gen2((struct tx_desc *)wp, ogen);
1160f7917c00SJeff Kirsher 		WARN_ON(ndesc != 0);
1161f7917c00SJeff Kirsher 	}
1162f7917c00SJeff Kirsher }
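
/*
 * Sketch of the multi-descriptor case handled above, as read from the
 * loop (not additional hardware documentation): the first descriptor
 * carries the full WR header, the 'flits' header flits and up to
 * WR_FLITS - flits flits of SGL; each following descriptor starts
 * with a one-flit WR header and takes up to WR_FLITS - 1 further SGL
 * flits.  The first descriptor's wr_lo and generation bit are written
 * last, after dma_wmb(), so the hardware only sees the work request
 * once all of its descriptors are in place.
 */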
1163f7917c00SJeff Kirsher 
1164f7917c00SJeff Kirsher /**
1165f7917c00SJeff Kirsher  *	write_tx_pkt_wr - write a TX_PKT work request
1166f7917c00SJeff Kirsher  *	@adap: the adapter
1167f7917c00SJeff Kirsher  *	@skb: the packet to send
1168f7917c00SJeff Kirsher  *	@pi: the egress interface
1169f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1170f7917c00SJeff Kirsher  *	@gen: the generation value to use
1171f7917c00SJeff Kirsher  *	@q: the Tx queue
1172f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1173f7917c00SJeff Kirsher  *	@compl: the value of the COMPL bit to use
1174d0ea5cbdSJesse Brandeburg  *	@addr: the list of mapped DMA addresses for the packet buffers
1175f7917c00SJeff Kirsher  *
1176f7917c00SJeff Kirsher  *	Generate a TX_PKT work request to send the supplied packet.
1177f7917c00SJeff Kirsher  */
1178f7917c00SJeff Kirsher static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1179f7917c00SJeff Kirsher 			    const struct port_info *pi,
1180f7917c00SJeff Kirsher 			    unsigned int pidx, unsigned int gen,
1181f7917c00SJeff Kirsher 			    struct sge_txq *q, unsigned int ndesc,
1182c69fe407SArjun Vynipadath 			    unsigned int compl, const dma_addr_t *addr)
1183f7917c00SJeff Kirsher {
1184f7917c00SJeff Kirsher 	unsigned int flits, sgl_flits, cntrl, tso_info;
1185f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1186f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1187f7917c00SJeff Kirsher 	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1188f7917c00SJeff Kirsher 
1189f7917c00SJeff Kirsher 	cpl->len = htonl(skb->len);
1190f7917c00SJeff Kirsher 	cntrl = V_TXPKT_INTF(pi->port_id);
1191f7917c00SJeff Kirsher 
1192df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1193df8a39deSJiri Pirko 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1194f7917c00SJeff Kirsher 
1195f7917c00SJeff Kirsher 	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1196f7917c00SJeff Kirsher 	if (tso_info) {
1197f7917c00SJeff Kirsher 		int eth_type;
1198f7917c00SJeff Kirsher 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1199f7917c00SJeff Kirsher 
1200f7917c00SJeff Kirsher 		d->flit[2] = 0;
1201f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1202f7917c00SJeff Kirsher 		hdr->cntrl = htonl(cntrl);
1203f7917c00SJeff Kirsher 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
1204f7917c00SJeff Kirsher 		    CPL_ETH_II : CPL_ETH_II_VLAN;
1205f7917c00SJeff Kirsher 		tso_info |= V_LSO_ETH_TYPE(eth_type) |
1206f7917c00SJeff Kirsher 		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1207f7917c00SJeff Kirsher 		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1208f7917c00SJeff Kirsher 		hdr->lso_info = htonl(tso_info);
1209f7917c00SJeff Kirsher 		flits = 3;
1210f7917c00SJeff Kirsher 	} else {
1211f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1212f7917c00SJeff Kirsher 		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
1213f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1214f7917c00SJeff Kirsher 		cpl->cntrl = htonl(cntrl);
1215f7917c00SJeff Kirsher 
1216f7917c00SJeff Kirsher 		if (skb->len <= WR_LEN - sizeof(*cpl)) {
1217f7917c00SJeff Kirsher 			q->sdesc[pidx].skb = NULL;
1218f7917c00SJeff Kirsher 			if (!skb->data_len)
1219f7917c00SJeff Kirsher 				skb_copy_from_linear_data(skb, &d->flit[2],
1220f7917c00SJeff Kirsher 							  skb->len);
1221f7917c00SJeff Kirsher 			else
1222f7917c00SJeff Kirsher 				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1223f7917c00SJeff Kirsher 
1224f7917c00SJeff Kirsher 			flits = (skb->len + 7) / 8 + 2;
1225f7917c00SJeff Kirsher 			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1226f7917c00SJeff Kirsher 					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1227f7917c00SJeff Kirsher 					      | F_WR_SOP | F_WR_EOP | compl);
1228019be1cfSAlexander Duyck 			dma_wmb();
1229f7917c00SJeff Kirsher 			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1230f7917c00SJeff Kirsher 					      V_WR_TID(q->token));
1231f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1232f9ec8131SEric W. Biederman 			dev_consume_skb_any(skb);
1233f7917c00SJeff Kirsher 			return;
1234f7917c00SJeff Kirsher 		}
1235f7917c00SJeff Kirsher 
1236f7917c00SJeff Kirsher 		flits = 2;
1237f7917c00SJeff Kirsher 	}
1238f7917c00SJeff Kirsher 
1239f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1240c69fe407SArjun Vynipadath 	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1241f7917c00SJeff Kirsher 
1242f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1243f7917c00SJeff Kirsher 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1244f7917c00SJeff Kirsher 			 htonl(V_WR_TID(q->token)));
1245f7917c00SJeff Kirsher }
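
/*
 * Flit arithmetic for the immediate-data path above, as an example
 * (numbers are illustrative only): a 60-byte non-TSO frame that fits
 * within WR_LEN - sizeof(struct cpl_tx_pkt) is copied into the
 * descriptor starting at flit 2 and occupies (60 + 7) / 8 + 2 = 10
 * flits, so it needs neither an SGL nor any DMA mapping.
 */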
1246f7917c00SJeff Kirsher 
1247f7917c00SJeff Kirsher static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1248f7917c00SJeff Kirsher 				    struct sge_qset *qs, struct sge_txq *q)
1249f7917c00SJeff Kirsher {
1250f7917c00SJeff Kirsher 	netif_tx_stop_queue(txq);
1251f7917c00SJeff Kirsher 	set_bit(TXQ_ETH, &qs->txq_stopped);
1252f7917c00SJeff Kirsher 	q->stops++;
1253f7917c00SJeff Kirsher }
1254f7917c00SJeff Kirsher 
1255f7917c00SJeff Kirsher /**
1256aeed744aSYang Shen  *	t3_eth_xmit - add a packet to the Ethernet Tx queue
1257f7917c00SJeff Kirsher  *	@skb: the packet
1258f7917c00SJeff Kirsher  *	@dev: the egress net device
1259f7917c00SJeff Kirsher  *
1260f7917c00SJeff Kirsher  *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
1261f7917c00SJeff Kirsher  */
1262f7917c00SJeff Kirsher netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1263f7917c00SJeff Kirsher {
1264f7917c00SJeff Kirsher 	int qidx;
1265f7917c00SJeff Kirsher 	unsigned int ndesc, pidx, credits, gen, compl;
1266f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(dev);
1267f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1268f7917c00SJeff Kirsher 	struct netdev_queue *txq;
1269f7917c00SJeff Kirsher 	struct sge_qset *qs;
1270f7917c00SJeff Kirsher 	struct sge_txq *q;
1271c69fe407SArjun Vynipadath 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1272f7917c00SJeff Kirsher 
1273f7917c00SJeff Kirsher 	/*
1274f7917c00SJeff Kirsher 	 * The chip min packet length is 9 octets but play safe and reject
1275f7917c00SJeff Kirsher 	 * anything shorter than an Ethernet header.
1276f7917c00SJeff Kirsher 	 */
1277f7917c00SJeff Kirsher 	if (unlikely(skb->len < ETH_HLEN)) {
1278f9ec8131SEric W. Biederman 		dev_kfree_skb_any(skb);
1279f7917c00SJeff Kirsher 		return NETDEV_TX_OK;
1280f7917c00SJeff Kirsher 	}
1281f7917c00SJeff Kirsher 
1282f7917c00SJeff Kirsher 	qidx = skb_get_queue_mapping(skb);
1283f7917c00SJeff Kirsher 	qs = &pi->qs[qidx];
1284f7917c00SJeff Kirsher 	q = &qs->txq[TXQ_ETH];
1285f7917c00SJeff Kirsher 	txq = netdev_get_tx_queue(dev, qidx);
1286f7917c00SJeff Kirsher 
1287f7917c00SJeff Kirsher 	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1288f7917c00SJeff Kirsher 
1289f7917c00SJeff Kirsher 	credits = q->size - q->in_use;
1290f7917c00SJeff Kirsher 	ndesc = calc_tx_descs(skb);
1291f7917c00SJeff Kirsher 
1292f7917c00SJeff Kirsher 	if (unlikely(credits < ndesc)) {
1293f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1294f7917c00SJeff Kirsher 		dev_err(&adap->pdev->dev,
1295f7917c00SJeff Kirsher 			"%s: Tx ring %u full while queue awake!\n",
1296f7917c00SJeff Kirsher 			dev->name, q->cntxt_id & 7);
1297f7917c00SJeff Kirsher 		return NETDEV_TX_BUSY;
1298f7917c00SJeff Kirsher 	}
1299f7917c00SJeff Kirsher 
1300c69fe407SArjun Vynipadath 	/* Ethernet packets too large for immediate data must be DMA-mapped */
1301c69fe407SArjun Vynipadath 	if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
1302c69fe407SArjun Vynipadath 		if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1303c69fe407SArjun Vynipadath 			dev_kfree_skb(skb);
1304c69fe407SArjun Vynipadath 			return NETDEV_TX_OK;
1305c69fe407SArjun Vynipadath 		}
1306c69fe407SArjun Vynipadath 	}
1307c69fe407SArjun Vynipadath 
1308f7917c00SJeff Kirsher 	q->in_use += ndesc;
1309f7917c00SJeff Kirsher 	if (unlikely(credits - ndesc < q->stop_thres)) {
1310f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1311f7917c00SJeff Kirsher 
1312f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1313f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1314f7917c00SJeff Kirsher 			q->restarts++;
1315f7917c00SJeff Kirsher 			netif_tx_start_queue(txq);
1316f7917c00SJeff Kirsher 		}
1317f7917c00SJeff Kirsher 	}
1318f7917c00SJeff Kirsher 
1319f7917c00SJeff Kirsher 	gen = q->gen;
1320f7917c00SJeff Kirsher 	q->unacked += ndesc;
1321f7917c00SJeff Kirsher 	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1322f7917c00SJeff Kirsher 	q->unacked &= 7;
1323f7917c00SJeff Kirsher 	pidx = q->pidx;
1324f7917c00SJeff Kirsher 	q->pidx += ndesc;
1325f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1326f7917c00SJeff Kirsher 		q->pidx -= q->size;
1327f7917c00SJeff Kirsher 		q->gen ^= 1;
1328f7917c00SJeff Kirsher 	}
1329f7917c00SJeff Kirsher 
1330f7917c00SJeff Kirsher 	/* update port statistics */
1331bc6c47b5SVipul Pandya 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1332f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1333f7917c00SJeff Kirsher 	if (skb_shinfo(skb)->gso_size)
1334f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TSO]++;
1335df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1336f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANINS]++;
1337f7917c00SJeff Kirsher 
1338f7917c00SJeff Kirsher 	/*
1339f7917c00SJeff Kirsher 	 * We do not use Tx completion interrupts to free DMAd Tx packets.
1340f7917c00SJeff Kirsher 	 * This is good for performance but means that we rely on new Tx
1341f7917c00SJeff Kirsher 	 * packets arriving to run the destructors of completed packets,
1342f7917c00SJeff Kirsher 	 * which open up space in their sockets' send queues.  Sometimes
1343f7917c00SJeff Kirsher 	 * we do not get such new packets causing Tx to stall.  A single
1344f7917c00SJeff Kirsher 	 * UDP transmitter is a good example of this situation.  We have
1345f7917c00SJeff Kirsher 	 * a clean up timer that periodically reclaims completed packets
1346f7917c00SJeff Kirsher 	 * but it doesn't run often enough (nor do we want it to) to prevent
1347f7917c00SJeff Kirsher 	 * lengthy stalls.  A solution to this problem is to run the
1348f7917c00SJeff Kirsher 	 * destructor early, after the packet is queued but before it's DMAd.
1349f7917c00SJeff Kirsher 	 * A downside is that we lie to socket memory accounting, but the
1350f7917c00SJeff Kirsher 	 * amount of extra memory is reasonable (limited by the number of Tx
1351f7917c00SJeff Kirsher 	 * descriptors), the packets almost always do get freed quickly by
1352f7917c00SJeff Kirsher 	 * new packets, and for protocols like TCP that wait for acks to
1353f7917c00SJeff Kirsher 	 * really free up the data the extra memory matters even less.
1354f7917c00SJeff Kirsher 	 * On the positive side we run the destructors on the sending CPU
1355f7917c00SJeff Kirsher 	 * rather than on a potentially different completing CPU, usually a
1356f7917c00SJeff Kirsher 	 * good thing.  We also run them without holding our Tx queue lock,
1357f7917c00SJeff Kirsher 	 * unlike what reclaim_completed_tx() would otherwise do.
1358f7917c00SJeff Kirsher 	 *
1359f7917c00SJeff Kirsher 	 * Run the destructor before telling the DMA engine about the packet
1360f7917c00SJeff Kirsher 	 * to make sure it doesn't complete and get freed prematurely.
1361f7917c00SJeff Kirsher 	 */
1362f7917c00SJeff Kirsher 	if (likely(!skb_shared(skb)))
1363f7917c00SJeff Kirsher 		skb_orphan(skb);
1364f7917c00SJeff Kirsher 
1365c69fe407SArjun Vynipadath 	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1366f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1367f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1368f7917c00SJeff Kirsher }
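
/*
 * Worked example of the completion pacing in t3_eth_xmit() above
 * (illustrative only): if q->unacked was 6 and the packet needs 3
 * descriptors, unacked becomes 9, bit 3 is set so COMPL is requested
 * for this work request, and unacked is masked back to 1; on average
 * the hardware is asked for a Tx completion only about once every 8
 * descriptors instead of once per packet.
 */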
1369f7917c00SJeff Kirsher 
1370f7917c00SJeff Kirsher /**
1371f7917c00SJeff Kirsher  *	write_imm - write a packet into a Tx descriptor as immediate data
1372f7917c00SJeff Kirsher  *	@d: the Tx descriptor to write
1373f7917c00SJeff Kirsher  *	@skb: the packet
1374f7917c00SJeff Kirsher  *	@len: the length of packet data to write as immediate data
1375f7917c00SJeff Kirsher  *	@gen: the generation bit value to write
1376f7917c00SJeff Kirsher  *
1377f7917c00SJeff Kirsher  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1378f7917c00SJeff Kirsher  *	contains a work request at its beginning.  We must write the packet
1379f7917c00SJeff Kirsher  *	carefully so the SGE doesn't read it accidentally before it's written
1380f7917c00SJeff Kirsher  *	in its entirety.
1381f7917c00SJeff Kirsher  */
1382f7917c00SJeff Kirsher static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1383f7917c00SJeff Kirsher 			     unsigned int len, unsigned int gen)
1384f7917c00SJeff Kirsher {
1385f7917c00SJeff Kirsher 	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1386f7917c00SJeff Kirsher 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1387f7917c00SJeff Kirsher 
1388f7917c00SJeff Kirsher 	if (likely(!skb->data_len))
1389f7917c00SJeff Kirsher 		memcpy(&to[1], &from[1], len - sizeof(*from));
1390f7917c00SJeff Kirsher 	else
1391f7917c00SJeff Kirsher 		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1392f7917c00SJeff Kirsher 
1393f7917c00SJeff Kirsher 	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1394f7917c00SJeff Kirsher 					V_WR_BCNTLFLT(len & 7));
1395019be1cfSAlexander Duyck 	dma_wmb();
1396f7917c00SJeff Kirsher 	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1397f7917c00SJeff Kirsher 					V_WR_LEN((len + 7) / 8));
1398f7917c00SJeff Kirsher 	wr_gen2(d, gen);
1399f7917c00SJeff Kirsher 	kfree_skb(skb);
1400f7917c00SJeff Kirsher }
1401f7917c00SJeff Kirsher 
1402f7917c00SJeff Kirsher /**
1403f7917c00SJeff Kirsher  *	check_desc_avail - check descriptor availability on a send queue
1404f7917c00SJeff Kirsher  *	@adap: the adapter
1405f7917c00SJeff Kirsher  *	@q: the send queue
1406f7917c00SJeff Kirsher  *	@skb: the packet needing the descriptors
1407f7917c00SJeff Kirsher  *	@ndesc: the number of Tx descriptors needed
1408f7917c00SJeff Kirsher  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1409f7917c00SJeff Kirsher  *
1410f7917c00SJeff Kirsher  *	Checks if the requested number of Tx descriptors is available on an
1411f7917c00SJeff Kirsher  *	SGE send queue.  If the queue is already suspended or not enough
1412f7917c00SJeff Kirsher  *	descriptors are available the packet is queued for later transmission.
1413f7917c00SJeff Kirsher  *	Must be called with the Tx queue locked.
1414f7917c00SJeff Kirsher  *
1415f7917c00SJeff Kirsher  *	Returns 0 if enough descriptors are available, 1 if there aren't
1416f7917c00SJeff Kirsher  *	enough descriptors and the packet has been queued, and 2 if the caller
1417f7917c00SJeff Kirsher  *	needs to retry because there weren't enough descriptors at the
1418f7917c00SJeff Kirsher  *	beginning of the call but some freed up in the meantime.
1419f7917c00SJeff Kirsher  */
1420f7917c00SJeff Kirsher static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1421f7917c00SJeff Kirsher 				   struct sk_buff *skb, unsigned int ndesc,
1422f7917c00SJeff Kirsher 				   unsigned int qid)
1423f7917c00SJeff Kirsher {
1424f7917c00SJeff Kirsher 	if (unlikely(!skb_queue_empty(&q->sendq))) {
1425f7917c00SJeff Kirsher 	      addq_exit:__skb_queue_tail(&q->sendq, skb);
1426f7917c00SJeff Kirsher 		return 1;
1427f7917c00SJeff Kirsher 	}
1428f7917c00SJeff Kirsher 	if (unlikely(q->size - q->in_use < ndesc)) {
1429f7917c00SJeff Kirsher 		struct sge_qset *qs = txq_to_qset(q, qid);
1430f7917c00SJeff Kirsher 
1431f7917c00SJeff Kirsher 		set_bit(qid, &qs->txq_stopped);
14324e857c58SPeter Zijlstra 		smp_mb__after_atomic();
1433f7917c00SJeff Kirsher 
1434f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1435f7917c00SJeff Kirsher 		    test_and_clear_bit(qid, &qs->txq_stopped))
1436f7917c00SJeff Kirsher 			return 2;
1437f7917c00SJeff Kirsher 
1438f7917c00SJeff Kirsher 		q->stops++;
1439f7917c00SJeff Kirsher 		goto addq_exit;
1440f7917c00SJeff Kirsher 	}
1441f7917c00SJeff Kirsher 	return 0;
1442f7917c00SJeff Kirsher }
1443f7917c00SJeff Kirsher 
1444f7917c00SJeff Kirsher /**
1445f7917c00SJeff Kirsher  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1446f7917c00SJeff Kirsher  *	@q: the SGE control Tx queue
1447f7917c00SJeff Kirsher  *
1448f7917c00SJeff Kirsher  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1449f7917c00SJeff Kirsher  *	that send only immediate data (presently just the control queues) and
1450f7917c00SJeff Kirsher  *	thus do not have any sk_buffs to release.
1451f7917c00SJeff Kirsher  */
1452f7917c00SJeff Kirsher static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1453f7917c00SJeff Kirsher {
1454f7917c00SJeff Kirsher 	unsigned int reclaim = q->processed - q->cleaned;
1455f7917c00SJeff Kirsher 
1456f7917c00SJeff Kirsher 	q->in_use -= reclaim;
1457f7917c00SJeff Kirsher 	q->cleaned += reclaim;
1458f7917c00SJeff Kirsher }
1459f7917c00SJeff Kirsher 
1460f7917c00SJeff Kirsher static inline int immediate(const struct sk_buff *skb)
1461f7917c00SJeff Kirsher {
1462f7917c00SJeff Kirsher 	return skb->len <= WR_LEN;
1463f7917c00SJeff Kirsher }
1464f7917c00SJeff Kirsher 
1465f7917c00SJeff Kirsher /**
1466f7917c00SJeff Kirsher  *	ctrl_xmit - send a packet through an SGE control Tx queue
1467f7917c00SJeff Kirsher  *	@adap: the adapter
1468f7917c00SJeff Kirsher  *	@q: the control queue
1469f7917c00SJeff Kirsher  *	@skb: the packet
1470f7917c00SJeff Kirsher  *
1471f7917c00SJeff Kirsher  *	Send a packet through an SGE control Tx queue.  Packets sent through
1472f7917c00SJeff Kirsher  *	a control queue must fit entirely as immediate data in a single Tx
1473f7917c00SJeff Kirsher  *	descriptor and have no page fragments.
1474f7917c00SJeff Kirsher  */
1475f7917c00SJeff Kirsher static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1476f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1477f7917c00SJeff Kirsher {
1478f7917c00SJeff Kirsher 	int ret;
1479f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1480f7917c00SJeff Kirsher 
1481f7917c00SJeff Kirsher 	if (unlikely(!immediate(skb))) {
1482f7917c00SJeff Kirsher 		WARN_ON(1);
1483f7917c00SJeff Kirsher 		dev_kfree_skb(skb);
1484f7917c00SJeff Kirsher 		return NET_XMIT_SUCCESS;
1485f7917c00SJeff Kirsher 	}
1486f7917c00SJeff Kirsher 
1487f7917c00SJeff Kirsher 	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1488f7917c00SJeff Kirsher 	wrp->wr_lo = htonl(V_WR_TID(q->token));
1489f7917c00SJeff Kirsher 
1490f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1491f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1492f7917c00SJeff Kirsher 
1493f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1494f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1495f7917c00SJeff Kirsher 		if (ret == 1) {
1496f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1497f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1498f7917c00SJeff Kirsher 		}
1499f7917c00SJeff Kirsher 		goto again;
1500f7917c00SJeff Kirsher 	}
1501f7917c00SJeff Kirsher 
1502f7917c00SJeff Kirsher 	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1503f7917c00SJeff Kirsher 
1504f7917c00SJeff Kirsher 	q->in_use++;
1505f7917c00SJeff Kirsher 	if (++q->pidx >= q->size) {
1506f7917c00SJeff Kirsher 		q->pidx = 0;
1507f7917c00SJeff Kirsher 		q->gen ^= 1;
1508f7917c00SJeff Kirsher 	}
1509f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1510f7917c00SJeff Kirsher 	wmb();
1511f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_KDOORBELL,
1512f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1513f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1514f7917c00SJeff Kirsher }
1515f7917c00SJeff Kirsher 
1516f7917c00SJeff Kirsher /**
1517f7917c00SJeff Kirsher  *	restart_ctrlq - restart a suspended control queue
15185e0b8928SÍñigo Huguet  *	@w: pointer to the work associated with this handler
1519f7917c00SJeff Kirsher  *
1520f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx control queue.
1521f7917c00SJeff Kirsher  */
15225e0b8928SÍñigo Huguet static void restart_ctrlq(struct work_struct *w)
1523f7917c00SJeff Kirsher {
1524f7917c00SJeff Kirsher 	struct sk_buff *skb;
15255e0b8928SÍñigo Huguet 	struct sge_qset *qs = container_of(w, struct sge_qset,
15265e0b8928SÍñigo Huguet 					   txq[TXQ_CTRL].qresume_task);
1527f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1528f7917c00SJeff Kirsher 
1529f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1530f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1531f7917c00SJeff Kirsher 
1532f7917c00SJeff Kirsher 	while (q->in_use < q->size &&
1533f7917c00SJeff Kirsher 	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
1534f7917c00SJeff Kirsher 
1535f7917c00SJeff Kirsher 		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1536f7917c00SJeff Kirsher 
1537f7917c00SJeff Kirsher 		if (++q->pidx >= q->size) {
1538f7917c00SJeff Kirsher 			q->pidx = 0;
1539f7917c00SJeff Kirsher 			q->gen ^= 1;
1540f7917c00SJeff Kirsher 		}
1541f7917c00SJeff Kirsher 		q->in_use++;
1542f7917c00SJeff Kirsher 	}
1543f7917c00SJeff Kirsher 
1544f7917c00SJeff Kirsher 	if (!skb_queue_empty(&q->sendq)) {
1545f7917c00SJeff Kirsher 		set_bit(TXQ_CTRL, &qs->txq_stopped);
15464e857c58SPeter Zijlstra 		smp_mb__after_atomic();
1547f7917c00SJeff Kirsher 
1548f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1549f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1550f7917c00SJeff Kirsher 			goto again;
1551f7917c00SJeff Kirsher 		q->stops++;
1552f7917c00SJeff Kirsher 	}
1553f7917c00SJeff Kirsher 
1554f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1555f7917c00SJeff Kirsher 	wmb();
1556f7917c00SJeff Kirsher 	t3_write_reg(qs->adap, A_SG_KDOORBELL,
1557f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1558f7917c00SJeff Kirsher }
1559f7917c00SJeff Kirsher 
1560f7917c00SJeff Kirsher /*
1561f7917c00SJeff Kirsher  * Send a management message through control queue 0
1562f7917c00SJeff Kirsher  */
1563f7917c00SJeff Kirsher int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1564f7917c00SJeff Kirsher {
1565f7917c00SJeff Kirsher 	int ret;
1566f7917c00SJeff Kirsher 	local_bh_disable();
1567f7917c00SJeff Kirsher 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1568f7917c00SJeff Kirsher 	local_bh_enable();
1569f7917c00SJeff Kirsher 
1570f7917c00SJeff Kirsher 	return ret;
1571f7917c00SJeff Kirsher }
1572f7917c00SJeff Kirsher 
1573f7917c00SJeff Kirsher /**
1574f7917c00SJeff Kirsher  *	deferred_unmap_destructor - unmap a packet when it is freed
1575f7917c00SJeff Kirsher  *	@skb: the packet
1576f7917c00SJeff Kirsher  *
1577f7917c00SJeff Kirsher  *	This is the packet destructor used for Tx packets that need to remain
1578f7917c00SJeff Kirsher  *	mapped until they are freed rather than until their Tx descriptors are
1579f7917c00SJeff Kirsher  *	freed.
1580f7917c00SJeff Kirsher  */
1581f7917c00SJeff Kirsher static void deferred_unmap_destructor(struct sk_buff *skb)
1582f7917c00SJeff Kirsher {
1583f7917c00SJeff Kirsher 	int i;
1584f7917c00SJeff Kirsher 	const dma_addr_t *p;
1585f7917c00SJeff Kirsher 	const struct skb_shared_info *si;
1586f7917c00SJeff Kirsher 	const struct deferred_unmap_info *dui;
1587f7917c00SJeff Kirsher 
1588f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1589f7917c00SJeff Kirsher 	p = dui->addr;
1590f7917c00SJeff Kirsher 
159115dd16c2SLi RongQing 	if (skb_tail_pointer(skb) - skb_transport_header(skb))
15924489d8f5SChristophe JAILLET 		dma_unmap_single(&dui->pdev->dev, *p++,
15934489d8f5SChristophe JAILLET 				 skb_tail_pointer(skb) - skb_transport_header(skb),
15944489d8f5SChristophe JAILLET 				 DMA_TO_DEVICE);
1595f7917c00SJeff Kirsher 
1596f7917c00SJeff Kirsher 	si = skb_shinfo(skb);
1597f7917c00SJeff Kirsher 	for (i = 0; i < si->nr_frags; i++)
15984489d8f5SChristophe JAILLET 		dma_unmap_page(&dui->pdev->dev, *p++,
15994489d8f5SChristophe JAILLET 			       skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
1600f7917c00SJeff Kirsher }
1601f7917c00SJeff Kirsher 
1602f7917c00SJeff Kirsher static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1603f7917c00SJeff Kirsher 				     const struct sg_ent *sgl, int sgl_flits)
1604f7917c00SJeff Kirsher {
1605f7917c00SJeff Kirsher 	dma_addr_t *p;
1606f7917c00SJeff Kirsher 	struct deferred_unmap_info *dui;
1607f7917c00SJeff Kirsher 
1608f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1609f7917c00SJeff Kirsher 	dui->pdev = pdev;
1610f7917c00SJeff Kirsher 	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1611f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[0]);
1612f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[1]);
1613f7917c00SJeff Kirsher 	}
1614f7917c00SJeff Kirsher 	if (sgl_flits)
1615f7917c00SJeff Kirsher 		*p = be64_to_cpu(sgl->addr[0]);
1616f7917c00SJeff Kirsher }
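
/*
 * Layout shared by the two helpers above, spelled out as an example
 * (derived from the code): the deferred_unmap_info placed at
 * skb->head holds the pdev followed by a flat addr[] array containing
 * the mapping of the headers (when present) and then one DMA address
 * per page fragment.  setup_deferred_unmapping() recovers the
 * addresses two at a time from each 3-flit sg_ent, and
 * deferred_unmap_destructor() later walks the same array in the same
 * order to unmap them when the skb is finally freed.
 */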
1617f7917c00SJeff Kirsher 
1618f7917c00SJeff Kirsher /**
1619f7917c00SJeff Kirsher  *	write_ofld_wr - write an offload work request
1620f7917c00SJeff Kirsher  *	@adap: the adapter
1621f7917c00SJeff Kirsher  *	@skb: the packet to send
1622f7917c00SJeff Kirsher  *	@q: the Tx queue
1623f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1624f7917c00SJeff Kirsher  *	@gen: the generation value to use
1625f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1626d0ea5cbdSJesse Brandeburg  *	@addr: the list of mapped DMA addresses for the packet buffers
1627f7917c00SJeff Kirsher  *
1628f7917c00SJeff Kirsher  *	Write an offload work request to send the supplied packet.  The packet
1629f7917c00SJeff Kirsher  *	data already carry the work request with most fields populated.
1630f7917c00SJeff Kirsher  */
1631f7917c00SJeff Kirsher static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1632f7917c00SJeff Kirsher 			  struct sge_txq *q, unsigned int pidx,
1633c69fe407SArjun Vynipadath 			  unsigned int gen, unsigned int ndesc,
1634c69fe407SArjun Vynipadath 			  const dma_addr_t *addr)
1635f7917c00SJeff Kirsher {
1636f7917c00SJeff Kirsher 	unsigned int sgl_flits, flits;
1637f7917c00SJeff Kirsher 	struct work_request_hdr *from;
1638f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1639f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1640f7917c00SJeff Kirsher 
1641f7917c00SJeff Kirsher 	if (immediate(skb)) {
1642f7917c00SJeff Kirsher 		q->sdesc[pidx].skb = NULL;
1643f7917c00SJeff Kirsher 		write_imm(d, skb, skb->len, gen);
1644f7917c00SJeff Kirsher 		return;
1645f7917c00SJeff Kirsher 	}
1646f7917c00SJeff Kirsher 
1647f7917c00SJeff Kirsher 	/* Only TX_DATA builds SGLs */
1648f7917c00SJeff Kirsher 
1649f7917c00SJeff Kirsher 	from = (struct work_request_hdr *)skb->data;
1650f7917c00SJeff Kirsher 	memcpy(&d->flit[1], &from[1],
1651f7917c00SJeff Kirsher 	       skb_transport_offset(skb) - sizeof(*from));
1652f7917c00SJeff Kirsher 
1653f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;
1654f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1655c69fe407SArjun Vynipadath 	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1656c69fe407SArjun Vynipadath 			      skb_tail_pointer(skb) - skb_transport_header(skb),
1657c69fe407SArjun Vynipadath 			      addr);
1658f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1659f7917c00SJeff Kirsher 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1660f7917c00SJeff Kirsher 		skb->destructor = deferred_unmap_destructor;
1661f7917c00SJeff Kirsher 	}
1662f7917c00SJeff Kirsher 
1663f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1664f7917c00SJeff Kirsher 			 gen, from->wr_hi, from->wr_lo);
1665f7917c00SJeff Kirsher }
1666f7917c00SJeff Kirsher 
1667f7917c00SJeff Kirsher /**
1668f7917c00SJeff Kirsher  *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1669f7917c00SJeff Kirsher  *	@skb: the packet
1670f7917c00SJeff Kirsher  *
1671f7917c00SJeff Kirsher  * 	Returns the number of Tx descriptors needed for the given offload
1672f7917c00SJeff Kirsher  * 	packet.  These packets are already fully constructed.
1673f7917c00SJeff Kirsher  */
1674f7917c00SJeff Kirsher static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1675f7917c00SJeff Kirsher {
1676f7917c00SJeff Kirsher 	unsigned int flits, cnt;
1677f7917c00SJeff Kirsher 
1678f7917c00SJeff Kirsher 	if (skb->len <= WR_LEN)
1679f7917c00SJeff Kirsher 		return 1;	/* packet fits as immediate data */
1680f7917c00SJeff Kirsher 
1681f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;	/* headers */
1682f7917c00SJeff Kirsher 	cnt = skb_shinfo(skb)->nr_frags;
1683be8b678cSSimon Horman 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1684f7917c00SJeff Kirsher 		cnt++;
1685f7917c00SJeff Kirsher 	return flits_to_desc(flits + sgl_len(cnt));
1686f7917c00SJeff Kirsher }
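
/*
 * Example of the calculation above (numbers are illustrative only):
 * an offload packet with skb_transport_offset(skb) == 40 needs 5
 * header flits; with two page fragments and payload beyond the
 * transport header, cnt = 3 buffers need 5 more SGL flits by the same
 * accounting as write_sgl(), and flits_to_desc(5 + 5) then yields the
 * number of Tx descriptors to reserve.
 */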
1687f7917c00SJeff Kirsher 
1688f7917c00SJeff Kirsher /**
1689f7917c00SJeff Kirsher  *	ofld_xmit - send a packet through an offload queue
1690f7917c00SJeff Kirsher  *	@adap: the adapter
1691f7917c00SJeff Kirsher  *	@q: the Tx offload queue
1692f7917c00SJeff Kirsher  *	@skb: the packet
1693f7917c00SJeff Kirsher  *
1694f7917c00SJeff Kirsher  *	Send an offload packet through an SGE offload queue.
1695f7917c00SJeff Kirsher  */
1696f7917c00SJeff Kirsher static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1697f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1698f7917c00SJeff Kirsher {
1699f7917c00SJeff Kirsher 	int ret;
1700f7917c00SJeff Kirsher 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1701f7917c00SJeff Kirsher 
1702f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1703f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1704f7917c00SJeff Kirsher 
1705f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1706f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1707f7917c00SJeff Kirsher 		if (ret == 1) {
1708f7917c00SJeff Kirsher 			skb->priority = ndesc;	/* save for restart */
1709f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1710f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1711f7917c00SJeff Kirsher 		}
1712f7917c00SJeff Kirsher 		goto again;
1713f7917c00SJeff Kirsher 	}
1714f7917c00SJeff Kirsher 
1715c69fe407SArjun Vynipadath 	if (!immediate(skb) &&
1716c69fe407SArjun Vynipadath 	    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1717c69fe407SArjun Vynipadath 		spin_unlock(&q->lock);
1718c69fe407SArjun Vynipadath 		return NET_XMIT_SUCCESS;
1719c69fe407SArjun Vynipadath 	}
1720c69fe407SArjun Vynipadath 
1721f7917c00SJeff Kirsher 	gen = q->gen;
1722f7917c00SJeff Kirsher 	q->in_use += ndesc;
1723f7917c00SJeff Kirsher 	pidx = q->pidx;
1724f7917c00SJeff Kirsher 	q->pidx += ndesc;
1725f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1726f7917c00SJeff Kirsher 		q->pidx -= q->size;
1727f7917c00SJeff Kirsher 		q->gen ^= 1;
1728f7917c00SJeff Kirsher 	}
1729f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1730f7917c00SJeff Kirsher 
1731c69fe407SArjun Vynipadath 	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1732f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1733f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1734f7917c00SJeff Kirsher }
1735f7917c00SJeff Kirsher 
1736f7917c00SJeff Kirsher /**
1737f7917c00SJeff Kirsher  *	restart_offloadq - restart a suspended offload queue
17385e0b8928SÍñigo Huguet  *	@w: pointer to the work associated with this handler
1739f7917c00SJeff Kirsher  *
1740f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx offload queue.
1741f7917c00SJeff Kirsher  */
17425e0b8928SÍñigo Huguet static void restart_offloadq(struct work_struct *w)
1743f7917c00SJeff Kirsher {
1744f7917c00SJeff Kirsher 	struct sk_buff *skb;
17455e0b8928SÍñigo Huguet 	struct sge_qset *qs = container_of(w, struct sge_qset,
17465e0b8928SÍñigo Huguet 					   txq[TXQ_OFLD].qresume_task);
1747f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
1748f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(qs->netdev);
1749f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1750c69fe407SArjun Vynipadath 	unsigned int written = 0;
1751f7917c00SJeff Kirsher 
1752f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1753f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1754f7917c00SJeff Kirsher 
1755f7917c00SJeff Kirsher 	while ((skb = skb_peek(&q->sendq)) != NULL) {
1756f7917c00SJeff Kirsher 		unsigned int gen, pidx;
1757f7917c00SJeff Kirsher 		unsigned int ndesc = skb->priority;
1758f7917c00SJeff Kirsher 
1759f7917c00SJeff Kirsher 		if (unlikely(q->size - q->in_use < ndesc)) {
1760f7917c00SJeff Kirsher 			set_bit(TXQ_OFLD, &qs->txq_stopped);
17614e857c58SPeter Zijlstra 			smp_mb__after_atomic();
1762f7917c00SJeff Kirsher 
1763f7917c00SJeff Kirsher 			if (should_restart_tx(q) &&
1764f7917c00SJeff Kirsher 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1765f7917c00SJeff Kirsher 				goto again;
1766f7917c00SJeff Kirsher 			q->stops++;
1767f7917c00SJeff Kirsher 			break;
1768f7917c00SJeff Kirsher 		}
1769f7917c00SJeff Kirsher 
1770c69fe407SArjun Vynipadath 		if (!immediate(skb) &&
1771c69fe407SArjun Vynipadath 		    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1772c69fe407SArjun Vynipadath 			break;
1773c69fe407SArjun Vynipadath 
1774f7917c00SJeff Kirsher 		gen = q->gen;
1775f7917c00SJeff Kirsher 		q->in_use += ndesc;
1776f7917c00SJeff Kirsher 		pidx = q->pidx;
1777f7917c00SJeff Kirsher 		q->pidx += ndesc;
1778c69fe407SArjun Vynipadath 		written += ndesc;
1779f7917c00SJeff Kirsher 		if (q->pidx >= q->size) {
1780f7917c00SJeff Kirsher 			q->pidx -= q->size;
1781f7917c00SJeff Kirsher 			q->gen ^= 1;
1782f7917c00SJeff Kirsher 		}
1783f7917c00SJeff Kirsher 		__skb_unlink(skb, &q->sendq);
1784f7917c00SJeff Kirsher 		spin_unlock(&q->lock);
1785f7917c00SJeff Kirsher 
1786c69fe407SArjun Vynipadath 		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1787c69fe407SArjun Vynipadath 			      (dma_addr_t *)skb->head);
1788f7917c00SJeff Kirsher 		spin_lock(&q->lock);
1789f7917c00SJeff Kirsher 	}
1790f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1791f7917c00SJeff Kirsher 
1792f7917c00SJeff Kirsher #if USE_GTS
1793f7917c00SJeff Kirsher 	set_bit(TXQ_RUNNING, &q->flags);
1794f7917c00SJeff Kirsher 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
1795f7917c00SJeff Kirsher #endif
1796f7917c00SJeff Kirsher 	wmb();
1797c69fe407SArjun Vynipadath 	if (likely(written))
1798f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_KDOORBELL,
1799f7917c00SJeff Kirsher 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1800f7917c00SJeff Kirsher }
1801f7917c00SJeff Kirsher 
1802f7917c00SJeff Kirsher /**
1803f7917c00SJeff Kirsher  *	queue_set - return the queue set a packet should use
1804f7917c00SJeff Kirsher  *	@skb: the packet
1805f7917c00SJeff Kirsher  *
1806f7917c00SJeff Kirsher  *	Maps a packet to the SGE queue set it should use.  The desired queue
1807f7917c00SJeff Kirsher  *	set is carried in bits 1-3 in the packet's priority.
1808f7917c00SJeff Kirsher  */
1809f7917c00SJeff Kirsher static inline int queue_set(const struct sk_buff *skb)
1810f7917c00SJeff Kirsher {
1811f7917c00SJeff Kirsher 	return skb->priority >> 1;
1812f7917c00SJeff Kirsher }
1813f7917c00SJeff Kirsher 
1814f7917c00SJeff Kirsher /**
1815f7917c00SJeff Kirsher  *	is_ctrl_pkt - return whether an offload packet is a control packet
1816f7917c00SJeff Kirsher  *	@skb: the packet
1817f7917c00SJeff Kirsher  *
1818f7917c00SJeff Kirsher  *	Determines whether an offload packet should use an OFLD or a CTRL
1819f7917c00SJeff Kirsher  *	Tx queue.  This is indicated by bit 0 in the packet's priority.
1820f7917c00SJeff Kirsher  */
1821f7917c00SJeff Kirsher static inline int is_ctrl_pkt(const struct sk_buff *skb)
1822f7917c00SJeff Kirsher {
1823f7917c00SJeff Kirsher 	return skb->priority & 1;
1824f7917c00SJeff Kirsher }
1825f7917c00SJeff Kirsher 
1826f7917c00SJeff Kirsher /**
1827f7917c00SJeff Kirsher  *	t3_offload_tx - send an offload packet
1828f7917c00SJeff Kirsher  *	@tdev: the offload device to send to
1829f7917c00SJeff Kirsher  *	@skb: the packet
1830f7917c00SJeff Kirsher  *
1831f7917c00SJeff Kirsher  *	Sends an offload packet.  We use the packet priority to select the
1832f7917c00SJeff Kirsher  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1833f7917c00SJeff Kirsher  *	should be sent as regular or control, bits 1-3 select the queue set.
1834f7917c00SJeff Kirsher  */
1835f7917c00SJeff Kirsher int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1836f7917c00SJeff Kirsher {
1837f7917c00SJeff Kirsher 	struct adapter *adap = tdev2adap(tdev);
1838f7917c00SJeff Kirsher 	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1839f7917c00SJeff Kirsher 
1840f7917c00SJeff Kirsher 	if (unlikely(is_ctrl_pkt(skb)))
1841f7917c00SJeff Kirsher 		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1842f7917c00SJeff Kirsher 
1843f7917c00SJeff Kirsher 	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1844f7917c00SJeff Kirsher }
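
/*
 * Example of the priority encoding consumed above (illustrative only;
 * the value is chosen by the offload caller, not by this function):
 * setting skb->priority = (2 << 1) | 1 = 5 selects queue set 2 and
 * the CTRL Tx queue, i.e. queue_set() returns 2 and is_ctrl_pkt()
 * returns 1, so the packet goes out through qs[2].txq[TXQ_CTRL].
 */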
1845f7917c00SJeff Kirsher 
1846f7917c00SJeff Kirsher /**
1847f7917c00SJeff Kirsher  *	offload_enqueue - add an offload packet to an SGE offload receive queue
1848f7917c00SJeff Kirsher  *	@q: the SGE response queue
1849f7917c00SJeff Kirsher  *	@skb: the packet
1850f7917c00SJeff Kirsher  *
1851f7917c00SJeff Kirsher  *	Add a new offload packet to an SGE response queue's offload packet
1852f7917c00SJeff Kirsher  *	queue.  If the packet is the first on the queue it schedules the RX
1853f7917c00SJeff Kirsher  *	softirq to process the queue.
1854f7917c00SJeff Kirsher  */
1855f7917c00SJeff Kirsher static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1856f7917c00SJeff Kirsher {
1857f7917c00SJeff Kirsher 	int was_empty = skb_queue_empty(&q->rx_queue);
1858f7917c00SJeff Kirsher 
1859f7917c00SJeff Kirsher 	__skb_queue_tail(&q->rx_queue, skb);
1860f7917c00SJeff Kirsher 
1861f7917c00SJeff Kirsher 	if (was_empty) {
1862f7917c00SJeff Kirsher 		struct sge_qset *qs = rspq_to_qset(q);
1863f7917c00SJeff Kirsher 
1864f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
1865f7917c00SJeff Kirsher 	}
1866f7917c00SJeff Kirsher }
1867f7917c00SJeff Kirsher 
1868f7917c00SJeff Kirsher /**
1869f7917c00SJeff Kirsher  *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1870f7917c00SJeff Kirsher  *	@tdev: the offload device that will be receiving the packets
1871f7917c00SJeff Kirsher  *	@q: the SGE response queue that assembled the bundle
1872f7917c00SJeff Kirsher  *	@skbs: the partial bundle
1873f7917c00SJeff Kirsher  *	@n: the number of packets in the bundle
1874f7917c00SJeff Kirsher  *
1875f7917c00SJeff Kirsher  *	Delivers a (partial) bundle of Rx offload packets to an offload device.
1876f7917c00SJeff Kirsher  */
1877f7917c00SJeff Kirsher static inline void deliver_partial_bundle(struct t3cdev *tdev,
1878f7917c00SJeff Kirsher 					  struct sge_rspq *q,
1879f7917c00SJeff Kirsher 					  struct sk_buff *skbs[], int n)
1880f7917c00SJeff Kirsher {
1881f7917c00SJeff Kirsher 	if (n) {
1882f7917c00SJeff Kirsher 		q->offload_bundles++;
1883f7917c00SJeff Kirsher 		tdev->recv(tdev, skbs, n);
1884f7917c00SJeff Kirsher 	}
1885f7917c00SJeff Kirsher }
1886f7917c00SJeff Kirsher 
1887f7917c00SJeff Kirsher /**
1888f7917c00SJeff Kirsher  *	ofld_poll - NAPI handler for offload packets in interrupt mode
1889d0ea5cbdSJesse Brandeburg  *	@napi: the NAPI instance doing the polling
1890f7917c00SJeff Kirsher  *	@budget: polling budget
1891f7917c00SJeff Kirsher  *
1892f7917c00SJeff Kirsher  *	The NAPI handler for offload packets when a response queue is serviced
1893f7917c00SJeff Kirsher  *	by the hard interrupt handler, i.e., when it's operating in non-polling
1894f7917c00SJeff Kirsher  *	mode.  Creates small packet batches and sends them through the offload
1895f7917c00SJeff Kirsher  *	receive handler.  Batches need to be of modest size as we do prefetches
1896f7917c00SJeff Kirsher  *	on the packets in each.
1897f7917c00SJeff Kirsher  */
1898f7917c00SJeff Kirsher static int ofld_poll(struct napi_struct *napi, int budget)
1899f7917c00SJeff Kirsher {
1900f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1901f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
1902f7917c00SJeff Kirsher 	struct adapter *adapter = qs->adap;
1903f7917c00SJeff Kirsher 	int work_done = 0;
1904f7917c00SJeff Kirsher 
1905f7917c00SJeff Kirsher 	while (work_done < budget) {
1906f7917c00SJeff Kirsher 		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1907f7917c00SJeff Kirsher 		struct sk_buff_head queue;
1908f7917c00SJeff Kirsher 		int ngathered;
1909f7917c00SJeff Kirsher 
1910f7917c00SJeff Kirsher 		spin_lock_irq(&q->lock);
1911f7917c00SJeff Kirsher 		__skb_queue_head_init(&queue);
1912f7917c00SJeff Kirsher 		skb_queue_splice_init(&q->rx_queue, &queue);
1913f7917c00SJeff Kirsher 		if (skb_queue_empty(&queue)) {
19146ad20165SEric Dumazet 			napi_complete_done(napi, work_done);
1915f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1916f7917c00SJeff Kirsher 			return work_done;
1917f7917c00SJeff Kirsher 		}
1918f7917c00SJeff Kirsher 		spin_unlock_irq(&q->lock);
1919f7917c00SJeff Kirsher 
1920f7917c00SJeff Kirsher 		ngathered = 0;
1921f7917c00SJeff Kirsher 		skb_queue_walk_safe(&queue, skb, tmp) {
1922f7917c00SJeff Kirsher 			if (work_done >= budget)
1923f7917c00SJeff Kirsher 				break;
1924f7917c00SJeff Kirsher 			work_done++;
1925f7917c00SJeff Kirsher 
1926f7917c00SJeff Kirsher 			__skb_unlink(skb, &queue);
1927f7917c00SJeff Kirsher 			prefetch(skb->data);
1928f7917c00SJeff Kirsher 			skbs[ngathered] = skb;
1929f7917c00SJeff Kirsher 			if (++ngathered == RX_BUNDLE_SIZE) {
1930f7917c00SJeff Kirsher 				q->offload_bundles++;
1931f7917c00SJeff Kirsher 				adapter->tdev.recv(&adapter->tdev, skbs,
1932f7917c00SJeff Kirsher 						   ngathered);
1933f7917c00SJeff Kirsher 				ngathered = 0;
1934f7917c00SJeff Kirsher 			}
1935f7917c00SJeff Kirsher 		}
1936f7917c00SJeff Kirsher 		if (!skb_queue_empty(&queue)) {
1937f7917c00SJeff Kirsher 			/* splice remaining packets back onto Rx queue */
1938f7917c00SJeff Kirsher 			spin_lock_irq(&q->lock);
1939f7917c00SJeff Kirsher 			skb_queue_splice(&queue, &q->rx_queue);
1940f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1941f7917c00SJeff Kirsher 		}
1942f7917c00SJeff Kirsher 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1943f7917c00SJeff Kirsher 	}
1944f7917c00SJeff Kirsher 
1945f7917c00SJeff Kirsher 	return work_done;
1946f7917c00SJeff Kirsher }
1947f7917c00SJeff Kirsher 
1948f7917c00SJeff Kirsher /**
1949f7917c00SJeff Kirsher  *	rx_offload - process a received offload packet
1950f7917c00SJeff Kirsher  *	@tdev: the offload device receiving the packet
1951f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
1952f7917c00SJeff Kirsher  *	@skb: the packet
1953f7917c00SJeff Kirsher  *	@rx_gather: a gather list of packets if we are building a bundle
1954f7917c00SJeff Kirsher  *	@gather_idx: index of the next available slot in the bundle
1955f7917c00SJeff Kirsher  *
1956f7917c00SJeff Kirsher  *	Process an ingress offload packet and add it to the offload ingress
1957f7917c00SJeff Kirsher  *	queue. 	Returns the index of the next available slot in the bundle.
1958f7917c00SJeff Kirsher  */
1959f7917c00SJeff Kirsher static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1960f7917c00SJeff Kirsher 			     struct sk_buff *skb, struct sk_buff *rx_gather[],
1961f7917c00SJeff Kirsher 			     unsigned int gather_idx)
1962f7917c00SJeff Kirsher {
1963f7917c00SJeff Kirsher 	skb_reset_mac_header(skb);
1964f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
1965f7917c00SJeff Kirsher 	skb_reset_transport_header(skb);
1966f7917c00SJeff Kirsher 
1967f7917c00SJeff Kirsher 	if (rq->polling) {
1968f7917c00SJeff Kirsher 		rx_gather[gather_idx++] = skb;
1969f7917c00SJeff Kirsher 		if (gather_idx == RX_BUNDLE_SIZE) {
1970f7917c00SJeff Kirsher 			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1971f7917c00SJeff Kirsher 			gather_idx = 0;
1972f7917c00SJeff Kirsher 			rq->offload_bundles++;
1973f7917c00SJeff Kirsher 		}
1974f7917c00SJeff Kirsher 	} else
1975f7917c00SJeff Kirsher 		offload_enqueue(rq, skb);
1976f7917c00SJeff Kirsher 
1977f7917c00SJeff Kirsher 	return gather_idx;
1978f7917c00SJeff Kirsher }
1979f7917c00SJeff Kirsher 
1980f7917c00SJeff Kirsher /**
1981f7917c00SJeff Kirsher  *	restart_tx - check whether to restart suspended Tx queues
1982f7917c00SJeff Kirsher  *	@qs: the queue set to resume
1983f7917c00SJeff Kirsher  *
1984f7917c00SJeff Kirsher  *	Restarts suspended Tx queues of an SGE queue set if they have enough
1985f7917c00SJeff Kirsher  *	free resources to resume operation.
1986f7917c00SJeff Kirsher  */
1987f7917c00SJeff Kirsher static void restart_tx(struct sge_qset *qs)
1988f7917c00SJeff Kirsher {
1989f7917c00SJeff Kirsher 	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1990f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
1991f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1992f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].restarts++;
1993f7917c00SJeff Kirsher 		if (netif_running(qs->netdev))
1994f7917c00SJeff Kirsher 			netif_tx_wake_queue(qs->tx_q);
1995f7917c00SJeff Kirsher 	}
1996f7917c00SJeff Kirsher 
1997f7917c00SJeff Kirsher 	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1998f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1999f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2000f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].restarts++;
20015e0b8928SÍñigo Huguet 
20025e0b8928SÍñigo Huguet 		/* The work can be quite lengthy so we use driver's own queue */
20035e0b8928SÍñigo Huguet 		queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
2004f7917c00SJeff Kirsher 	}
2005f7917c00SJeff Kirsher 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2006f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2007f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2008f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].restarts++;
20095e0b8928SÍñigo Huguet 
20105e0b8928SÍñigo Huguet 		/* The work can be quite lengthy so we use driver's own queue */
20115e0b8928SÍñigo Huguet 		queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
2012f7917c00SJeff Kirsher 	}
2013f7917c00SJeff Kirsher }
2014f7917c00SJeff Kirsher 
2015f7917c00SJeff Kirsher /**
2016f7917c00SJeff Kirsher  *	cxgb3_arp_process - process an ARP request probing a private IP address
2017d0ea5cbdSJesse Brandeburg  *	@pi: the port info
2018f7917c00SJeff Kirsher  *	@skb: the skbuff containing the ARP request
2019f7917c00SJeff Kirsher  *
2020f7917c00SJeff Kirsher  *	Check if the ARP request is probing the private IP address
2021f7917c00SJeff Kirsher  *	dedicated to iSCSI and generate an ARP reply if so.
2022f7917c00SJeff Kirsher  */
2023f7917c00SJeff Kirsher static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2024f7917c00SJeff Kirsher {
2025f7917c00SJeff Kirsher 	struct net_device *dev = skb->dev;
2026f7917c00SJeff Kirsher 	struct arphdr *arp;
2027f7917c00SJeff Kirsher 	unsigned char *arp_ptr;
2028f7917c00SJeff Kirsher 	unsigned char *sha;
2029f7917c00SJeff Kirsher 	__be32 sip, tip;
2030f7917c00SJeff Kirsher 
2031f7917c00SJeff Kirsher 	if (!dev)
2032f7917c00SJeff Kirsher 		return;
2033f7917c00SJeff Kirsher 
2034f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
2035f7917c00SJeff Kirsher 	arp = arp_hdr(skb);
2036f7917c00SJeff Kirsher 
2037f7917c00SJeff Kirsher 	if (arp->ar_op != htons(ARPOP_REQUEST))
2038f7917c00SJeff Kirsher 		return;
2039f7917c00SJeff Kirsher 
2040f7917c00SJeff Kirsher 	arp_ptr = (unsigned char *)(arp + 1);
2041f7917c00SJeff Kirsher 	sha = arp_ptr;
2042f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2043f7917c00SJeff Kirsher 	memcpy(&sip, arp_ptr, sizeof(sip));
2044f7917c00SJeff Kirsher 	arp_ptr += sizeof(sip);
2045f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2046f7917c00SJeff Kirsher 	memcpy(&tip, arp_ptr, sizeof(tip));
2047f7917c00SJeff Kirsher 
2048f7917c00SJeff Kirsher 	if (tip != pi->iscsi_ipv4addr)
2049f7917c00SJeff Kirsher 		return;
2050f7917c00SJeff Kirsher 
2051f7917c00SJeff Kirsher 	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2052f7917c00SJeff Kirsher 		 pi->iscsic.mac_addr, sha);
2053f7917c00SJeff Kirsher 
2054f7917c00SJeff Kirsher }
2055f7917c00SJeff Kirsher 
2056f7917c00SJeff Kirsher static inline int is_arp(struct sk_buff *skb)
2057f7917c00SJeff Kirsher {
2058f7917c00SJeff Kirsher 	return skb->protocol == htons(ETH_P_ARP);
2059f7917c00SJeff Kirsher }
2060f7917c00SJeff Kirsher 
2061f7917c00SJeff Kirsher static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2062f7917c00SJeff Kirsher 					struct sk_buff *skb)
2063f7917c00SJeff Kirsher {
2064f7917c00SJeff Kirsher 	if (is_arp(skb)) {
2065f7917c00SJeff Kirsher 		cxgb3_arp_process(pi, skb);
2066f7917c00SJeff Kirsher 		return;
2067f7917c00SJeff Kirsher 	}
2068f7917c00SJeff Kirsher 
2069f7917c00SJeff Kirsher 	if (pi->iscsic.recv)
2070f7917c00SJeff Kirsher 		pi->iscsic.recv(pi, skb);
2071f7917c00SJeff Kirsher 
2072f7917c00SJeff Kirsher }
2073f7917c00SJeff Kirsher 
2074f7917c00SJeff Kirsher /**
2075f7917c00SJeff Kirsher  *	rx_eth - process an ingress ethernet packet
2076f7917c00SJeff Kirsher  *	@adap: the adapter
2077f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
2078f7917c00SJeff Kirsher  *	@skb: the packet
2079d0ea5cbdSJesse Brandeburg  *	@pad: number of padding bytes preceding the CPL header
2080d0ea5cbdSJesse Brandeburg  *	@lro: whether the packet may be handed to GRO (large receive offload)
2081f7917c00SJeff Kirsher  *
2082f7917c00SJeff Kirsher  *	Process an ingress ethernet packet and deliver it to the stack.
2083f7917c00SJeff Kirsher  *	The padding is 2 if the packet was delivered in an Rx buffer and 0
2084f7917c00SJeff Kirsher  *	if it was immediate data in a response.
2085f7917c00SJeff Kirsher  */
2086f7917c00SJeff Kirsher static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2087f7917c00SJeff Kirsher 		   struct sk_buff *skb, int pad, int lro)
2088f7917c00SJeff Kirsher {
2089f7917c00SJeff Kirsher 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2090f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(rq);
2091f7917c00SJeff Kirsher 	struct port_info *pi;
2092f7917c00SJeff Kirsher 
2093f7917c00SJeff Kirsher 	skb_pull(skb, sizeof(*p) + pad);
2094f7917c00SJeff Kirsher 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2095f7917c00SJeff Kirsher 	pi = netdev_priv(skb->dev);
2096f7917c00SJeff Kirsher 	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2097f7917c00SJeff Kirsher 	    p->csum == htons(0xffff) && !p->fragment) {
2098f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2099f7917c00SJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2100f7917c00SJeff Kirsher 	} else
2101f7917c00SJeff Kirsher 		skb_checksum_none_assert(skb);
2102f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2103f7917c00SJeff Kirsher 
2104f7917c00SJeff Kirsher 	if (p->vlan_valid) {
2105f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANEX]++;
210686a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2107f7917c00SJeff Kirsher 	}
2108f7917c00SJeff Kirsher 	if (rq->polling) {
2109f7917c00SJeff Kirsher 		if (lro)
2110f7917c00SJeff Kirsher 			napi_gro_receive(&qs->napi, skb);
2111f7917c00SJeff Kirsher 		else {
2112f7917c00SJeff Kirsher 			if (unlikely(pi->iscsic.flags))
2113f7917c00SJeff Kirsher 				cxgb3_process_iscsi_prov_pack(pi, skb);
2114f7917c00SJeff Kirsher 			netif_receive_skb(skb);
2115f7917c00SJeff Kirsher 		}
2116f7917c00SJeff Kirsher 	} else
2117f7917c00SJeff Kirsher 		netif_rx(skb);
2118f7917c00SJeff Kirsher }
2119f7917c00SJeff Kirsher 
2120f7917c00SJeff Kirsher static inline int is_eth_tcp(u32 rss)
2121f7917c00SJeff Kirsher {
2122f7917c00SJeff Kirsher 	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2123f7917c00SJeff Kirsher }
2124f7917c00SJeff Kirsher 
2125f7917c00SJeff Kirsher /**
2126f7917c00SJeff Kirsher  *	lro_add_page - add a page chunk to an LRO session
2127f7917c00SJeff Kirsher  *	@adap: the adapter
2128f7917c00SJeff Kirsher  *	@qs: the associated queue set
2129f7917c00SJeff Kirsher  *	@fl: the free list containing the page chunk to add
2130f7917c00SJeff Kirsher  *	@len: packet length
2131f7917c00SJeff Kirsher  *	@complete: Indicates the last fragment of a frame
2132f7917c00SJeff Kirsher  *
2133f7917c00SJeff Kirsher  *	Add a received packet contained in a page chunk to an existing LRO
2134f7917c00SJeff Kirsher  *	session.
2135f7917c00SJeff Kirsher  */
2136f7917c00SJeff Kirsher static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2137f7917c00SJeff Kirsher 			 struct sge_fl *fl, int len, int complete)
2138f7917c00SJeff Kirsher {
2139f7917c00SJeff Kirsher 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2140f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2141f7917c00SJeff Kirsher 	struct sk_buff *skb = NULL;
2142f7917c00SJeff Kirsher 	struct cpl_rx_pkt *cpl;
2143d7840976SMatthew Wilcox (Oracle) 	skb_frag_t *rx_frag;
2144f7917c00SJeff Kirsher 	int nr_frags;
2145f7917c00SJeff Kirsher 	int offset = 0;
2146f7917c00SJeff Kirsher 
2147f7917c00SJeff Kirsher 	if (!qs->nomem) {
2148f7917c00SJeff Kirsher 		skb = napi_get_frags(&qs->napi);
2149f7917c00SJeff Kirsher 		qs->nomem = !skb;
2150f7917c00SJeff Kirsher 	}
2151f7917c00SJeff Kirsher 
2152f7917c00SJeff Kirsher 	fl->credits--;
2153f7917c00SJeff Kirsher 
21544489d8f5SChristophe JAILLET 	dma_sync_single_for_cpu(&adap->pdev->dev,
2155f7917c00SJeff Kirsher 				dma_unmap_addr(sd, dma_addr),
21564489d8f5SChristophe JAILLET 				fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
2157f7917c00SJeff Kirsher 
2158f7917c00SJeff Kirsher 	(*sd->pg_chunk.p_cnt)--;
2159f7917c00SJeff Kirsher 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
21604489d8f5SChristophe JAILLET 		dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
21614489d8f5SChristophe JAILLET 			       fl->alloc_size, DMA_FROM_DEVICE);
2162f7917c00SJeff Kirsher 
2163f7917c00SJeff Kirsher 	if (!skb) {
2164f7917c00SJeff Kirsher 		put_page(sd->pg_chunk.page);
2165f7917c00SJeff Kirsher 		if (complete)
2166f7917c00SJeff Kirsher 			qs->nomem = 0;
2167f7917c00SJeff Kirsher 		return;
2168f7917c00SJeff Kirsher 	}
2169f7917c00SJeff Kirsher 
2170f7917c00SJeff Kirsher 	rx_frag = skb_shinfo(skb)->frags;
2171f7917c00SJeff Kirsher 	nr_frags = skb_shinfo(skb)->nr_frags;
2172f7917c00SJeff Kirsher 
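	/*
	 * The first chunk of a frame begins with a 2-byte pad and the CPL
	 * header; skip those and remember the CPL address in qs->lro_va so
	 * the VLAN and checksum fields can be consulted when the final
	 * fragment arrives.  Subsequent chunks are pure payload.
	 */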
2173f7917c00SJeff Kirsher 	if (!nr_frags) {
2174f7917c00SJeff Kirsher 		offset = 2 + sizeof(struct cpl_rx_pkt);
2175f7917c00SJeff Kirsher 		cpl = qs->lro_va = sd->pg_chunk.va + 2;
2176f7917c00SJeff Kirsher 
2177f7917c00SJeff Kirsher 		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2178f7917c00SJeff Kirsher 		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
2179f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2180f7917c00SJeff Kirsher 			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2181f7917c00SJeff Kirsher 		} else
2182f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_NONE;
2183f7917c00SJeff Kirsher 	} else
2184f7917c00SJeff Kirsher 		cpl = qs->lro_va;
2185f7917c00SJeff Kirsher 
2186f7917c00SJeff Kirsher 	len -= offset;
2187f7917c00SJeff Kirsher 
2188f7917c00SJeff Kirsher 	rx_frag += nr_frags;
21896a930b9fSIan Campbell 	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2190b54c9d5bSJonathan Lemon 	skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
21919e903e08SEric Dumazet 	skb_frag_size_set(rx_frag, len);
2192f7917c00SJeff Kirsher 
2193f7917c00SJeff Kirsher 	skb->len += len;
2194f7917c00SJeff Kirsher 	skb->data_len += len;
2195f7917c00SJeff Kirsher 	skb->truesize += len;
2196f7917c00SJeff Kirsher 	skb_shinfo(skb)->nr_frags++;
2197f7917c00SJeff Kirsher 
2198f7917c00SJeff Kirsher 	if (!complete)
2199f7917c00SJeff Kirsher 		return;
2200f7917c00SJeff Kirsher 
2201f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2202f7917c00SJeff Kirsher 
220372073ad2SVipul Pandya 	if (cpl->vlan_valid) {
220472073ad2SVipul Pandya 		qs->port_stats[SGE_PSTAT_VLANEX]++;
220586a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
220672073ad2SVipul Pandya 	}
2207f7917c00SJeff Kirsher 	napi_gro_frags(&qs->napi);
2208f7917c00SJeff Kirsher }
2209f7917c00SJeff Kirsher 
2210f7917c00SJeff Kirsher /**
2211f7917c00SJeff Kirsher  *	handle_rsp_cntrl_info - handles control information in a response
2212f7917c00SJeff Kirsher  *	@qs: the queue set corresponding to the response
2213f7917c00SJeff Kirsher  *	@flags: the response control flags
2214f7917c00SJeff Kirsher  *
2215f7917c00SJeff Kirsher  *	Handles the control information of an SGE response, such as GTS
2216f7917c00SJeff Kirsher  *	indications and completion credits for the queue set's Tx queues.
2217f7917c00SJeff Kirsher  *	HW coalesces credits; we don't do any extra SW coalescing.
2218f7917c00SJeff Kirsher  */
2219f7917c00SJeff Kirsher static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2220f7917c00SJeff Kirsher {
2221f7917c00SJeff Kirsher 	unsigned int credits;
2222f7917c00SJeff Kirsher 
2223f7917c00SJeff Kirsher #if USE_GTS
2224f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ0_GTS)
2225f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2226f7917c00SJeff Kirsher #endif
2227f7917c00SJeff Kirsher 
2228f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ0_CR(flags);
2229f7917c00SJeff Kirsher 	if (credits)
2230f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].processed += credits;
2231f7917c00SJeff Kirsher 
2232f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ2_CR(flags);
2233f7917c00SJeff Kirsher 	if (credits)
2234f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].processed += credits;
2235f7917c00SJeff Kirsher 
2236f7917c00SJeff Kirsher # if USE_GTS
2237f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ1_GTS)
2238f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2239f7917c00SJeff Kirsher # endif
2240f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ1_CR(flags);
2241f7917c00SJeff Kirsher 	if (credits)
2242f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].processed += credits;
2243f7917c00SJeff Kirsher }
2244f7917c00SJeff Kirsher 
2245f7917c00SJeff Kirsher /**
2246f7917c00SJeff Kirsher  *	check_ring_db - check if we need to ring any doorbells
2247d0ea5cbdSJesse Brandeburg  *	@adap: the adapter
2248f7917c00SJeff Kirsher  *	@qs: the queue set whose Tx queues are to be examined
2249f7917c00SJeff Kirsher  *	@sleeping: indicates which Tx queue sent GTS
2250f7917c00SJeff Kirsher  *
2251f7917c00SJeff Kirsher  *	Checks if some of a queue set's Tx queues need to ring their doorbells
2252f7917c00SJeff Kirsher  *	to resume transmission after idling while they still have unprocessed
2253f7917c00SJeff Kirsher  *	descriptors.
2254f7917c00SJeff Kirsher  */
2255f7917c00SJeff Kirsher static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2256f7917c00SJeff Kirsher 			  unsigned int sleeping)
2257f7917c00SJeff Kirsher {
2258f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ0_GTS) {
2259f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
2260f7917c00SJeff Kirsher 
2261f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2262f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2263f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2264f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2265f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2266f7917c00SJeff Kirsher 		}
2267f7917c00SJeff Kirsher 	}
2268f7917c00SJeff Kirsher 
2269f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ1_GTS) {
2270f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2271f7917c00SJeff Kirsher 
2272f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2273f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2274f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2275f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2276f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2277f7917c00SJeff Kirsher 		}
2278f7917c00SJeff Kirsher 	}
2279f7917c00SJeff Kirsher }
2280f7917c00SJeff Kirsher 
2281f7917c00SJeff Kirsher /**
2282f7917c00SJeff Kirsher  *	is_new_response - check if a response is newly written
2283f7917c00SJeff Kirsher  *	@r: the response descriptor
2284f7917c00SJeff Kirsher  *	@q: the response queue
2285f7917c00SJeff Kirsher  *
2286f7917c00SJeff Kirsher  *	Returns true if a response descriptor contains a yet unprocessed
2287f7917c00SJeff Kirsher  *	response.
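 *	A descriptor is considered new when its generation bit (F_RSPD_GEN2)
 *	matches the queue's current generation, which software toggles each
 *	time the consumer index wraps around the ring.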
2288f7917c00SJeff Kirsher  */
2289f7917c00SJeff Kirsher static inline int is_new_response(const struct rsp_desc *r,
2290f7917c00SJeff Kirsher 				  const struct sge_rspq *q)
2291f7917c00SJeff Kirsher {
2292f7917c00SJeff Kirsher 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2293f7917c00SJeff Kirsher }
2294f7917c00SJeff Kirsher 
2295f7917c00SJeff Kirsher static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2296f7917c00SJeff Kirsher {
2297f7917c00SJeff Kirsher 	q->pg_skb = NULL;
2298f7917c00SJeff Kirsher 	q->rx_recycle_buf = 0;
2299f7917c00SJeff Kirsher }
2300f7917c00SJeff Kirsher 
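/*
 * Response flags that require Tx bookkeeping: GTS indications for the
 * Ethernet and offload queues plus completion credit returns for all
 * three Tx queues.  handle_rsp_cntrl_info() is invoked only when one of
 * these bits is set.
 */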
2301f7917c00SJeff Kirsher #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2302f7917c00SJeff Kirsher #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2303f7917c00SJeff Kirsher 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2304f7917c00SJeff Kirsher 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2305f7917c00SJeff Kirsher 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2306f7917c00SJeff Kirsher 
2307f7917c00SJeff Kirsher /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2308f7917c00SJeff Kirsher #define NOMEM_INTR_DELAY 2500
2309f7917c00SJeff Kirsher 
2310f7917c00SJeff Kirsher /**
2311f7917c00SJeff Kirsher  *	process_responses - process responses from an SGE response queue
2312f7917c00SJeff Kirsher  *	@adap: the adapter
2313f7917c00SJeff Kirsher  *	@qs: the queue set to which the response queue belongs
2314f7917c00SJeff Kirsher  *	@budget: how many responses can be processed in this round
2315f7917c00SJeff Kirsher  *
2316f7917c00SJeff Kirsher  *	Process responses from an SGE response queue up to the supplied budget.
2317f7917c00SJeff Kirsher  *	Responses include received packets as well as credits and other events
2318f7917c00SJeff Kirsher  *	for the queues that belong to the response queue's queue set.
2319f7917c00SJeff Kirsher  *	A negative budget is effectively unlimited.
2320f7917c00SJeff Kirsher  *
2321f7917c00SJeff Kirsher  *	Additionally choose the interrupt holdoff time for the next interrupt
2322f7917c00SJeff Kirsher  *	on this queue.  If the system is under memory shortage use a fairly
2323f7917c00SJeff Kirsher  *	long delay to help recovery.
2324f7917c00SJeff Kirsher  */
2325f7917c00SJeff Kirsher static int process_responses(struct adapter *adap, struct sge_qset *qs,
2326f7917c00SJeff Kirsher 			     int budget)
2327f7917c00SJeff Kirsher {
2328f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2329f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2330f7917c00SJeff Kirsher 	int budget_left = budget;
2331f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2332f7917c00SJeff Kirsher 	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2333f7917c00SJeff Kirsher 	int ngathered = 0;
2334f7917c00SJeff Kirsher 
2335f7917c00SJeff Kirsher 	q->next_holdoff = q->holdoff_tmr;
2336f7917c00SJeff Kirsher 
2337f7917c00SJeff Kirsher 	while (likely(budget_left && is_new_response(r, q))) {
2338f7917c00SJeff Kirsher 		int packet_complete, eth, ethpad = 2;
2339f7917c00SJeff Kirsher 		int lro = !!(qs->netdev->features & NETIF_F_GRO);
2340f7917c00SJeff Kirsher 		struct sk_buff *skb = NULL;
2341f7917c00SJeff Kirsher 		u32 len, flags;
2342f7917c00SJeff Kirsher 		__be32 rss_hi, rss_lo;
2343f7917c00SJeff Kirsher 
2344019be1cfSAlexander Duyck 		dma_rmb();
2345f7917c00SJeff Kirsher 		eth = r->rss_hdr.opcode == CPL_RX_PKT;
2346f7917c00SJeff Kirsher 		rss_hi = *(const __be32 *)r;
2347f7917c00SJeff Kirsher 		rss_lo = r->rss_hdr.rss_hash_val;
2348f7917c00SJeff Kirsher 		flags = ntohl(r->flags);
2349f7917c00SJeff Kirsher 
2350f7917c00SJeff Kirsher 		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2351f7917c00SJeff Kirsher 			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2352f7917c00SJeff Kirsher 			if (!skb)
2353f7917c00SJeff Kirsher 				goto no_mem;
2354f7917c00SJeff Kirsher 
2355de77b966Syuan linyu 			__skb_put_data(skb, r, AN_PKT_SIZE);
2356f7917c00SJeff Kirsher 			skb->data[0] = CPL_ASYNC_NOTIF;
2357f7917c00SJeff Kirsher 			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2358f7917c00SJeff Kirsher 			q->async_notif++;
2359f7917c00SJeff Kirsher 		} else if (flags & F_RSPD_IMM_DATA_VALID) {
2360f7917c00SJeff Kirsher 			skb = get_imm_packet(r);
2361f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2362f7917c00SJeff Kirsher no_mem:
2363f7917c00SJeff Kirsher 				q->next_holdoff = NOMEM_INTR_DELAY;
2364f7917c00SJeff Kirsher 				q->nomem++;
2365f7917c00SJeff Kirsher 				/* consume one credit since we tried */
2366f7917c00SJeff Kirsher 				budget_left--;
2367f7917c00SJeff Kirsher 				break;
2368f7917c00SJeff Kirsher 			}
2369f7917c00SJeff Kirsher 			q->imm_data++;
2370f7917c00SJeff Kirsher 			ethpad = 0;
2371f7917c00SJeff Kirsher 		} else if ((len = ntohl(r->len_cq)) != 0) {
2372f7917c00SJeff Kirsher 			struct sge_fl *fl;
2373f7917c00SJeff Kirsher 
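			/*
			 * GRO is attempted only for Ethernet packets whose
			 * RSS hash covered the 4-tuple (i.e. TCP), and only
			 * when the netdev has NETIF_F_GRO set (folded into
			 * lro at the top of the loop).
			 */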
2374f7917c00SJeff Kirsher 			lro &= eth && is_eth_tcp(rss_hi);
2375f7917c00SJeff Kirsher 
2376f7917c00SJeff Kirsher 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2377f7917c00SJeff Kirsher 			if (fl->use_pages) {
2378f7917c00SJeff Kirsher 				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2379f7917c00SJeff Kirsher 
2380f468f21bSTariq Toukan 				net_prefetch(addr);
2381f7917c00SJeff Kirsher 				__refill_fl(adap, fl);
2382f7917c00SJeff Kirsher 				if (lro > 0) {
2383f7917c00SJeff Kirsher 					lro_add_page(adap, qs, fl,
2384f7917c00SJeff Kirsher 						     G_RSPD_LEN(len),
2385f7917c00SJeff Kirsher 						     flags & F_RSPD_EOP);
2386f7917c00SJeff Kirsher 					goto next_fl;
2387f7917c00SJeff Kirsher 				}
2388f7917c00SJeff Kirsher 
2389f7917c00SJeff Kirsher 				skb = get_packet_pg(adap, fl, q,
2390f7917c00SJeff Kirsher 						    G_RSPD_LEN(len),
2391f7917c00SJeff Kirsher 						    eth ?
2392f7917c00SJeff Kirsher 						    SGE_RX_DROP_THRES : 0);
2393f7917c00SJeff Kirsher 				q->pg_skb = skb;
2394f7917c00SJeff Kirsher 			} else
2395f7917c00SJeff Kirsher 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
2396f7917c00SJeff Kirsher 						 eth ? SGE_RX_DROP_THRES : 0);
2397f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2398f7917c00SJeff Kirsher 				if (!eth)
2399f7917c00SJeff Kirsher 					goto no_mem;
2400f7917c00SJeff Kirsher 				q->rx_drops++;
2401f7917c00SJeff Kirsher 			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2402f7917c00SJeff Kirsher 				__skb_pull(skb, 2);
2403f7917c00SJeff Kirsher next_fl:
2404f7917c00SJeff Kirsher 			if (++fl->cidx == fl->size)
2405f7917c00SJeff Kirsher 				fl->cidx = 0;
2406f7917c00SJeff Kirsher 		} else
2407f7917c00SJeff Kirsher 			q->pure_rsps++;
2408f7917c00SJeff Kirsher 
2409f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2410f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2411f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2412f7917c00SJeff Kirsher 		}
2413f7917c00SJeff Kirsher 
2414f7917c00SJeff Kirsher 		r++;
2415f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2416f7917c00SJeff Kirsher 			q->cidx = 0;
2417f7917c00SJeff Kirsher 			q->gen ^= 1;
2418f7917c00SJeff Kirsher 			r = q->desc;
2419f7917c00SJeff Kirsher 		}
2420f7917c00SJeff Kirsher 		prefetch(r);
2421f7917c00SJeff Kirsher 
2422f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2423f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2424f7917c00SJeff Kirsher 			q->credits = 0;
2425f7917c00SJeff Kirsher 		}
2426f7917c00SJeff Kirsher 
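		/*
		 * A packet is complete when this response ends a buffer
		 * chain (EOP) or carried everything inline as immediate
		 * data or an async notification.
		 */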
2427f7917c00SJeff Kirsher 		packet_complete = flags &
2428f7917c00SJeff Kirsher 				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2429f7917c00SJeff Kirsher 				   F_RSPD_ASYNC_NOTIF);
2430f7917c00SJeff Kirsher 
2431f7917c00SJeff Kirsher 		if (skb != NULL && packet_complete) {
2432f7917c00SJeff Kirsher 			if (eth)
2433f7917c00SJeff Kirsher 				rx_eth(adap, q, skb, ethpad, lro);
2434f7917c00SJeff Kirsher 			else {
2435f7917c00SJeff Kirsher 				q->offload_pkts++;
2436f7917c00SJeff Kirsher 				/* Preserve the RSS info in csum & priority */
2437f7917c00SJeff Kirsher 				skb->csum = rss_hi;
2438f7917c00SJeff Kirsher 				skb->priority = rss_lo;
2439f7917c00SJeff Kirsher 				ngathered = rx_offload(&adap->tdev, q, skb,
2440f7917c00SJeff Kirsher 						       offload_skbs,
2441f7917c00SJeff Kirsher 						       ngathered);
2442f7917c00SJeff Kirsher 			}
2443f7917c00SJeff Kirsher 
2444f7917c00SJeff Kirsher 			if (flags & F_RSPD_EOP)
2445f7917c00SJeff Kirsher 				clear_rspq_bufstate(q);
2446f7917c00SJeff Kirsher 		}
2447f7917c00SJeff Kirsher 		--budget_left;
2448f7917c00SJeff Kirsher 	}
2449f7917c00SJeff Kirsher 
2450f7917c00SJeff Kirsher 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2451f7917c00SJeff Kirsher 
2452f7917c00SJeff Kirsher 	if (sleeping)
2453f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2454f7917c00SJeff Kirsher 
2455f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2456f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2457f7917c00SJeff Kirsher 		restart_tx(qs);
2458f7917c00SJeff Kirsher 
2459f7917c00SJeff Kirsher 	budget -= budget_left;
2460f7917c00SJeff Kirsher 	return budget;
2461f7917c00SJeff Kirsher }
2462f7917c00SJeff Kirsher 
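/*
 * A pure response carries no packet data: no immediate data, no async
 * notification and no free-list buffer (len_cq == 0).  It only returns
 * credits and GTS state.
 */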
2463f7917c00SJeff Kirsher static inline int is_pure_response(const struct rsp_desc *r)
2464f7917c00SJeff Kirsher {
2465f7917c00SJeff Kirsher 	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2466f7917c00SJeff Kirsher 
2467f7917c00SJeff Kirsher 	return (n | r->len_cq) == 0;
2468f7917c00SJeff Kirsher }
2469f7917c00SJeff Kirsher 
2470f7917c00SJeff Kirsher /**
2471f7917c00SJeff Kirsher  *	napi_rx_handler - the NAPI handler for Rx processing
2472f7917c00SJeff Kirsher  *	@napi: the napi instance
2473f7917c00SJeff Kirsher  *	@budget: how many packets we can process in this round
2474f7917c00SJeff Kirsher  *
2475f7917c00SJeff Kirsher  *	Handler for new data events when using NAPI.
2476f7917c00SJeff Kirsher  */
2477f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget)
2478f7917c00SJeff Kirsher {
2479f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2480f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2481f7917c00SJeff Kirsher 	int work_done = process_responses(adap, qs, budget);
2482f7917c00SJeff Kirsher 
2483f7917c00SJeff Kirsher 	if (likely(work_done < budget)) {
24846ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
2485f7917c00SJeff Kirsher 
2486f7917c00SJeff Kirsher 		/*
2487f7917c00SJeff Kirsher 		 * Because we don't atomically flush the following
2488f7917c00SJeff Kirsher 		 * write it is possible that in very rare cases it can
2489f7917c00SJeff Kirsher 		 * reach the device in a way that races with a new
2490f7917c00SJeff Kirsher 		 * response being written plus an error interrupt
2491f7917c00SJeff Kirsher 		 * causing the NAPI interrupt handler below to return
2492f7917c00SJeff Kirsher 		 * unhandled status to the OS.  To protect against
2493f7917c00SJeff Kirsher 		 * this would require flushing the write and doing
2494f7917c00SJeff Kirsher 		 * both the write and the flush with interrupts off.
2495f7917c00SJeff Kirsher 		 * Way too expensive and unjustifiable given the
2496f7917c00SJeff Kirsher 		 * rarity of the race.
2497f7917c00SJeff Kirsher 		 *
2498f7917c00SJeff Kirsher 		 * The race cannot happen at all with MSI-X.
2499f7917c00SJeff Kirsher 		 */
2500f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2501f7917c00SJeff Kirsher 			     V_NEWTIMER(qs->rspq.next_holdoff) |
2502f7917c00SJeff Kirsher 			     V_NEWINDEX(qs->rspq.cidx));
2503f7917c00SJeff Kirsher 	}
2504f7917c00SJeff Kirsher 	return work_done;
2505f7917c00SJeff Kirsher }
2506f7917c00SJeff Kirsher 
2507f7917c00SJeff Kirsher /*
2508f7917c00SJeff Kirsher  * Returns true if the device is already scheduled for polling.
2509f7917c00SJeff Kirsher  */
2510f7917c00SJeff Kirsher static inline int napi_is_scheduled(struct napi_struct *napi)
2511f7917c00SJeff Kirsher {
2512f7917c00SJeff Kirsher 	return test_bit(NAPI_STATE_SCHED, &napi->state);
2513f7917c00SJeff Kirsher }
2514f7917c00SJeff Kirsher 
2515f7917c00SJeff Kirsher /**
2516f7917c00SJeff Kirsher  *	process_pure_responses - process pure responses from a response queue
2517f7917c00SJeff Kirsher  *	@adap: the adapter
2518f7917c00SJeff Kirsher  *	@qs: the queue set owning the response queue
2519f7917c00SJeff Kirsher  *	@r: the first pure response to process
2520f7917c00SJeff Kirsher  *
2521f7917c00SJeff Kirsher  *	A simpler version of process_responses() that handles only pure (i.e.,
2522f7917c00SJeff Kirsher  *	non data-carrying) responses.  Such responses are too lightweight to
2523f7917c00SJeff Kirsher  *	justify calling a softirq under NAPI, so we handle them specially in
2524f7917c00SJeff Kirsher  *	the interrupt handler.  The function is called with a pointer to a
2525f7917c00SJeff Kirsher  *	response, which the caller must ensure is a valid pure response.
2526f7917c00SJeff Kirsher  *
2527f7917c00SJeff Kirsher  *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2528f7917c00SJeff Kirsher  */
2529f7917c00SJeff Kirsher static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2530f7917c00SJeff Kirsher 				  struct rsp_desc *r)
2531f7917c00SJeff Kirsher {
2532f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2533f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2534f7917c00SJeff Kirsher 
2535f7917c00SJeff Kirsher 	do {
2536f7917c00SJeff Kirsher 		u32 flags = ntohl(r->flags);
2537f7917c00SJeff Kirsher 
2538f7917c00SJeff Kirsher 		r++;
2539f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2540f7917c00SJeff Kirsher 			q->cidx = 0;
2541f7917c00SJeff Kirsher 			q->gen ^= 1;
2542f7917c00SJeff Kirsher 			r = q->desc;
2543f7917c00SJeff Kirsher 		}
2544f7917c00SJeff Kirsher 		prefetch(r);
2545f7917c00SJeff Kirsher 
2546f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2547f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2548f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2549f7917c00SJeff Kirsher 		}
2550f7917c00SJeff Kirsher 
2551f7917c00SJeff Kirsher 		q->pure_rsps++;
2552f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2553f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2554f7917c00SJeff Kirsher 			q->credits = 0;
2555f7917c00SJeff Kirsher 		}
2556f7917c00SJeff Kirsher 		if (!is_new_response(r, q))
2557f7917c00SJeff Kirsher 			break;
2558019be1cfSAlexander Duyck 		dma_rmb();
2559f7917c00SJeff Kirsher 	} while (is_pure_response(r));
2560f7917c00SJeff Kirsher 
2561f7917c00SJeff Kirsher 	if (sleeping)
2562f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2563f7917c00SJeff Kirsher 
2564f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2565f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2566f7917c00SJeff Kirsher 		restart_tx(qs);
2567f7917c00SJeff Kirsher 
2568f7917c00SJeff Kirsher 	return is_new_response(r, q);
2569f7917c00SJeff Kirsher }
2570f7917c00SJeff Kirsher 
2571f7917c00SJeff Kirsher /**
2572f7917c00SJeff Kirsher  *	handle_responses - decide what to do with new responses in NAPI mode
2573f7917c00SJeff Kirsher  *	@adap: the adapter
2574f7917c00SJeff Kirsher  *	@q: the response queue
2575f7917c00SJeff Kirsher  *
2576f7917c00SJeff Kirsher  *	This is used by the NAPI interrupt handlers to decide what to do with
2577f7917c00SJeff Kirsher  *	new SGE responses.  If there are no new responses it returns -1.  If
2578f7917c00SJeff Kirsher  *	there are new responses and they are pure (i.e., non-data carrying)
2579f7917c00SJeff Kirsher  *	it handles them straight in hard interrupt context as they are very
2580f7917c00SJeff Kirsher  *	cheap and don't deliver any packets.  Finally, if there are any data
2581f7917c00SJeff Kirsher  *	signaling responses it schedules the NAPI handler.  Returns 1 if it
2582f7917c00SJeff Kirsher  *	schedules NAPI, 0 if all new responses were pure.
2583f7917c00SJeff Kirsher  *
2584f7917c00SJeff Kirsher  *	The caller must ascertain NAPI is not already running.
2585f7917c00SJeff Kirsher  */
2586f7917c00SJeff Kirsher static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2587f7917c00SJeff Kirsher {
2588f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(q);
2589f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2590f7917c00SJeff Kirsher 
2591f7917c00SJeff Kirsher 	if (!is_new_response(r, q))
2592f7917c00SJeff Kirsher 		return -1;
2593019be1cfSAlexander Duyck 	dma_rmb();
2594f7917c00SJeff Kirsher 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2595f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2596f7917c00SJeff Kirsher 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2597f7917c00SJeff Kirsher 		return 0;
2598f7917c00SJeff Kirsher 	}
2599f7917c00SJeff Kirsher 	napi_schedule(&qs->napi);
2600f7917c00SJeff Kirsher 	return 1;
2601f7917c00SJeff Kirsher }
2602f7917c00SJeff Kirsher 
2603f7917c00SJeff Kirsher /*
2604f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2605f7917c00SJeff Kirsher  * (i.e., response queue serviced in hard interrupt).
2606f7917c00SJeff Kirsher  */
2607f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2608f7917c00SJeff Kirsher {
2609f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2610f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2611f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2612f7917c00SJeff Kirsher 
2613f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2614f7917c00SJeff Kirsher 	if (process_responses(adap, qs, -1) == 0)
2615f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2616f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2617f7917c00SJeff Kirsher 		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2618f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2619f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2620f7917c00SJeff Kirsher }
2621f7917c00SJeff Kirsher 
2622f7917c00SJeff Kirsher /*
2623f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2624f7917c00SJeff Kirsher  * (i.e., response queue serviced by NAPI polling).
2625f7917c00SJeff Kirsher  */
2626f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2627f7917c00SJeff Kirsher {
2628f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2629f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2630f7917c00SJeff Kirsher 
2631f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2632f7917c00SJeff Kirsher 
2633f7917c00SJeff Kirsher 	if (handle_responses(qs->adap, q) < 0)
2634f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2635f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2636f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2637f7917c00SJeff Kirsher }
2638f7917c00SJeff Kirsher 
2639f7917c00SJeff Kirsher /*
2640f7917c00SJeff Kirsher  * The non-NAPI MSI interrupt handler.  This needs to handle data events from
2641f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2642f7917c00SJeff Kirsher  * the same MSI vector.  We use one SGE response queue per port in this mode
2643f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2644f7917c00SJeff Kirsher  */
2645f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi(int irq, void *cookie)
2646f7917c00SJeff Kirsher {
2647f7917c00SJeff Kirsher 	int new_packets = 0;
2648f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2649f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2650f7917c00SJeff Kirsher 
2651f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2652f7917c00SJeff Kirsher 
2653f7917c00SJeff Kirsher 	if (process_responses(adap, &adap->sge.qs[0], -1)) {
2654f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2655f7917c00SJeff Kirsher 			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2656f7917c00SJeff Kirsher 		new_packets = 1;
2657f7917c00SJeff Kirsher 	}
2658f7917c00SJeff Kirsher 
2659f7917c00SJeff Kirsher 	if (adap->params.nports == 2 &&
2660f7917c00SJeff Kirsher 	    process_responses(adap, &adap->sge.qs[1], -1)) {
2661f7917c00SJeff Kirsher 		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2662f7917c00SJeff Kirsher 
2663f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2664f7917c00SJeff Kirsher 			     V_NEWTIMER(q1->next_holdoff) |
2665f7917c00SJeff Kirsher 			     V_NEWINDEX(q1->cidx));
2666f7917c00SJeff Kirsher 		new_packets = 1;
2667f7917c00SJeff Kirsher 	}
2668f7917c00SJeff Kirsher 
2669f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2670f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2671f7917c00SJeff Kirsher 
2672f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2673f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2674f7917c00SJeff Kirsher }
2675f7917c00SJeff Kirsher 
2676f7917c00SJeff Kirsher static int rspq_check_napi(struct sge_qset *qs)
2677f7917c00SJeff Kirsher {
2678f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2679f7917c00SJeff Kirsher 
2680f7917c00SJeff Kirsher 	if (!napi_is_scheduled(&qs->napi) &&
2681f7917c00SJeff Kirsher 	    is_new_response(&q->desc[q->cidx], q)) {
2682f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
2683f7917c00SJeff Kirsher 		return 1;
2684f7917c00SJeff Kirsher 	}
2685f7917c00SJeff Kirsher 	return 0;
2686f7917c00SJeff Kirsher }
2687f7917c00SJeff Kirsher 
2688f7917c00SJeff Kirsher /*
2689f7917c00SJeff Kirsher  * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2690f7917c00SJeff Kirsher  * by NAPI polling).  Handles data events from SGE response queues as well as
2691f7917c00SJeff Kirsher  * error and other async events as they all use the same MSI vector.  We use
2692f7917c00SJeff Kirsher  * one SGE response queue per port in this mode and protect all response
2693f7917c00SJeff Kirsher  * queues with queue 0's lock.
2694f7917c00SJeff Kirsher  */
2695f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2696f7917c00SJeff Kirsher {
2697f7917c00SJeff Kirsher 	int new_packets;
2698f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2699f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2700f7917c00SJeff Kirsher 
2701f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2702f7917c00SJeff Kirsher 
2703f7917c00SJeff Kirsher 	new_packets = rspq_check_napi(&adap->sge.qs[0]);
2704f7917c00SJeff Kirsher 	if (adap->params.nports == 2)
2705f7917c00SJeff Kirsher 		new_packets += rspq_check_napi(&adap->sge.qs[1]);
2706f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2707f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2708f7917c00SJeff Kirsher 
2709f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2710f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2711f7917c00SJeff Kirsher }
2712f7917c00SJeff Kirsher 
2713f7917c00SJeff Kirsher /*
2714f7917c00SJeff Kirsher  * A helper function that processes responses and issues GTS.
2715f7917c00SJeff Kirsher  */
2716f7917c00SJeff Kirsher static inline int process_responses_gts(struct adapter *adap,
2717f7917c00SJeff Kirsher 					struct sge_rspq *rq)
2718f7917c00SJeff Kirsher {
2719f7917c00SJeff Kirsher 	int work;
2720f7917c00SJeff Kirsher 
2721f7917c00SJeff Kirsher 	work = process_responses(adap, rspq_to_qset(rq), -1);
2722f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2723f7917c00SJeff Kirsher 		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2724f7917c00SJeff Kirsher 	return work;
2725f7917c00SJeff Kirsher }
2726f7917c00SJeff Kirsher 
2727f7917c00SJeff Kirsher /*
2728f7917c00SJeff Kirsher  * The legacy INTx interrupt handler.  This needs to handle data events from
2729f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2730f7917c00SJeff Kirsher  * the same interrupt pin.  We use one SGE response queue per port in this mode
2731f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2732f7917c00SJeff Kirsher  */
2733f7917c00SJeff Kirsher static irqreturn_t t3_intr(int irq, void *cookie)
2734f7917c00SJeff Kirsher {
2735f7917c00SJeff Kirsher 	int work_done, w0, w1;
2736f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2737f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2738f7917c00SJeff Kirsher 	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2739f7917c00SJeff Kirsher 
2740f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2741f7917c00SJeff Kirsher 
2742f7917c00SJeff Kirsher 	w0 = is_new_response(&q0->desc[q0->cidx], q0);
2743f7917c00SJeff Kirsher 	w1 = adap->params.nports == 2 &&
2744f7917c00SJeff Kirsher 	    is_new_response(&q1->desc[q1->cidx], q1);
2745f7917c00SJeff Kirsher 
2746f7917c00SJeff Kirsher 	if (likely(w0 | w1)) {
2747f7917c00SJeff Kirsher 		t3_write_reg(adap, A_PL_CLI, 0);
2748f7917c00SJeff Kirsher 		t3_read_reg(adap, A_PL_CLI);	/* flush */
2749f7917c00SJeff Kirsher 
2750f7917c00SJeff Kirsher 		if (likely(w0))
2751f7917c00SJeff Kirsher 			process_responses_gts(adap, q0);
2752f7917c00SJeff Kirsher 
2753f7917c00SJeff Kirsher 		if (w1)
2754f7917c00SJeff Kirsher 			process_responses_gts(adap, q1);
2755f7917c00SJeff Kirsher 
2756f7917c00SJeff Kirsher 		work_done = w0 | w1;
2757f7917c00SJeff Kirsher 	} else
2758f7917c00SJeff Kirsher 		work_done = t3_slow_intr_handler(adap);
2759f7917c00SJeff Kirsher 
2760f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2761f7917c00SJeff Kirsher 	return IRQ_RETVAL(work_done != 0);
2762f7917c00SJeff Kirsher }
2763f7917c00SJeff Kirsher 
2764f7917c00SJeff Kirsher /*
2765f7917c00SJeff Kirsher  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2766f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2767f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2768f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2769f7917c00SJeff Kirsher  * queue 0's lock.
2770f7917c00SJeff Kirsher  */
2771f7917c00SJeff Kirsher static irqreturn_t t3b_intr(int irq, void *cookie)
2772f7917c00SJeff Kirsher {
2773f7917c00SJeff Kirsher 	u32 map;
2774f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2775f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2776f7917c00SJeff Kirsher 
2777f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2778f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2779f7917c00SJeff Kirsher 
2780f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2781f7917c00SJeff Kirsher 		return IRQ_NONE;
2782f7917c00SJeff Kirsher 
2783f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2784f7917c00SJeff Kirsher 
2785f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2786f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2787f7917c00SJeff Kirsher 
2788f7917c00SJeff Kirsher 	if (likely(map & 1))
2789f7917c00SJeff Kirsher 		process_responses_gts(adap, q0);
2790f7917c00SJeff Kirsher 
2791f7917c00SJeff Kirsher 	if (map & 2)
2792f7917c00SJeff Kirsher 		process_responses_gts(adap, &adap->sge.qs[1].rspq);
2793f7917c00SJeff Kirsher 
2794f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2795f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2796f7917c00SJeff Kirsher }
2797f7917c00SJeff Kirsher 
2798f7917c00SJeff Kirsher /*
2799f7917c00SJeff Kirsher  * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2800f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2801f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2802f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2803f7917c00SJeff Kirsher  * queue 0's lock.
2804f7917c00SJeff Kirsher  */
2805f7917c00SJeff Kirsher static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2806f7917c00SJeff Kirsher {
2807f7917c00SJeff Kirsher 	u32 map;
2808f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2809f7917c00SJeff Kirsher 	struct sge_qset *qs0 = &adap->sge.qs[0];
2810f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &qs0->rspq;
2811f7917c00SJeff Kirsher 
2812f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2813f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2814f7917c00SJeff Kirsher 
2815f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2816f7917c00SJeff Kirsher 		return IRQ_NONE;
2817f7917c00SJeff Kirsher 
2818f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2819f7917c00SJeff Kirsher 
2820f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2821f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2822f7917c00SJeff Kirsher 
2823f7917c00SJeff Kirsher 	if (likely(map & 1))
2824f7917c00SJeff Kirsher 		napi_schedule(&qs0->napi);
2825f7917c00SJeff Kirsher 
2826f7917c00SJeff Kirsher 	if (map & 2)
2827f7917c00SJeff Kirsher 		napi_schedule(&adap->sge.qs[1].napi);
2828f7917c00SJeff Kirsher 
2829f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2830f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2831f7917c00SJeff Kirsher }
2832f7917c00SJeff Kirsher 
2833f7917c00SJeff Kirsher /**
2834f7917c00SJeff Kirsher  *	t3_intr_handler - select the top-level interrupt handler
2835f7917c00SJeff Kirsher  *	@adap: the adapter
2836f7917c00SJeff Kirsher  *	@polling: whether using NAPI to service response queues
2837f7917c00SJeff Kirsher  *
2838f7917c00SJeff Kirsher  *	Selects the top-level interrupt handler based on the type of interrupts
2839f7917c00SJeff Kirsher  *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2840f7917c00SJeff Kirsher  *	response queues.
2841f7917c00SJeff Kirsher  */
2842f7917c00SJeff Kirsher irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2843f7917c00SJeff Kirsher {
2844f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX)
2845f7917c00SJeff Kirsher 		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2846f7917c00SJeff Kirsher 	if (adap->flags & USING_MSI)
2847f7917c00SJeff Kirsher 		return polling ? t3_intr_msi_napi : t3_intr_msi;
2848f7917c00SJeff Kirsher 	if (adap->params.rev > 0)
2849f7917c00SJeff Kirsher 		return polling ? t3b_intr_napi : t3b_intr;
2850f7917c00SJeff Kirsher 	return t3_intr;
2851f7917c00SJeff Kirsher }
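
/*
 * A minimal sketch of how the handler returned above might be wired up by
 * the main driver; the irq number, flags and cookie shown here are
 * illustrative rather than taken from this file:
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, rspq_uses_napi),
 *			  IRQF_SHARED, name, adap);
 */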
2852f7917c00SJeff Kirsher 
2853f7917c00SJeff Kirsher #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2854f7917c00SJeff Kirsher 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2855f7917c00SJeff Kirsher 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2856f7917c00SJeff Kirsher 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2857f7917c00SJeff Kirsher 		    F_HIRCQPARITYERROR)
2858f7917c00SJeff Kirsher #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2859f7917c00SJeff Kirsher #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2860f7917c00SJeff Kirsher 		      F_RSPQDISABLED)
2861f7917c00SJeff Kirsher 
2862f7917c00SJeff Kirsher /**
2863f7917c00SJeff Kirsher  *	t3_sge_err_intr_handler - SGE async event interrupt handler
2864f7917c00SJeff Kirsher  *	@adapter: the adapter
2865f7917c00SJeff Kirsher  *
2866f7917c00SJeff Kirsher  *	Interrupt handler for SGE asynchronous (non-data) events.
2867f7917c00SJeff Kirsher  */
2868f7917c00SJeff Kirsher void t3_sge_err_intr_handler(struct adapter *adapter)
2869f7917c00SJeff Kirsher {
2870f7917c00SJeff Kirsher 	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2871f7917c00SJeff Kirsher 				 ~F_FLEMPTY;
2872f7917c00SJeff Kirsher 
2873f7917c00SJeff Kirsher 	if (status & SGE_PARERR)
2874f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2875f7917c00SJeff Kirsher 			 status & SGE_PARERR);
2876f7917c00SJeff Kirsher 	if (status & SGE_FRAMINGERR)
2877f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2878f7917c00SJeff Kirsher 			 status & SGE_FRAMINGERR);
2879f7917c00SJeff Kirsher 
2880f7917c00SJeff Kirsher 	if (status & F_RSPQCREDITOVERFOW)
2881f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
2882f7917c00SJeff Kirsher 
2883f7917c00SJeff Kirsher 	if (status & F_RSPQDISABLED) {
2884f7917c00SJeff Kirsher 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2885f7917c00SJeff Kirsher 
2886f7917c00SJeff Kirsher 		CH_ALERT(adapter,
2887f7917c00SJeff Kirsher 			 "packet delivered to disabled response queue "
2888f7917c00SJeff Kirsher 			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2889f7917c00SJeff Kirsher 	}
2890f7917c00SJeff Kirsher 
2891f7917c00SJeff Kirsher 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2892f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_drop_task);
2893f7917c00SJeff Kirsher 
2894f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2895f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_full_task);
2896f7917c00SJeff Kirsher 
2897f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2898f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_empty_task);
2899f7917c00SJeff Kirsher 
2900f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2901f7917c00SJeff Kirsher 	if (status & SGE_FATALERR)
2902f7917c00SJeff Kirsher 		t3_fatal_err(adapter);
2903f7917c00SJeff Kirsher }
2904f7917c00SJeff Kirsher 
2905f7917c00SJeff Kirsher /**
2906f7917c00SJeff Kirsher  *	sge_timer_tx - perform periodic maintenance of an SGE qset
2907d0ea5cbdSJesse Brandeburg  *	@t: a timer list containing the SGE queue set to maintain
2908f7917c00SJeff Kirsher  *
2909f7917c00SJeff Kirsher  *	Runs periodically from a timer to perform maintenance of an SGE queue
2910f7917c00SJeff Kirsher  *	set.  Its single task is Tx descriptor reclaim:
2911f7917c00SJeff Kirsher  *
2912f7917c00SJeff Kirsher  *	Cleans up any completed Tx descriptors that may still be pending.
2913f7917c00SJeff Kirsher  *	Normal descriptor cleanup happens when new packets are added to a Tx
2914f7917c00SJeff Kirsher  *	queue so this timer is relatively infrequent and does any cleanup only
2915f7917c00SJeff Kirsher  *	if the Tx queue has not seen any new packets in a while.  We make a
2916f7917c00SJeff Kirsher  *	best effort attempt to reclaim descriptors, in that we don't wait
2917f7917c00SJeff Kirsher  *	around if we cannot get a queue's lock (which most likely is because
2918f7917c00SJeff Kirsher  *	someone else is queueing new packets and so will also handle the clean
2919f7917c00SJeff Kirsher  *	up).  Since control queues use immediate data exclusively we don't
2920f7917c00SJeff Kirsher  *	bother cleaning them up here.
2921f7917c00SJeff Kirsher  *
2922f7917c00SJeff Kirsher  */
29230e23daebSKees Cook static void sge_timer_tx(struct timer_list *t)
2924f7917c00SJeff Kirsher {
29250e23daebSKees Cook 	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2926f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2927f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2928f7917c00SJeff Kirsher 	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2929f7917c00SJeff Kirsher 	unsigned long next_period;
2930f7917c00SJeff Kirsher 
2931f7917c00SJeff Kirsher 	if (__netif_tx_trylock(qs->tx_q)) {
2932f7917c00SJeff Kirsher 		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2933f7917c00SJeff Kirsher 						    TX_RECLAIM_TIMER_CHUNK);
2934f7917c00SJeff Kirsher 		__netif_tx_unlock(qs->tx_q);
2935f7917c00SJeff Kirsher 	}
2936f7917c00SJeff Kirsher 
2937f7917c00SJeff Kirsher 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2938f7917c00SJeff Kirsher 		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2939f7917c00SJeff Kirsher 						     TX_RECLAIM_TIMER_CHUNK);
2940f7917c00SJeff Kirsher 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
2941f7917c00SJeff Kirsher 	}
2942f7917c00SJeff Kirsher 
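	/*
	 * Back off adaptively: the more descriptors we just reclaimed, the
	 * busier the queues are, so rearm the timer sooner (each chunk of
	 * TX_RECLAIM_TIMER_CHUNK reclaimed descriptors halves the period).
	 */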
2943f7917c00SJeff Kirsher 	next_period = TX_RECLAIM_PERIOD >>
2944f7917c00SJeff Kirsher 		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2945f7917c00SJeff Kirsher 		       TX_RECLAIM_TIMER_CHUNK);
2946f7917c00SJeff Kirsher 	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2947f7917c00SJeff Kirsher }
2948f7917c00SJeff Kirsher 
294949ce9c2cSBen Hutchings /**
2950f7917c00SJeff Kirsher  *	sge_timer_rx - perform periodic maintenance of an SGE qset
2951d0ea5cbdSJesse Brandeburg  *	@t: the timer list containing the SGE queue set to maintain
2952f7917c00SJeff Kirsher  *
2953f7917c00SJeff Kirsher  *	a) Replenishes Rx queues that have run out due to memory shortage.
2954f7917c00SJeff Kirsher  *	Normally new Rx buffers are added when existing ones are consumed but
2955f7917c00SJeff Kirsher  *	when out of memory a queue can become empty.  We try to add only a few
2956f7917c00SJeff Kirsher  *	buffers here, the queue will be replenished fully as these new buffers
2957f7917c00SJeff Kirsher  *	are used up if memory shortage has subsided.
2958f7917c00SJeff Kirsher  *
2959f7917c00SJeff Kirsher  *	b) Returns coalesced response queue credits in case a response queue is
2960f7917c00SJeff Kirsher  *	starved.
2961f7917c00SJeff Kirsher  *
2962f7917c00SJeff Kirsher  */
29630e23daebSKees Cook static void sge_timer_rx(struct timer_list *t)
2964f7917c00SJeff Kirsher {
2965f7917c00SJeff Kirsher 	spinlock_t *lock;
29660e23daebSKees Cook 	struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2967f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2968f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2969f7917c00SJeff Kirsher 	u32 status;
2970f7917c00SJeff Kirsher 
2971f7917c00SJeff Kirsher 	lock = adap->params.rev > 0 ?
2972f7917c00SJeff Kirsher 	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2973f7917c00SJeff Kirsher 
2974f7917c00SJeff Kirsher 	if (!spin_trylock_irq(lock))
2975f7917c00SJeff Kirsher 		goto out;
2976f7917c00SJeff Kirsher 
2977f7917c00SJeff Kirsher 	if (napi_is_scheduled(&qs->napi))
2978f7917c00SJeff Kirsher 		goto unlock;
2979f7917c00SJeff Kirsher 
2980f7917c00SJeff Kirsher 	if (adap->params.rev < 4) {
2981f7917c00SJeff Kirsher 		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2982f7917c00SJeff Kirsher 
2983f7917c00SJeff Kirsher 		if (status & (1 << qs->rspq.cntxt_id)) {
2984f7917c00SJeff Kirsher 			qs->rspq.starved++;
2985f7917c00SJeff Kirsher 			if (qs->rspq.credits) {
2986f7917c00SJeff Kirsher 				qs->rspq.credits--;
2987f7917c00SJeff Kirsher 				refill_rspq(adap, &qs->rspq, 1);
2988f7917c00SJeff Kirsher 				qs->rspq.restarted++;
2989f7917c00SJeff Kirsher 				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2990f7917c00SJeff Kirsher 					     1 << qs->rspq.cntxt_id);
2991f7917c00SJeff Kirsher 			}
2992f7917c00SJeff Kirsher 		}
2993f7917c00SJeff Kirsher 	}
2994f7917c00SJeff Kirsher 
2995f7917c00SJeff Kirsher 	if (qs->fl[0].credits < qs->fl[0].size)
2996f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[0]);
2997f7917c00SJeff Kirsher 	if (qs->fl[1].credits < qs->fl[1].size)
2998f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[1]);
2999f7917c00SJeff Kirsher 
3000f7917c00SJeff Kirsher unlock:
3001f7917c00SJeff Kirsher 	spin_unlock_irq(lock);
3002f7917c00SJeff Kirsher out:
3003f7917c00SJeff Kirsher 	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3004f7917c00SJeff Kirsher }
3005f7917c00SJeff Kirsher 
3006f7917c00SJeff Kirsher /**
3007f7917c00SJeff Kirsher  *	t3_update_qset_coalesce - update coalescing settings for a queue set
3008f7917c00SJeff Kirsher  *	@qs: the SGE queue set
3009f7917c00SJeff Kirsher  *	@p: new queue set parameters
3010f7917c00SJeff Kirsher  *
3011f7917c00SJeff Kirsher  *	Update the coalescing settings for an SGE queue set.  Nothing is done
3012f7917c00SJeff Kirsher  *	if the queue set is not initialized yet.
3013f7917c00SJeff Kirsher  */
3014f7917c00SJeff Kirsher void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3015f7917c00SJeff Kirsher {
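	/* Convert microseconds to the SGE's 0.1 us holdoff timer units. */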
3016f7917c00SJeff Kirsher 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
3017f7917c00SJeff Kirsher 	qs->rspq.polling = p->polling;
3018f7917c00SJeff Kirsher 	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
3019f7917c00SJeff Kirsher }
3020f7917c00SJeff Kirsher 
3021f7917c00SJeff Kirsher /**
3022f7917c00SJeff Kirsher  *	t3_sge_alloc_qset - initialize an SGE queue set
3023f7917c00SJeff Kirsher  *	@adapter: the adapter
3024f7917c00SJeff Kirsher  *	@id: the queue set id
3025f7917c00SJeff Kirsher  *	@nports: how many Ethernet ports will be using this queue set
3026f7917c00SJeff Kirsher  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
3027f7917c00SJeff Kirsher  *	@p: configuration parameters for this queue set
3028f7917c00SJeff Kirsher  *	@ntxq: number of Tx queues for the queue set
3029d0ea5cbdSJesse Brandeburg  *	@dev: net device associated with this queue set
3030f7917c00SJeff Kirsher  *	@netdevq: net device TX queue associated with this queue set
3031f7917c00SJeff Kirsher  *
3032f7917c00SJeff Kirsher  *	Allocate resources and initialize an SGE queue set.  A queue set
3033f7917c00SJeff Kirsher  *	comprises a response queue, two Rx free-buffer queues, and up to 3
3034f7917c00SJeff Kirsher  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
3035f7917c00SJeff Kirsher  *	queue, offload queue, and control queue.
3036f7917c00SJeff Kirsher  */
3037f7917c00SJeff Kirsher int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3038f7917c00SJeff Kirsher 		      int irq_vec_idx, const struct qset_params *p,
3039f7917c00SJeff Kirsher 		      int ntxq, struct net_device *dev,
3040f7917c00SJeff Kirsher 		      struct netdev_queue *netdevq)
3041f7917c00SJeff Kirsher {
3042f7917c00SJeff Kirsher 	int i, avail, ret = -ENOMEM;
3043f7917c00SJeff Kirsher 	struct sge_qset *q = &adapter->sge.qs[id];
3044f7917c00SJeff Kirsher 
3045f7917c00SJeff Kirsher 	init_qset_cntxt(q, id);
30460e23daebSKees Cook 	timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
30470e23daebSKees Cook 	timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
3048f7917c00SJeff Kirsher 
3049f7917c00SJeff Kirsher 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3050f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3051f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3052f7917c00SJeff Kirsher 				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
3053f7917c00SJeff Kirsher 	if (!q->fl[0].desc)
3054f7917c00SJeff Kirsher 		goto err;
3055f7917c00SJeff Kirsher 
3056f7917c00SJeff Kirsher 	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3057f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3058f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3059f7917c00SJeff Kirsher 				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
3060f7917c00SJeff Kirsher 	if (!q->fl[1].desc)
3061f7917c00SJeff Kirsher 		goto err;
3062f7917c00SJeff Kirsher 
3063f7917c00SJeff Kirsher 	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3064f7917c00SJeff Kirsher 				  sizeof(struct rsp_desc), 0,
3065f7917c00SJeff Kirsher 				  &q->rspq.phys_addr, NULL);
3066f7917c00SJeff Kirsher 	if (!q->rspq.desc)
3067f7917c00SJeff Kirsher 		goto err;
3068f7917c00SJeff Kirsher 
3069f7917c00SJeff Kirsher 	for (i = 0; i < ntxq; ++i) {
3070f7917c00SJeff Kirsher 		/*
3071f7917c00SJeff Kirsher 		 * The control queue always uses immediate data so does not
3072f7917c00SJeff Kirsher 		 * need to keep track of any sk_buffs.
3073f7917c00SJeff Kirsher 		 */
3074f7917c00SJeff Kirsher 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3075f7917c00SJeff Kirsher 
3076f7917c00SJeff Kirsher 		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3077f7917c00SJeff Kirsher 					    sizeof(struct tx_desc), sz,
3078f7917c00SJeff Kirsher 					    &q->txq[i].phys_addr,
3079f7917c00SJeff Kirsher 					    &q->txq[i].sdesc);
3080f7917c00SJeff Kirsher 		if (!q->txq[i].desc)
3081f7917c00SJeff Kirsher 			goto err;
3082f7917c00SJeff Kirsher 
3083f7917c00SJeff Kirsher 		q->txq[i].gen = 1;
3084f7917c00SJeff Kirsher 		q->txq[i].size = p->txq_size[i];
3085f7917c00SJeff Kirsher 		spin_lock_init(&q->txq[i].lock);
3086f7917c00SJeff Kirsher 		skb_queue_head_init(&q->txq[i].sendq);
3087f7917c00SJeff Kirsher 	}
3088f7917c00SJeff Kirsher 
30895e0b8928SÍñigo Huguet 	INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
30905e0b8928SÍñigo Huguet 	INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
3091f7917c00SJeff Kirsher 
3092f7917c00SJeff Kirsher 	q->fl[0].gen = q->fl[1].gen = 1;
3093f7917c00SJeff Kirsher 	q->fl[0].size = p->fl_size;
3094f7917c00SJeff Kirsher 	q->fl[1].size = p->jumbo_size;
3095f7917c00SJeff Kirsher 
3096f7917c00SJeff Kirsher 	q->rspq.gen = 1;
3097f7917c00SJeff Kirsher 	q->rspq.size = p->rspq_size;
3098f7917c00SJeff Kirsher 	spin_lock_init(&q->rspq.lock);
3099f7917c00SJeff Kirsher 	skb_queue_head_init(&q->rspq.rx_queue);
3100f7917c00SJeff Kirsher 
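	/*
	 * Stop the Ethernet Tx queue once fewer descriptors remain than a
	 * worst-case packet (a gather list covering MAX_SKB_FRAGS + 1
	 * buffers plus a few header flits) needs for each port sharing
	 * this queue set.
	 */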
3101f7917c00SJeff Kirsher 	q->txq[TXQ_ETH].stop_thres = nports *
3102f7917c00SJeff Kirsher 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3103f7917c00SJeff Kirsher 
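	/*
	 * FL0 supplies the small Rx buffers (page chunks when configured,
	 * otherwise sk_buffs for the common case), FL1 supplies the large
	 * jumbo buffers.
	 */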
3104f7917c00SJeff Kirsher #if FL0_PG_CHUNK_SIZE > 0
3105f7917c00SJeff Kirsher 	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3106f7917c00SJeff Kirsher #else
3107f7917c00SJeff Kirsher 	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3108f7917c00SJeff Kirsher #endif
3109f7917c00SJeff Kirsher #if FL1_PG_CHUNK_SIZE > 0
3110f7917c00SJeff Kirsher 	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3111f7917c00SJeff Kirsher #else
3112f7917c00SJeff Kirsher 	q->fl[1].buf_size = is_offload(adapter) ?
3113f7917c00SJeff Kirsher 		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3114f7917c00SJeff Kirsher 		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3115f7917c00SJeff Kirsher #endif
3116f7917c00SJeff Kirsher 
3117f7917c00SJeff Kirsher 	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3118f7917c00SJeff Kirsher 	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3119f7917c00SJeff Kirsher 	q->fl[0].order = FL0_PG_ORDER;
3120f7917c00SJeff Kirsher 	q->fl[1].order = FL1_PG_ORDER;
3121f7917c00SJeff Kirsher 	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3122f7917c00SJeff Kirsher 	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3123f7917c00SJeff Kirsher 
3124f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->sge.reg_lock);
3125f7917c00SJeff Kirsher 
3126f7917c00SJeff Kirsher 	/* FL threshold comparison uses < */
3127f7917c00SJeff Kirsher 	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3128f7917c00SJeff Kirsher 				   q->rspq.phys_addr, q->rspq.size,
3129f7917c00SJeff Kirsher 				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3130f7917c00SJeff Kirsher 	if (ret)
3131f7917c00SJeff Kirsher 		goto err_unlock;
3132f7917c00SJeff Kirsher 
3133f7917c00SJeff Kirsher 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3134f7917c00SJeff Kirsher 		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3135f7917c00SJeff Kirsher 					  q->fl[i].phys_addr, q->fl[i].size,
3136f7917c00SJeff Kirsher 					  q->fl[i].buf_size - SGE_PG_RSVD,
3137f7917c00SJeff Kirsher 					  p->cong_thres, 1, 0);
3138f7917c00SJeff Kirsher 		if (ret)
3139f7917c00SJeff Kirsher 			goto err_unlock;
3140f7917c00SJeff Kirsher 	}
3141f7917c00SJeff Kirsher 
3142f7917c00SJeff Kirsher 	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3143f7917c00SJeff Kirsher 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3144f7917c00SJeff Kirsher 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3145f7917c00SJeff Kirsher 				 1, 0);
3146f7917c00SJeff Kirsher 	if (ret)
3147f7917c00SJeff Kirsher 		goto err_unlock;
3148f7917c00SJeff Kirsher 
3149f7917c00SJeff Kirsher 	if (ntxq > 1) {
3150f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3151f7917c00SJeff Kirsher 					 USE_GTS, SGE_CNTXT_OFLD, id,
3152f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].phys_addr,
3153f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
3154f7917c00SJeff Kirsher 		if (ret)
3155f7917c00SJeff Kirsher 			goto err_unlock;
3156f7917c00SJeff Kirsher 	}
3157f7917c00SJeff Kirsher 
3158f7917c00SJeff Kirsher 	if (ntxq > 2) {
3159f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3160f7917c00SJeff Kirsher 					 SGE_CNTXT_CTRL, id,
3161f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].phys_addr,
3162f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].size,
3163f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].token, 1, 0);
3164f7917c00SJeff Kirsher 		if (ret)
3165f7917c00SJeff Kirsher 			goto err_unlock;
3166f7917c00SJeff Kirsher 	}
3167f7917c00SJeff Kirsher 
3168f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3169f7917c00SJeff Kirsher 
3170f7917c00SJeff Kirsher 	q->adap = adapter;
3171f7917c00SJeff Kirsher 	q->netdev = dev;
3172f7917c00SJeff Kirsher 	q->tx_q = netdevq;
3173f7917c00SJeff Kirsher 	t3_update_qset_coalesce(q, p);
3174f7917c00SJeff Kirsher 
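	/*
	 * Post the initial free-list buffers; FL0 must receive at least some
	 * credits or the queue set cannot receive anything.
	 */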
3175f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3176f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3177f7917c00SJeff Kirsher 	if (!avail) {
3178f7917c00SJeff Kirsher 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3179ff992489SZhang Changzhong 		ret = -ENOMEM;
3180f7917c00SJeff Kirsher 		goto err;
3181f7917c00SJeff Kirsher 	}
3182f7917c00SJeff Kirsher 	if (avail < q->fl[0].size)
3183f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3184f7917c00SJeff Kirsher 			avail);
3185f7917c00SJeff Kirsher 
3186f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3187f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3188f7917c00SJeff Kirsher 	if (avail < q->fl[1].size)
3189f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3190f7917c00SJeff Kirsher 			avail);
3191f7917c00SJeff Kirsher 	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3192f7917c00SJeff Kirsher 
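	/* Arm the response queue with its holdoff timer via the GTS register. */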
3193f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3194f7917c00SJeff Kirsher 		     V_NEWTIMER(q->rspq.holdoff_tmr));
3195f7917c00SJeff Kirsher 
3196f7917c00SJeff Kirsher 	return 0;
3197f7917c00SJeff Kirsher 
3198f7917c00SJeff Kirsher err_unlock:
3199f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3200f7917c00SJeff Kirsher err:
3201f7917c00SJeff Kirsher 	t3_free_qset(adapter, q);
3202f7917c00SJeff Kirsher 	return ret;
3203f7917c00SJeff Kirsher }
3204f7917c00SJeff Kirsher 
3205f7917c00SJeff Kirsher /**
3206f7917c00SJeff Kirsher  *      t3_start_sge_timers - start SGE timer callbacks
3207f7917c00SJeff Kirsher  *      @adap: the adapter
3208f7917c00SJeff Kirsher  *
3209f7917c00SJeff Kirsher  *      Starts each SGE queue set's timer callback
3210f7917c00SJeff Kirsher  */
3211f7917c00SJeff Kirsher void t3_start_sge_timers(struct adapter *adap)
3212f7917c00SJeff Kirsher {
3213f7917c00SJeff Kirsher 	int i;
3214f7917c00SJeff Kirsher 
3215f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3216f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3217f7917c00SJeff Kirsher 
3218f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
32192acc0abcSColin Ian King 			mod_timer(&q->tx_reclaim_timer,
32202acc0abcSColin Ian King 				  jiffies + TX_RECLAIM_PERIOD);
3221f7917c00SJeff Kirsher 
3222f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
32232acc0abcSColin Ian King 			mod_timer(&q->rx_reclaim_timer,
32242acc0abcSColin Ian King 				  jiffies + RX_RECLAIM_PERIOD);
3225f7917c00SJeff Kirsher 	}
3226f7917c00SJeff Kirsher }
3227f7917c00SJeff Kirsher 
3228f7917c00SJeff Kirsher /**
3229f7917c00SJeff Kirsher  *	t3_stop_sge_timers - stop SGE timer callbacks
3230f7917c00SJeff Kirsher  *	@adap: the adapter
3231f7917c00SJeff Kirsher  *
3232f7917c00SJeff Kirsher  *	Stops each SGE queue set's timer callback
3233f7917c00SJeff Kirsher  */
3234f7917c00SJeff Kirsher void t3_stop_sge_timers(struct adapter *adap)
3235f7917c00SJeff Kirsher {
3236f7917c00SJeff Kirsher 	int i;
3237f7917c00SJeff Kirsher 
3238f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3239f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3240f7917c00SJeff Kirsher 
3241f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
3242f7917c00SJeff Kirsher 			del_timer_sync(&q->tx_reclaim_timer);
3243f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
3244f7917c00SJeff Kirsher 			del_timer_sync(&q->rx_reclaim_timer);
3245f7917c00SJeff Kirsher 	}
3246f7917c00SJeff Kirsher }
3247f7917c00SJeff Kirsher 
3248f7917c00SJeff Kirsher /**
3249f7917c00SJeff Kirsher  *	t3_free_sge_resources - free SGE resources
3250f7917c00SJeff Kirsher  *	@adap: the adapter
3251f7917c00SJeff Kirsher  *
3252f7917c00SJeff Kirsher  *	Frees resources used by the SGE queue sets.
3253f7917c00SJeff Kirsher  */
3254f7917c00SJeff Kirsher void t3_free_sge_resources(struct adapter *adap)
3255f7917c00SJeff Kirsher {
3256f7917c00SJeff Kirsher 	int i;
3257f7917c00SJeff Kirsher 
3258f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i)
3259f7917c00SJeff Kirsher 		t3_free_qset(adap, &adap->sge.qs[i]);
3260f7917c00SJeff Kirsher }
3261f7917c00SJeff Kirsher 
3262f7917c00SJeff Kirsher /**
3263f7917c00SJeff Kirsher  *	t3_sge_start - enable SGE
3264f7917c00SJeff Kirsher  *	@adap: the adapter
3265f7917c00SJeff Kirsher  *
3266f7917c00SJeff Kirsher  *	Enables the SGE for DMAs.  This is the last step in starting packet
3267f7917c00SJeff Kirsher  *	transfers.
3268f7917c00SJeff Kirsher  */
3269f7917c00SJeff Kirsher void t3_sge_start(struct adapter *adap)
3270f7917c00SJeff Kirsher {
3271f7917c00SJeff Kirsher 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3272f7917c00SJeff Kirsher }
3273f7917c00SJeff Kirsher 
3274f7917c00SJeff Kirsher /**
3275a17409e7SThomas Gleixner  *	t3_sge_stop_dma - Disable SGE DMA engine operation
3276f7917c00SJeff Kirsher  *	@adap: the adapter
3277f7917c00SJeff Kirsher  *
3278a17409e7SThomas Gleixner  *	Can be invoked from interrupt context, e.g. from the error handler.
3279a17409e7SThomas Gleixner  *
32805e0b8928SÍñigo Huguet  *	Note that this function cannot disable the queue-restart work items as
3281a17409e7SThomas Gleixner  *	it cannot wait if called from interrupt context; however, the
32825e0b8928SÍñigo Huguet  *	work items will have no effect since the doorbells are disabled. The
3283a17409e7SThomas Gleixner  *	driver will call t3_sge_stop() later from process context, at
32845e0b8928SÍñigo Huguet  *	which time the work items will be stopped if they are still running.
3285a17409e7SThomas Gleixner  */
3286a17409e7SThomas Gleixner void t3_sge_stop_dma(struct adapter *adap)
3287a17409e7SThomas Gleixner {
3288a17409e7SThomas Gleixner 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3289a17409e7SThomas Gleixner }
3290a17409e7SThomas Gleixner 
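/*
 * Typical shutdown ordering (a sketch, not mandated by this file): an
 * interrupt-level error handler calls t3_sge_stop_dma() to quiesce the DMA
 * engine immediately, and the driver later calls t3_sge_stop() from process
 * context to cancel any queue-restart work that may still be pending.
 */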
3291a17409e7SThomas Gleixner /**
3292a17409e7SThomas Gleixner  *	t3_sge_stop - disable SGE operation completely
3293a17409e7SThomas Gleixner  *	@adap: the adapter
3294a17409e7SThomas Gleixner  *
3295a17409e7SThomas Gleixner  *	Called from process context. Disables the DMA engine and any
32965e0b8928SÍñigo Huguet  *	pending queue-restart work items.
3297f7917c00SJeff Kirsher  */
3298f7917c00SJeff Kirsher void t3_sge_stop(struct adapter *adap)
3299f7917c00SJeff Kirsher {
3300f7917c00SJeff Kirsher 	int i;
3301f7917c00SJeff Kirsher 
3302a17409e7SThomas Gleixner 	t3_sge_stop_dma(adap);
3303a17409e7SThomas Gleixner 
3304*be27a47aSHeiner Kallweit 	/* the qresume work items aren't initialized otherwise */
3305*be27a47aSHeiner Kallweit 	if (!(adap->flags & FULL_INIT_DONE))
3306*be27a47aSHeiner Kallweit 		return;
3307f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3308f7917c00SJeff Kirsher 		struct sge_qset *qs = &adap->sge.qs[i];
3309f7917c00SJeff Kirsher 
33105e0b8928SÍñigo Huguet 		cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
3311d5a73dcfSÍñigo Huguet 		cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
3312f7917c00SJeff Kirsher 	}
3313f7917c00SJeff Kirsher }
3314f7917c00SJeff Kirsher 
3315f7917c00SJeff Kirsher /**
3316f7917c00SJeff Kirsher  *	t3_sge_init - initialize SGE
3317f7917c00SJeff Kirsher  *	@adap: the adapter
3318f7917c00SJeff Kirsher  *	@p: the SGE parameters
3319f7917c00SJeff Kirsher  *
3320f7917c00SJeff Kirsher  *	Performs SGE initialization needed every time after a chip reset.
3321f7917c00SJeff Kirsher  *	We do not initialize any of the queue sets here, instead the driver
3322f7917c00SJeff Kirsher  *	top-level must request those individually.  We also do not enable DMA
3323f7917c00SJeff Kirsher  *	here, that should be done after the queues have been set up.
3324f7917c00SJeff Kirsher  */
3325f7917c00SJeff Kirsher void t3_sge_init(struct adapter *adap, struct sge_params *p)
3326f7917c00SJeff Kirsher {
3327f7917c00SJeff Kirsher 	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3328f7917c00SJeff Kirsher 
3329f7917c00SJeff Kirsher 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3330f7917c00SJeff Kirsher 	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3331f7917c00SJeff Kirsher 	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3332f7917c00SJeff Kirsher 	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3333f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 1
3334f7917c00SJeff Kirsher 	ctrl |= F_EGRGENCTRL;
3335f7917c00SJeff Kirsher #endif
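	/*
	 * On post-rev-A parts using legacy (non-MSI/MSI-X) interrupts, allow
	 * a single interrupt to service multiple queue sets.
	 */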
3336f7917c00SJeff Kirsher 	if (adap->params.rev > 0) {
3337f7917c00SJeff Kirsher 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
3338f7917c00SJeff Kirsher 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3339f7917c00SJeff Kirsher 	}
3340f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
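	/* Set doorbell thresholds, the SGE timer tick and command-queue credits. */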
3341f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3342f7917c00SJeff Kirsher 		     V_LORCQDRBTHRSH(512));
3343f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3344f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3345f7917c00SJeff Kirsher 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3346f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3347f7917c00SJeff Kirsher 		     adap->params.rev < T3_REV_C ? 1000 : 500);
3348f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3349f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3350f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3351f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3352f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3353f7917c00SJeff Kirsher }
3354f7917c00SJeff Kirsher 
3355f7917c00SJeff Kirsher /**
3356f7917c00SJeff Kirsher  *	t3_sge_prep - one-time SGE initialization
3357f7917c00SJeff Kirsher  *	@adap: the associated adapter
3358f7917c00SJeff Kirsher  *	@p: SGE parameters
3359f7917c00SJeff Kirsher  *
3360f7917c00SJeff Kirsher  *	Performs one-time initialization of SGE SW state.  Includes determining
3361f7917c00SJeff Kirsher  *	defaults for the assorted SGE parameters, which admins can change until
3362f7917c00SJeff Kirsher  *	they are used to initialize the SGE.
3363f7917c00SJeff Kirsher  */
3364f7917c00SJeff Kirsher void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3365f7917c00SJeff Kirsher {
3366f7917c00SJeff Kirsher 	int i;
3367f7917c00SJeff Kirsher 
3368f7917c00SJeff Kirsher 	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3369f7917c00SJeff Kirsher 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3370f7917c00SJeff Kirsher 
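	/*
	 * Per-queue-set defaults; these remain tunable until the queue sets
	 * are actually allocated.
	 */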
3371f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3372f7917c00SJeff Kirsher 		struct qset_params *q = p->qset + i;
3373f7917c00SJeff Kirsher 
3374f7917c00SJeff Kirsher 		q->polling = adap->params.rev > 0;
3375f7917c00SJeff Kirsher 		q->coalesce_usecs = 5;
3376f7917c00SJeff Kirsher 		q->rspq_size = 1024;
3377f7917c00SJeff Kirsher 		q->fl_size = 1024;
3378f7917c00SJeff Kirsher 		q->jumbo_size = 512;
3379f7917c00SJeff Kirsher 		q->txq_size[TXQ_ETH] = 1024;
3380f7917c00SJeff Kirsher 		q->txq_size[TXQ_OFLD] = 1024;
3381f7917c00SJeff Kirsher 		q->txq_size[TXQ_CTRL] = 256;
3382f7917c00SJeff Kirsher 		q->cong_thres = 0;
3383f7917c00SJeff Kirsher 	}
3384f7917c00SJeff Kirsher 
3385f7917c00SJeff Kirsher 	spin_lock_init(&adap->sge.reg_lock);
3386f7917c00SJeff Kirsher }
3387