/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

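/*
 * Bytes reserved at the end of each page-chunk allocation; alloc_pg_chunk()
 * keeps the chunk reference count (p_cnt) in this reserved area.
 */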
#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

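/*
 * A response queue descriptor is 64 bytes: the 8-byte RSS header, 8 bytes of
 * flags and length, 47 bytes of immediate data, and a trailing interrupt /
 * generation byte.
 */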
struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
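/*
 * Reading the table together with the formula above: with SGE_NUM_GENBITS == 2
 * it implies WR_FLITS == 15, so 1-15 flits fit in one descriptor, 16-29 in two,
 * and so on; with a single generation bit the corresponding WR_FLITS is 16.
 */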

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The compiler
 *	optimizes away unnecessary code if this returns false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, the information necessary to unmap an
 *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 *	descriptors (the physical addresses of the various data buffers), and
 *	the SW descriptor state (assorted indices).  The send functions
 *	initialize the indices for the first packet descriptor so we can unmap
 *	the buffers held in the first Tx descriptor here, and we have enough
 *	information at this point to set the state for the next Tx descriptor.
 *
 *	Note that it is possible to clean up the first descriptor of a packet
 *	before the send routines have written the next descriptors, but this
 *	race does not cause any problem.  We just end up writing the unmapping
 *	info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

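	/*
	 * Each SGL entry (struct sg_ent) packs two length/address pairs, so
	 * j toggles between the two slots and sgp advances only once both
	 * slots of the current entry have been consumed.
	 */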
	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop) {
				kfree_skb(d->skb);
				d->skb = NULL;
			}
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@chunk: maximum number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.  Returns the number of descriptors that are still
 *	pending reclamation.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			pci_unmap_page(pdev,
				       d->pg_chunk.mapping,
				       q->alloc_size, PCI_DMA_FROMDEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va:  buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	dma_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
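	/* make the buffer address visible before the generation bits hand the
	 * descriptor over to HW
	 */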
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
			__free_pages(q->pg_chunk.page, order);
			q->pg_chunk.page = NULL;
			return -EIO;
		}
		q->pg_chunk.mapping = mapping;
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}

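/*
 * Ring the free-list doorbell only once at least a quarter of the queue's
 * credits are pending, so that doorbell writes are batched.
 */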
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must ensure that
 *	@n does not exceed the queue's capacity.  Returns the number of buffers
 *	actually added.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			dma_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			pci_dma_sync_single_for_device(adap->pdev, mapping,
						q->buf_size - SGE_PG_RSVD,
						PCI_DMA_FROMDEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adap: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t * phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	memset(p, 0, len);
	return p;
}

/**
 *	t3_reset_qset - reset an SGE qset
 *	@q: the queue set
 *
 *	Reset the qset structure.  The NAPI structure is preserved in the
 *	event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}

/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@q: the response queue that received the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise we attach the original buffer as a page fragment to a fresh
 *	sk_buff.  If a positive drop threshold is supplied packets are dropped
 *	and their buffers recycled if (a) the number of remaining buffers is
 *	under the threshold and the packet is too big to copy, or (b) there's
 *	no system memory.
 *
 *	Note: this function is similar to @get_packet but deals with Rx buffers
 *	that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);

	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
						       len,
						       PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
				    PCI_DMA_FROMDEVICE);
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		pci_unmap_page(adap->pdev,
			       sd->pg_chunk.mapping,
			       fl->alloc_size,
			       PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

/*	map_skb - map a packet main body and its page fragments
 *	@pdev: the PCI device
 *	@skb: the packet
 *	@addr: placeholder to save the mapped addresses
 *
 *	Map the main body of an sk_buff and its page fragments, if any.
 *	Returns 0 on success or -ENOMEM if a DMA mapping fails.
 */
static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
			       PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
			       DMA_TO_DEVICE);

	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
out_err:
	return -ENOMEM;
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@addr: the list of the mapped addresses
 *
 *	Copies the scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int write_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, const dma_addr_t *addr)
{
	unsigned int i, j = 0, k = 0, nfrags;

	if (len) {
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[j++] = cpu_to_be64(addr[k++]);
	}

	nfrags = skb_shinfo(skb)->nr_frags;
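	/* each sg_ent holds two length/address pairs; j selects the slot and
	 * the entry pointer advances once both slots are filled
	 */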
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(addr[k++]);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 *	where the HW could go to sleep just after we checked; in that case
 *	the interrupt handler will detect the outstanding Tx packet and ring
 *	the doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

1063f7917c00SJeff Kirsher static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1064f7917c00SJeff Kirsher {
1065f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 2
1066f7917c00SJeff Kirsher 	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1067f7917c00SJeff Kirsher #endif
1068f7917c00SJeff Kirsher }
1069f7917c00SJeff Kirsher 
1070f7917c00SJeff Kirsher /**
1071f7917c00SJeff Kirsher  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1072f7917c00SJeff Kirsher  *	@ndesc: number of Tx descriptors spanned by the SGL
1073f7917c00SJeff Kirsher  *	@skb: the packet corresponding to the WR
1074f7917c00SJeff Kirsher  *	@d: first Tx descriptor to be written
1075f7917c00SJeff Kirsher  *	@pidx: index of the first of the above descriptors
1076f7917c00SJeff Kirsher  *	@q: the SGE Tx queue
1077f7917c00SJeff Kirsher  *	@sgl: the SGL
1078f7917c00SJeff Kirsher  *	@flits: number of flits to the start of the SGL in the first descriptor
1079f7917c00SJeff Kirsher  *	@sgl_flits: the SGL size in flits
1080f7917c00SJeff Kirsher  *	@gen: the Tx descriptor generation
1081f7917c00SJeff Kirsher  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1082f7917c00SJeff Kirsher  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1083f7917c00SJeff Kirsher  *
1084f7917c00SJeff Kirsher  *	Write a work request header and an associated SGL.  If the SGL is
1085f7917c00SJeff Kirsher  *	small enough to fit into one Tx descriptor it has already been written
1086f7917c00SJeff Kirsher  *	and we just need to write the WR header.  Otherwise we distribute the
1087f7917c00SJeff Kirsher  *	SGL across the number of descriptors it spans.
1088f7917c00SJeff Kirsher  */
1089f7917c00SJeff Kirsher static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1090f7917c00SJeff Kirsher 			     struct tx_desc *d, unsigned int pidx,
1091f7917c00SJeff Kirsher 			     const struct sge_txq *q,
1092f7917c00SJeff Kirsher 			     const struct sg_ent *sgl,
1093f7917c00SJeff Kirsher 			     unsigned int flits, unsigned int sgl_flits,
1094f7917c00SJeff Kirsher 			     unsigned int gen, __be32 wr_hi,
1095f7917c00SJeff Kirsher 			     __be32 wr_lo)
1096f7917c00SJeff Kirsher {
1097f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1098f7917c00SJeff Kirsher 	struct tx_sw_desc *sd = &q->sdesc[pidx];
1099f7917c00SJeff Kirsher 
1100f7917c00SJeff Kirsher 	sd->skb = skb;
1101f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1102f7917c00SJeff Kirsher 		sd->fragidx = 0;
1103f7917c00SJeff Kirsher 		sd->addr_idx = 0;
1104f7917c00SJeff Kirsher 		sd->sflit = flits;
1105f7917c00SJeff Kirsher 	}
1106f7917c00SJeff Kirsher 
1107f7917c00SJeff Kirsher 	if (likely(ndesc == 1)) {
1108f7917c00SJeff Kirsher 		sd->eop = 1;
1109f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1110f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1111f7917c00SJeff Kirsher 		wmb();
1112f7917c00SJeff Kirsher 		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1113f7917c00SJeff Kirsher 				   V_WR_GEN(gen)) | wr_lo;
1114f7917c00SJeff Kirsher 		wr_gen2(d, gen);
1115f7917c00SJeff Kirsher 	} else {
1116f7917c00SJeff Kirsher 		unsigned int ogen = gen;
1117f7917c00SJeff Kirsher 		const u64 *fp = (const u64 *)sgl;
1118f7917c00SJeff Kirsher 		struct work_request_hdr *wp = wrp;
1119f7917c00SJeff Kirsher 
1120f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1121f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1122f7917c00SJeff Kirsher 
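		/*
		 * Spread the SGL over the remaining descriptors: the first
		 * descriptor has WR_FLITS - flits flits of room left after
		 * its header, and each subsequent descriptor carries a one
		 * flit header followed by up to WR_FLITS - 1 flits of SGL
		 * data.
		 */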
1123f7917c00SJeff Kirsher 		while (sgl_flits) {
1124f7917c00SJeff Kirsher 			unsigned int avail = WR_FLITS - flits;
1125f7917c00SJeff Kirsher 
1126f7917c00SJeff Kirsher 			if (avail > sgl_flits)
1127f7917c00SJeff Kirsher 				avail = sgl_flits;
1128f7917c00SJeff Kirsher 			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1129f7917c00SJeff Kirsher 			sgl_flits -= avail;
1130f7917c00SJeff Kirsher 			ndesc--;
1131f7917c00SJeff Kirsher 			if (!sgl_flits)
1132f7917c00SJeff Kirsher 				break;
1133f7917c00SJeff Kirsher 
1134f7917c00SJeff Kirsher 			fp += avail;
1135f7917c00SJeff Kirsher 			d++;
1136f7917c00SJeff Kirsher 			sd->eop = 0;
1137f7917c00SJeff Kirsher 			sd++;
1138f7917c00SJeff Kirsher 			if (++pidx == q->size) {
1139f7917c00SJeff Kirsher 				pidx = 0;
1140f7917c00SJeff Kirsher 				gen ^= 1;
1141f7917c00SJeff Kirsher 				d = q->desc;
1142f7917c00SJeff Kirsher 				sd = q->sdesc;
1143f7917c00SJeff Kirsher 			}
1144f7917c00SJeff Kirsher 
1145f7917c00SJeff Kirsher 			sd->skb = skb;
1146f7917c00SJeff Kirsher 			wrp = (struct work_request_hdr *)d;
1147f7917c00SJeff Kirsher 			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1148f7917c00SJeff Kirsher 					   V_WR_SGLSFLT(1)) | wr_hi;
1149f7917c00SJeff Kirsher 			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1150f7917c00SJeff Kirsher 							sgl_flits + 1)) |
1151f7917c00SJeff Kirsher 					   V_WR_GEN(gen)) | wr_lo;
1152f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1153f7917c00SJeff Kirsher 			flits = 1;
1154f7917c00SJeff Kirsher 		}
1155f7917c00SJeff Kirsher 		sd->eop = 1;
1156f7917c00SJeff Kirsher 		wrp->wr_hi |= htonl(F_WR_EOP);
1157f7917c00SJeff Kirsher 		wmb();
1158f7917c00SJeff Kirsher 		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1159f7917c00SJeff Kirsher 		wr_gen2((struct tx_desc *)wp, ogen);
1160f7917c00SJeff Kirsher 		WARN_ON(ndesc != 0);
1161f7917c00SJeff Kirsher 	}
1162f7917c00SJeff Kirsher }
1163f7917c00SJeff Kirsher 
1164f7917c00SJeff Kirsher /**
1165f7917c00SJeff Kirsher  *	write_tx_pkt_wr - write a TX_PKT work request
1166f7917c00SJeff Kirsher  *	@adap: the adapter
1167f7917c00SJeff Kirsher  *	@skb: the packet to send
1168f7917c00SJeff Kirsher  *	@pi: the egress interface
1169f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1170f7917c00SJeff Kirsher  *	@gen: the generation value to use
1171f7917c00SJeff Kirsher  *	@q: the Tx queue
1172f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1173f7917c00SJeff Kirsher  *	@compl: the value of the COMPL bit to use
1174f7917c00SJeff Kirsher  *
1175f7917c00SJeff Kirsher  *	Generate a TX_PKT work request to send the supplied packet.
1176f7917c00SJeff Kirsher  */
1177f7917c00SJeff Kirsher static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1178f7917c00SJeff Kirsher 			    const struct port_info *pi,
1179f7917c00SJeff Kirsher 			    unsigned int pidx, unsigned int gen,
1180f7917c00SJeff Kirsher 			    struct sge_txq *q, unsigned int ndesc,
1181f83331baSSantosh Rastapur 			    unsigned int compl, const dma_addr_t *addr)
1182f7917c00SJeff Kirsher {
1183f7917c00SJeff Kirsher 	unsigned int flits, sgl_flits, cntrl, tso_info;
1184f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1185f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1186f7917c00SJeff Kirsher 	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1187f7917c00SJeff Kirsher 
1188f7917c00SJeff Kirsher 	cpl->len = htonl(skb->len);
1189f7917c00SJeff Kirsher 	cntrl = V_TXPKT_INTF(pi->port_id);
1190f7917c00SJeff Kirsher 
1191f7917c00SJeff Kirsher 	if (vlan_tx_tag_present(skb))
1192f7917c00SJeff Kirsher 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1193f7917c00SJeff Kirsher 
1194f7917c00SJeff Kirsher 	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1195f7917c00SJeff Kirsher 	if (tso_info) {
1196f7917c00SJeff Kirsher 		int eth_type;
1197f7917c00SJeff Kirsher 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1198f7917c00SJeff Kirsher 
1199f7917c00SJeff Kirsher 		d->flit[2] = 0;
1200f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1201f7917c00SJeff Kirsher 		hdr->cntrl = htonl(cntrl);
1202f7917c00SJeff Kirsher 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
1203f7917c00SJeff Kirsher 		    CPL_ETH_II : CPL_ETH_II_VLAN;
1204f7917c00SJeff Kirsher 		tso_info |= V_LSO_ETH_TYPE(eth_type) |
1205f7917c00SJeff Kirsher 		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1206f7917c00SJeff Kirsher 		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1207f7917c00SJeff Kirsher 		hdr->lso_info = htonl(tso_info);
1208f7917c00SJeff Kirsher 		flits = 3;
1209f7917c00SJeff Kirsher 	} else {
1210f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1211f7917c00SJeff Kirsher 		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
1212f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1213f7917c00SJeff Kirsher 		cpl->cntrl = htonl(cntrl);
1214f7917c00SJeff Kirsher 
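		/*
		 * Small non-TSO packets are copied into the descriptor as
		 * immediate data: no SGL is needed and the skb can be freed
		 * as soon as the work request has been written.
		 */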
1215f7917c00SJeff Kirsher 		if (skb->len <= WR_LEN - sizeof(*cpl)) {
1216f7917c00SJeff Kirsher 			q->sdesc[pidx].skb = NULL;
1217f7917c00SJeff Kirsher 			if (!skb->data_len)
1218f7917c00SJeff Kirsher 				skb_copy_from_linear_data(skb, &d->flit[2],
1219f7917c00SJeff Kirsher 							  skb->len);
1220f7917c00SJeff Kirsher 			else
1221f7917c00SJeff Kirsher 				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1222f7917c00SJeff Kirsher 
1223f7917c00SJeff Kirsher 			flits = (skb->len + 7) / 8 + 2;
1224f7917c00SJeff Kirsher 			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1225f7917c00SJeff Kirsher 					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1226f7917c00SJeff Kirsher 					      | F_WR_SOP | F_WR_EOP | compl);
1227f7917c00SJeff Kirsher 			wmb();
1228f7917c00SJeff Kirsher 			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1229f7917c00SJeff Kirsher 					      V_WR_TID(q->token));
1230f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1231f7917c00SJeff Kirsher 			kfree_skb(skb);
1232f7917c00SJeff Kirsher 			return;
1233f7917c00SJeff Kirsher 		}
1234f7917c00SJeff Kirsher 
1235f7917c00SJeff Kirsher 		flits = 2;
1236f7917c00SJeff Kirsher 	}
1237f7917c00SJeff Kirsher 
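	/*
	 * A single-descriptor packet gets its SGL written straight into the
	 * descriptor after the CPL header; anything larger is built in the
	 * on-stack sgl[] and copied out by write_wr_hdr_sgl() across the
	 * descriptors it spans.
	 */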
1238f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1239f83331baSSantosh Rastapur 	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1240f7917c00SJeff Kirsher 
1241f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1242f7917c00SJeff Kirsher 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1243f7917c00SJeff Kirsher 			 htonl(V_WR_TID(q->token)));
1244f7917c00SJeff Kirsher }
1245f7917c00SJeff Kirsher 
1246f7917c00SJeff Kirsher static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1247f7917c00SJeff Kirsher 				    struct sge_qset *qs, struct sge_txq *q)
1248f7917c00SJeff Kirsher {
1249f7917c00SJeff Kirsher 	netif_tx_stop_queue(txq);
1250f7917c00SJeff Kirsher 	set_bit(TXQ_ETH, &qs->txq_stopped);
1251f7917c00SJeff Kirsher 	q->stops++;
1252f7917c00SJeff Kirsher }
1253f7917c00SJeff Kirsher 
1254f7917c00SJeff Kirsher /**
1255f7917c00SJeff Kirsher  *	eth_xmit - add a packet to the Ethernet Tx queue
1256f7917c00SJeff Kirsher  *	@skb: the packet
1257f7917c00SJeff Kirsher  *	@dev: the egress net device
1258f7917c00SJeff Kirsher  *
1259f7917c00SJeff Kirsher  *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
1260f7917c00SJeff Kirsher  */
1261f7917c00SJeff Kirsher netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1262f7917c00SJeff Kirsher {
1263f7917c00SJeff Kirsher 	int qidx;
1264f7917c00SJeff Kirsher 	unsigned int ndesc, pidx, credits, gen, compl;
1265f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(dev);
1266f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1267f7917c00SJeff Kirsher 	struct netdev_queue *txq;
1268f7917c00SJeff Kirsher 	struct sge_qset *qs;
1269f7917c00SJeff Kirsher 	struct sge_txq *q;
1270f83331baSSantosh Rastapur 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1271f7917c00SJeff Kirsher 
1272f7917c00SJeff Kirsher 	/*
1273f7917c00SJeff Kirsher 	 * The chip's minimum packet length is 9 octets, but play it safe and
1274f7917c00SJeff Kirsher 	 * reject anything shorter than an Ethernet header.
1275f7917c00SJeff Kirsher 	 */
1276f7917c00SJeff Kirsher 	if (unlikely(skb->len < ETH_HLEN)) {
1277f7917c00SJeff Kirsher 		dev_kfree_skb(skb);
1278f7917c00SJeff Kirsher 		return NETDEV_TX_OK;
1279f7917c00SJeff Kirsher 	}
1280f7917c00SJeff Kirsher 
1281f7917c00SJeff Kirsher 	qidx = skb_get_queue_mapping(skb);
1282f7917c00SJeff Kirsher 	qs = &pi->qs[qidx];
1283f7917c00SJeff Kirsher 	q = &qs->txq[TXQ_ETH];
1284f7917c00SJeff Kirsher 	txq = netdev_get_tx_queue(dev, qidx);
1285f7917c00SJeff Kirsher 
1286f7917c00SJeff Kirsher 	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1287f7917c00SJeff Kirsher 
1288f7917c00SJeff Kirsher 	credits = q->size - q->in_use;
1289f7917c00SJeff Kirsher 	ndesc = calc_tx_descs(skb);
1290f7917c00SJeff Kirsher 
1291f7917c00SJeff Kirsher 	if (unlikely(credits < ndesc)) {
1292f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1293f7917c00SJeff Kirsher 		dev_err(&adap->pdev->dev,
1294f7917c00SJeff Kirsher 			"%s: Tx ring %u full while queue awake!\n",
1295f7917c00SJeff Kirsher 			dev->name, q->cntxt_id & 7);
1296f7917c00SJeff Kirsher 		return NETDEV_TX_BUSY;
1297f7917c00SJeff Kirsher 	}
1298f7917c00SJeff Kirsher 
1299f83331baSSantosh Rastapur 	if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1300f83331baSSantosh Rastapur 		dev_kfree_skb(skb);
1301f83331baSSantosh Rastapur 		return NETDEV_TX_OK;
1302f83331baSSantosh Rastapur 	}
1303f83331baSSantosh Rastapur 
1304f7917c00SJeff Kirsher 	q->in_use += ndesc;
1305f7917c00SJeff Kirsher 	if (unlikely(credits - ndesc < q->stop_thres)) {
1306f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1307f7917c00SJeff Kirsher 
1308f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1309f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1310f7917c00SJeff Kirsher 			q->restarts++;
1311f7917c00SJeff Kirsher 			netif_tx_start_queue(txq);
1312f7917c00SJeff Kirsher 		}
1313f7917c00SJeff Kirsher 	}
1314f7917c00SJeff Kirsher 
1315f7917c00SJeff Kirsher 	gen = q->gen;
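	/*
	 * Ask the HW for a WR completion roughly once every 8 descriptors:
	 * bit 3 of the running unacked count becomes the COMPL flag and the
	 * count is then reduced modulo 8.
	 */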
1316f7917c00SJeff Kirsher 	q->unacked += ndesc;
1317f7917c00SJeff Kirsher 	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1318f7917c00SJeff Kirsher 	q->unacked &= 7;
1319f7917c00SJeff Kirsher 	pidx = q->pidx;
1320f7917c00SJeff Kirsher 	q->pidx += ndesc;
1321f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1322f7917c00SJeff Kirsher 		q->pidx -= q->size;
1323f7917c00SJeff Kirsher 		q->gen ^= 1;
1324f7917c00SJeff Kirsher 	}
1325f7917c00SJeff Kirsher 
1326f7917c00SJeff Kirsher 	/* update port statistics */
1327bc6c47b5SVipul Pandya 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1328f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1329f7917c00SJeff Kirsher 	if (skb_shinfo(skb)->gso_size)
1330f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TSO]++;
1331f7917c00SJeff Kirsher 	if (vlan_tx_tag_present(skb))
1332f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANINS]++;
1333f7917c00SJeff Kirsher 
1334f7917c00SJeff Kirsher 	/*
1335f7917c00SJeff Kirsher 	 * We do not use Tx completion interrupts to free DMAd Tx packets.
1336f7917c00SJeff Kirsher 	 * This is good for performance but means that we rely on new Tx
1337f7917c00SJeff Kirsher 	 * packets arriving to run the destructors of completed packets,
1338f7917c00SJeff Kirsher 	 * which open up space in their sockets' send queues.  Sometimes
1339f7917c00SJeff Kirsher 	 * we do not get such new packets, causing Tx to stall.  A single
1340f7917c00SJeff Kirsher 	 * UDP transmitter is a good example of this situation.  We have
1341f7917c00SJeff Kirsher 	 * a clean up timer that periodically reclaims completed packets
1342f7917c00SJeff Kirsher 	 * but it doesn't run often enough (nor do we want it to) to prevent
1343f7917c00SJeff Kirsher 	 * lengthy stalls.  A solution to this problem is to run the
1344f7917c00SJeff Kirsher 	 * destructor early, after the packet is queued but before it's DMAd.
1345f7917c00SJeff Kirsher 	 * A drawback is that we lie to socket memory accounting, but the amount
1346f7917c00SJeff Kirsher 	 * of extra memory is reasonable (limited by the number of Tx
1347f7917c00SJeff Kirsher 	 * descriptors), the packets do actually get freed quickly by new
1348f7917c00SJeff Kirsher 	 * packets almost always, and for protocols like TCP that wait for
1349f7917c00SJeff Kirsher 	 * acks to really free up the data, the extra memory is even less.
1350f7917c00SJeff Kirsher 	 * On the positive side we run the destructors on the sending CPU
1351f7917c00SJeff Kirsher 	 * rather than on a potentially different completing CPU, usually a
1352f7917c00SJeff Kirsher 	 * good thing.  We also run them without holding our Tx queue lock,
1353f7917c00SJeff Kirsher 	 * unlike what reclaim_completed_tx() would otherwise do.
1354f7917c00SJeff Kirsher 	 *
1355f7917c00SJeff Kirsher 	 * Run the destructor before telling the DMA engine about the packet
1356f7917c00SJeff Kirsher 	 * to make sure it doesn't complete and get freed prematurely.
1357f7917c00SJeff Kirsher 	 */
1358f7917c00SJeff Kirsher 	if (likely(!skb_shared(skb)))
1359f7917c00SJeff Kirsher 		skb_orphan(skb);
1360f7917c00SJeff Kirsher 
1361f83331baSSantosh Rastapur 	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1362f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1363f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1364f7917c00SJeff Kirsher }
1365f7917c00SJeff Kirsher 
1366f7917c00SJeff Kirsher /**
1367f7917c00SJeff Kirsher  *	write_imm - write a packet into a Tx descriptor as immediate data
1368f7917c00SJeff Kirsher  *	@d: the Tx descriptor to write
1369f7917c00SJeff Kirsher  *	@skb: the packet
1370f7917c00SJeff Kirsher  *	@len: the length of packet data to write as immediate data
1371f7917c00SJeff Kirsher  *	@gen: the generation bit value to write
1372f7917c00SJeff Kirsher  *
1373f7917c00SJeff Kirsher  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1374f7917c00SJeff Kirsher  *	contains a work request at its beginning.  We must write the packet
1375f7917c00SJeff Kirsher  *	carefully so the SGE doesn't read it accidentally before it's written
1376f7917c00SJeff Kirsher  *	in its entirety.
1377f7917c00SJeff Kirsher  */
1378f7917c00SJeff Kirsher static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1379f7917c00SJeff Kirsher 			     unsigned int len, unsigned int gen)
1380f7917c00SJeff Kirsher {
1381f7917c00SJeff Kirsher 	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1382f7917c00SJeff Kirsher 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1383f7917c00SJeff Kirsher 
1384f7917c00SJeff Kirsher 	if (likely(!skb->data_len))
1385f7917c00SJeff Kirsher 		memcpy(&to[1], &from[1], len - sizeof(*from));
1386f7917c00SJeff Kirsher 	else
1387f7917c00SJeff Kirsher 		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1388f7917c00SJeff Kirsher 
1389f7917c00SJeff Kirsher 	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1390f7917c00SJeff Kirsher 					V_WR_BCNTLFLT(len & 7));
1391f7917c00SJeff Kirsher 	wmb();
1392f7917c00SJeff Kirsher 	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1393f7917c00SJeff Kirsher 					V_WR_LEN((len + 7) / 8));
1394f7917c00SJeff Kirsher 	wr_gen2(d, gen);
1395f7917c00SJeff Kirsher 	kfree_skb(skb);
1396f7917c00SJeff Kirsher }
1397f7917c00SJeff Kirsher 
1398f7917c00SJeff Kirsher /**
1399f7917c00SJeff Kirsher  *	check_desc_avail - check descriptor availability on a send queue
1400f7917c00SJeff Kirsher  *	@adap: the adapter
1401f7917c00SJeff Kirsher  *	@q: the send queue
1402f7917c00SJeff Kirsher  *	@skb: the packet needing the descriptors
1403f7917c00SJeff Kirsher  *	@ndesc: the number of Tx descriptors needed
1404f7917c00SJeff Kirsher  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1405f7917c00SJeff Kirsher  *
1406f7917c00SJeff Kirsher  *	Checks if the requested number of Tx descriptors is available on an
1407f7917c00SJeff Kirsher  *	SGE send queue.  If the queue is already suspended or not enough
1408f7917c00SJeff Kirsher  *	descriptors are available the packet is queued for later transmission.
1409f7917c00SJeff Kirsher  *	Must be called with the Tx queue locked.
1410f7917c00SJeff Kirsher  *
1411f7917c00SJeff Kirsher  *	Returns 0 if enough descriptors are available, 1 if there aren't
1412f7917c00SJeff Kirsher  *	enough descriptors and the packet has been queued, and 2 if the caller
1413f7917c00SJeff Kirsher  *	needs to retry because there weren't enough descriptors at the
1414f7917c00SJeff Kirsher  *	beginning of the call but some freed up in the meantime.
1415f7917c00SJeff Kirsher  */
1416f7917c00SJeff Kirsher static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1417f7917c00SJeff Kirsher 				   struct sk_buff *skb, unsigned int ndesc,
1418f7917c00SJeff Kirsher 				   unsigned int qid)
1419f7917c00SJeff Kirsher {
1420f7917c00SJeff Kirsher 	if (unlikely(!skb_queue_empty(&q->sendq))) {
1421f7917c00SJeff Kirsher 	      addq_exit:__skb_queue_tail(&q->sendq, skb);
1422f7917c00SJeff Kirsher 		return 1;
1423f7917c00SJeff Kirsher 	}
1424f7917c00SJeff Kirsher 	if (unlikely(q->size - q->in_use < ndesc)) {
1425f7917c00SJeff Kirsher 		struct sge_qset *qs = txq_to_qset(q, qid);
1426f7917c00SJeff Kirsher 
1427f7917c00SJeff Kirsher 		set_bit(qid, &qs->txq_stopped);
1428f7917c00SJeff Kirsher 		smp_mb__after_clear_bit();
1429f7917c00SJeff Kirsher 
1430f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1431f7917c00SJeff Kirsher 		    test_and_clear_bit(qid, &qs->txq_stopped))
1432f7917c00SJeff Kirsher 			return 2;
1433f7917c00SJeff Kirsher 
1434f7917c00SJeff Kirsher 		q->stops++;
1435f7917c00SJeff Kirsher 		goto addq_exit;
1436f7917c00SJeff Kirsher 	}
1437f7917c00SJeff Kirsher 	return 0;
1438f7917c00SJeff Kirsher }
1439f7917c00SJeff Kirsher 
1440f7917c00SJeff Kirsher /**
1441f7917c00SJeff Kirsher  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1442f7917c00SJeff Kirsher  *	@q: the SGE control Tx queue
1443f7917c00SJeff Kirsher  *
1444f7917c00SJeff Kirsher  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1445f7917c00SJeff Kirsher  *	that send only immediate data (presently just the control queues) and
1446f7917c00SJeff Kirsher  *	thus do not have any sk_buffs to release.
1447f7917c00SJeff Kirsher  */
1448f7917c00SJeff Kirsher static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1449f7917c00SJeff Kirsher {
1450f7917c00SJeff Kirsher 	unsigned int reclaim = q->processed - q->cleaned;
1451f7917c00SJeff Kirsher 
1452f7917c00SJeff Kirsher 	q->in_use -= reclaim;
1453f7917c00SJeff Kirsher 	q->cleaned += reclaim;
1454f7917c00SJeff Kirsher }
1455f7917c00SJeff Kirsher 
1456f7917c00SJeff Kirsher static inline int immediate(const struct sk_buff *skb)
1457f7917c00SJeff Kirsher {
1458f7917c00SJeff Kirsher 	return skb->len <= WR_LEN;
1459f7917c00SJeff Kirsher }
1460f7917c00SJeff Kirsher 
1461f7917c00SJeff Kirsher /**
1462f7917c00SJeff Kirsher  *	ctrl_xmit - send a packet through an SGE control Tx queue
1463f7917c00SJeff Kirsher  *	@adap: the adapter
1464f7917c00SJeff Kirsher  *	@q: the control queue
1465f7917c00SJeff Kirsher  *	@skb: the packet
1466f7917c00SJeff Kirsher  *
1467f7917c00SJeff Kirsher  *	Send a packet through an SGE control Tx queue.  Packets sent through
1468f7917c00SJeff Kirsher  *	a control queue must fit entirely as immediate data in a single Tx
1469f7917c00SJeff Kirsher  *	descriptor and have no page fragments.
1470f7917c00SJeff Kirsher  */
1471f7917c00SJeff Kirsher static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1472f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1473f7917c00SJeff Kirsher {
1474f7917c00SJeff Kirsher 	int ret;
1475f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1476f7917c00SJeff Kirsher 
1477f7917c00SJeff Kirsher 	if (unlikely(!immediate(skb))) {
1478f7917c00SJeff Kirsher 		WARN_ON(1);
1479f7917c00SJeff Kirsher 		dev_kfree_skb(skb);
1480f7917c00SJeff Kirsher 		return NET_XMIT_SUCCESS;
1481f7917c00SJeff Kirsher 	}
1482f7917c00SJeff Kirsher 
1483f7917c00SJeff Kirsher 	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1484f7917c00SJeff Kirsher 	wrp->wr_lo = htonl(V_WR_TID(q->token));
1485f7917c00SJeff Kirsher 
1486f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1487f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1488f7917c00SJeff Kirsher 
1489f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1490f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1491f7917c00SJeff Kirsher 		if (ret == 1) {
1492f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1493f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1494f7917c00SJeff Kirsher 		}
1495f7917c00SJeff Kirsher 		goto again;
1496f7917c00SJeff Kirsher 	}
1497f7917c00SJeff Kirsher 
1498f7917c00SJeff Kirsher 	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1499f7917c00SJeff Kirsher 
1500f7917c00SJeff Kirsher 	q->in_use++;
1501f7917c00SJeff Kirsher 	if (++q->pidx >= q->size) {
1502f7917c00SJeff Kirsher 		q->pidx = 0;
1503f7917c00SJeff Kirsher 		q->gen ^= 1;
1504f7917c00SJeff Kirsher 	}
1505f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1506f7917c00SJeff Kirsher 	wmb();
1507f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_KDOORBELL,
1508f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1509f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1510f7917c00SJeff Kirsher }
1511f7917c00SJeff Kirsher 
1512f7917c00SJeff Kirsher /**
1513f7917c00SJeff Kirsher  *	restart_ctrlq - restart a suspended control queue
1514f7917c00SJeff Kirsher  *	@qs: the queue set containing the control queue
1515f7917c00SJeff Kirsher  *
1516f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx control queue.
1517f7917c00SJeff Kirsher  */
1518f7917c00SJeff Kirsher static void restart_ctrlq(unsigned long data)
1519f7917c00SJeff Kirsher {
1520f7917c00SJeff Kirsher 	struct sk_buff *skb;
1521f7917c00SJeff Kirsher 	struct sge_qset *qs = (struct sge_qset *)data;
1522f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1523f7917c00SJeff Kirsher 
1524f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1525f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1526f7917c00SJeff Kirsher 
1527f7917c00SJeff Kirsher 	while (q->in_use < q->size &&
1528f7917c00SJeff Kirsher 	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
1529f7917c00SJeff Kirsher 
1530f7917c00SJeff Kirsher 		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1531f7917c00SJeff Kirsher 
1532f7917c00SJeff Kirsher 		if (++q->pidx >= q->size) {
1533f7917c00SJeff Kirsher 			q->pidx = 0;
1534f7917c00SJeff Kirsher 			q->gen ^= 1;
1535f7917c00SJeff Kirsher 		}
1536f7917c00SJeff Kirsher 		q->in_use++;
1537f7917c00SJeff Kirsher 	}
1538f7917c00SJeff Kirsher 
1539f7917c00SJeff Kirsher 	if (!skb_queue_empty(&q->sendq)) {
1540f7917c00SJeff Kirsher 		set_bit(TXQ_CTRL, &qs->txq_stopped);
1541f7917c00SJeff Kirsher 		smp_mb__after_clear_bit();
1542f7917c00SJeff Kirsher 
1543f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1544f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1545f7917c00SJeff Kirsher 			goto again;
1546f7917c00SJeff Kirsher 		q->stops++;
1547f7917c00SJeff Kirsher 	}
1548f7917c00SJeff Kirsher 
1549f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1550f7917c00SJeff Kirsher 	wmb();
1551f7917c00SJeff Kirsher 	t3_write_reg(qs->adap, A_SG_KDOORBELL,
1552f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1553f7917c00SJeff Kirsher }
1554f7917c00SJeff Kirsher 
1555f7917c00SJeff Kirsher /*
1556f7917c00SJeff Kirsher  * Send a management message through control queue 0
1557f7917c00SJeff Kirsher  */
1558f7917c00SJeff Kirsher int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1559f7917c00SJeff Kirsher {
1560f7917c00SJeff Kirsher 	int ret;
1561f7917c00SJeff Kirsher 	local_bh_disable();
1562f7917c00SJeff Kirsher 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1563f7917c00SJeff Kirsher 	local_bh_enable();
1564f7917c00SJeff Kirsher 
1565f7917c00SJeff Kirsher 	return ret;
1566f7917c00SJeff Kirsher }
1567f7917c00SJeff Kirsher 
1568f7917c00SJeff Kirsher /**
1569f7917c00SJeff Kirsher  *	deferred_unmap_destructor - unmap a packet when it is freed
1570f7917c00SJeff Kirsher  *	@skb: the packet
1571f7917c00SJeff Kirsher  *
1572f7917c00SJeff Kirsher  *	This is the packet destructor used for Tx packets that need to remain
1573f7917c00SJeff Kirsher  *	mapped until they are freed rather than until their Tx descriptors are
1574f7917c00SJeff Kirsher  *	freed.
1575f7917c00SJeff Kirsher  */
1576f7917c00SJeff Kirsher static void deferred_unmap_destructor(struct sk_buff *skb)
1577f7917c00SJeff Kirsher {
1578f7917c00SJeff Kirsher 	int i;
1579f7917c00SJeff Kirsher 	const dma_addr_t *p;
1580f7917c00SJeff Kirsher 	const struct skb_shared_info *si;
1581f7917c00SJeff Kirsher 	const struct deferred_unmap_info *dui;
1582f7917c00SJeff Kirsher 
1583f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1584f7917c00SJeff Kirsher 	p = dui->addr;
1585f7917c00SJeff Kirsher 
1586f7917c00SJeff Kirsher 	if (skb->tail - skb->transport_header)
1587be8b678cSSimon Horman 		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
1588be8b678cSSimon Horman 				 skb_transport_header(skb), PCI_DMA_TODEVICE);
1589f7917c00SJeff Kirsher 
1590f7917c00SJeff Kirsher 	si = skb_shinfo(skb);
1591f7917c00SJeff Kirsher 	for (i = 0; i < si->nr_frags; i++)
15929e903e08SEric Dumazet 		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
1593f7917c00SJeff Kirsher 			       PCI_DMA_TODEVICE);
1594f7917c00SJeff Kirsher }
1595f7917c00SJeff Kirsher 
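/*
 * Stash the SGL's DMA addresses at the start of the skb's buffer (as a
 * struct deferred_unmap_info) so that deferred_unmap_destructor() can find
 * and unmap them when the skb is eventually freed.
 */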
1596f7917c00SJeff Kirsher static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1597f7917c00SJeff Kirsher 				     const struct sg_ent *sgl, int sgl_flits)
1598f7917c00SJeff Kirsher {
1599f7917c00SJeff Kirsher 	dma_addr_t *p;
1600f7917c00SJeff Kirsher 	struct deferred_unmap_info *dui;
1601f7917c00SJeff Kirsher 
1602f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1603f7917c00SJeff Kirsher 	dui->pdev = pdev;
1604f7917c00SJeff Kirsher 	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1605f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[0]);
1606f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[1]);
1607f7917c00SJeff Kirsher 	}
1608f7917c00SJeff Kirsher 	if (sgl_flits)
1609f7917c00SJeff Kirsher 		*p = be64_to_cpu(sgl->addr[0]);
1610f7917c00SJeff Kirsher }
1611f7917c00SJeff Kirsher 
1612f7917c00SJeff Kirsher /**
1613f7917c00SJeff Kirsher  *	write_ofld_wr - write an offload work request
1614f7917c00SJeff Kirsher  *	@adap: the adapter
1615f7917c00SJeff Kirsher  *	@skb: the packet to send
1616f7917c00SJeff Kirsher  *	@q: the Tx queue
1617f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1618f7917c00SJeff Kirsher  *	@gen: the generation value to use
1619f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1620f7917c00SJeff Kirsher  *
1621f7917c00SJeff Kirsher  *	Write an offload work request to send the supplied packet.  The packet
1622f7917c00SJeff Kirsher  *	data already carry the work request with most fields populated.
1623f7917c00SJeff Kirsher  */
1624f7917c00SJeff Kirsher static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1625f7917c00SJeff Kirsher 			  struct sge_txq *q, unsigned int pidx,
1626f83331baSSantosh Rastapur 			  unsigned int gen, unsigned int ndesc,
1627f83331baSSantosh Rastapur 			  const dma_addr_t *addr)
1628f7917c00SJeff Kirsher {
1629f7917c00SJeff Kirsher 	unsigned int sgl_flits, flits;
1630f7917c00SJeff Kirsher 	struct work_request_hdr *from;
1631f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1632f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1633f7917c00SJeff Kirsher 
1634f7917c00SJeff Kirsher 	if (immediate(skb)) {
1635f7917c00SJeff Kirsher 		q->sdesc[pidx].skb = NULL;
1636f7917c00SJeff Kirsher 		write_imm(d, skb, skb->len, gen);
1637f7917c00SJeff Kirsher 		return;
1638f7917c00SJeff Kirsher 	}
1639f7917c00SJeff Kirsher 
1640f7917c00SJeff Kirsher 	/* Only TX_DATA builds SGLs */
1641f7917c00SJeff Kirsher 
1642f7917c00SJeff Kirsher 	from = (struct work_request_hdr *)skb->data;
1643f7917c00SJeff Kirsher 	memcpy(&d->flit[1], &from[1],
1644f7917c00SJeff Kirsher 	       skb_transport_offset(skb) - sizeof(*from));
1645f7917c00SJeff Kirsher 
1646f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;
1647f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1648f83331baSSantosh Rastapur 	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1649be8b678cSSimon Horman 			     skb_tail_pointer(skb) -
1650be8b678cSSimon Horman 			     skb_transport_header(skb), addr);
1651f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1652f7917c00SJeff Kirsher 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1653f7917c00SJeff Kirsher 		skb->destructor = deferred_unmap_destructor;
1654f7917c00SJeff Kirsher 	}
1655f7917c00SJeff Kirsher 
1656f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1657f7917c00SJeff Kirsher 			 gen, from->wr_hi, from->wr_lo);
1658f7917c00SJeff Kirsher }
1659f7917c00SJeff Kirsher 
1660f7917c00SJeff Kirsher /**
1661f7917c00SJeff Kirsher  *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1662f7917c00SJeff Kirsher  *	@skb: the packet
1663f7917c00SJeff Kirsher  *
1664f7917c00SJeff Kirsher  * 	Returns the number of Tx descriptors needed for the given offload
1665f7917c00SJeff Kirsher  * 	packet.  These packets are already fully constructed.
1666f7917c00SJeff Kirsher  */
1667f7917c00SJeff Kirsher static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1668f7917c00SJeff Kirsher {
1669f7917c00SJeff Kirsher 	unsigned int flits, cnt;
1670f7917c00SJeff Kirsher 
1671f7917c00SJeff Kirsher 	if (skb->len <= WR_LEN)
1672f7917c00SJeff Kirsher 		return 1;	/* packet fits as immediate data */
1673f7917c00SJeff Kirsher 
1674f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;	/* headers */
1675f7917c00SJeff Kirsher 	cnt = skb_shinfo(skb)->nr_frags;
1676be8b678cSSimon Horman 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1677f7917c00SJeff Kirsher 		cnt++;
1678f7917c00SJeff Kirsher 	return flits_to_desc(flits + sgl_len(cnt));
1679f7917c00SJeff Kirsher }
1680f7917c00SJeff Kirsher 
1681f7917c00SJeff Kirsher /**
1682f7917c00SJeff Kirsher  *	ofld_xmit - send a packet through an offload queue
1683f7917c00SJeff Kirsher  *	@adap: the adapter
1684f7917c00SJeff Kirsher  *	@q: the Tx offload queue
1685f7917c00SJeff Kirsher  *	@skb: the packet
1686f7917c00SJeff Kirsher  *
1687f7917c00SJeff Kirsher  *	Send an offload packet through an SGE offload queue.
1688f7917c00SJeff Kirsher  */
1689f7917c00SJeff Kirsher static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1690f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1691f7917c00SJeff Kirsher {
1692f7917c00SJeff Kirsher 	int ret;
1693f7917c00SJeff Kirsher 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1694f7917c00SJeff Kirsher 
1695f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1696f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1697f7917c00SJeff Kirsher 
1698f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1699f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1700f7917c00SJeff Kirsher 		if (ret == 1) {
1701f7917c00SJeff Kirsher 			skb->priority = ndesc;	/* save for restart */
1702f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1703f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1704f7917c00SJeff Kirsher 		}
1705f7917c00SJeff Kirsher 		goto again;
1706f7917c00SJeff Kirsher 	}
1707f7917c00SJeff Kirsher 
1708f83331baSSantosh Rastapur 	if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1709f83331baSSantosh Rastapur 		spin_unlock(&q->lock);
1710f83331baSSantosh Rastapur 		return NET_XMIT_SUCCESS;
1711f83331baSSantosh Rastapur 	}
1712f83331baSSantosh Rastapur 
1713f7917c00SJeff Kirsher 	gen = q->gen;
1714f7917c00SJeff Kirsher 	q->in_use += ndesc;
1715f7917c00SJeff Kirsher 	pidx = q->pidx;
1716f7917c00SJeff Kirsher 	q->pidx += ndesc;
1717f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1718f7917c00SJeff Kirsher 		q->pidx -= q->size;
1719f7917c00SJeff Kirsher 		q->gen ^= 1;
1720f7917c00SJeff Kirsher 	}
1721f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1722f7917c00SJeff Kirsher 
1723f83331baSSantosh Rastapur 	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1724f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1725f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1726f7917c00SJeff Kirsher }
1727f7917c00SJeff Kirsher 
1728f7917c00SJeff Kirsher /**
1729f7917c00SJeff Kirsher  *	restart_offloadq - restart a suspended offload queue
1730f7917c00SJeff Kirsher  *	@qs: the queue set containing the offload queue
1731f7917c00SJeff Kirsher  *
1732f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx offload queue.
1733f7917c00SJeff Kirsher  */
1734f7917c00SJeff Kirsher static void restart_offloadq(unsigned long data)
1735f7917c00SJeff Kirsher {
1736f7917c00SJeff Kirsher 	struct sk_buff *skb;
1737f7917c00SJeff Kirsher 	struct sge_qset *qs = (struct sge_qset *)data;
1738f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
1739f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(qs->netdev);
1740f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1741f83331baSSantosh Rastapur 	unsigned int written = 0;
1742f7917c00SJeff Kirsher 
1743f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1744f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1745f7917c00SJeff Kirsher 
1746f7917c00SJeff Kirsher 	while ((skb = skb_peek(&q->sendq)) != NULL) {
1747f7917c00SJeff Kirsher 		unsigned int gen, pidx;
1748f7917c00SJeff Kirsher 		unsigned int ndesc = skb->priority;
1749f7917c00SJeff Kirsher 
1750f7917c00SJeff Kirsher 		if (unlikely(q->size - q->in_use < ndesc)) {
1751f7917c00SJeff Kirsher 			set_bit(TXQ_OFLD, &qs->txq_stopped);
1752f7917c00SJeff Kirsher 			smp_mb__after_clear_bit();
1753f7917c00SJeff Kirsher 
1754f7917c00SJeff Kirsher 			if (should_restart_tx(q) &&
1755f7917c00SJeff Kirsher 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1756f7917c00SJeff Kirsher 				goto again;
1757f7917c00SJeff Kirsher 			q->stops++;
1758f7917c00SJeff Kirsher 			break;
1759f7917c00SJeff Kirsher 		}
1760f7917c00SJeff Kirsher 
1761f83331baSSantosh Rastapur 		if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1762f83331baSSantosh Rastapur 			break;
1763f83331baSSantosh Rastapur 
1764f7917c00SJeff Kirsher 		gen = q->gen;
1765f7917c00SJeff Kirsher 		q->in_use += ndesc;
1766f7917c00SJeff Kirsher 		pidx = q->pidx;
1767f7917c00SJeff Kirsher 		q->pidx += ndesc;
1768f83331baSSantosh Rastapur 		written += ndesc;
1769f7917c00SJeff Kirsher 		if (q->pidx >= q->size) {
1770f7917c00SJeff Kirsher 			q->pidx -= q->size;
1771f7917c00SJeff Kirsher 			q->gen ^= 1;
1772f7917c00SJeff Kirsher 		}
1773f7917c00SJeff Kirsher 		__skb_unlink(skb, &q->sendq);
1774f7917c00SJeff Kirsher 		spin_unlock(&q->lock);
1775f7917c00SJeff Kirsher 
1776f83331baSSantosh Rastapur 		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1777f83331baSSantosh Rastapur 			     (dma_addr_t *)skb->head);
1778f7917c00SJeff Kirsher 		spin_lock(&q->lock);
1779f7917c00SJeff Kirsher 	}
1780f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1781f7917c00SJeff Kirsher 
1782f7917c00SJeff Kirsher #if USE_GTS
1783f7917c00SJeff Kirsher 	set_bit(TXQ_RUNNING, &q->flags);
1784f7917c00SJeff Kirsher 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
1785f7917c00SJeff Kirsher #endif
1786f7917c00SJeff Kirsher 	wmb();
1787f83331baSSantosh Rastapur 	if (likely(written))
1788f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_KDOORBELL,
1789f7917c00SJeff Kirsher 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1790f7917c00SJeff Kirsher }
1791f7917c00SJeff Kirsher 
1792f7917c00SJeff Kirsher /**
1793f7917c00SJeff Kirsher  *	queue_set - return the queue set a packet should use
1794f7917c00SJeff Kirsher  *	@skb: the packet
1795f7917c00SJeff Kirsher  *
1796f7917c00SJeff Kirsher  *	Maps a packet to the SGE queue set it should use.  The desired queue
1797f7917c00SJeff Kirsher  *	set is carried in bits 1-3 of the packet's priority.
1798f7917c00SJeff Kirsher  */
1799f7917c00SJeff Kirsher static inline int queue_set(const struct sk_buff *skb)
1800f7917c00SJeff Kirsher {
1801f7917c00SJeff Kirsher 	return skb->priority >> 1;
1802f7917c00SJeff Kirsher }
1803f7917c00SJeff Kirsher 
1804f7917c00SJeff Kirsher /**
1805f7917c00SJeff Kirsher  *	is_ctrl_pkt - return whether an offload packet is a control packet
1806f7917c00SJeff Kirsher  *	@skb: the packet
1807f7917c00SJeff Kirsher  *
1808f7917c00SJeff Kirsher  *	Determines whether an offload packet should use an OFLD or a CTRL
1809f7917c00SJeff Kirsher  *	Tx queue.  This is indicated by bit 0 in the packet's priority.
1810f7917c00SJeff Kirsher  */
1811f7917c00SJeff Kirsher static inline int is_ctrl_pkt(const struct sk_buff *skb)
1812f7917c00SJeff Kirsher {
1813f7917c00SJeff Kirsher 	return skb->priority & 1;
1814f7917c00SJeff Kirsher }
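
/*
 * Example of the priority encoding consumed by queue_set() and
 * is_ctrl_pkt(): an offload sender targeting queue set 2's control queue
 * would set
 *
 *	skb->priority = (2 << 1) | 1;	-> 5
 *
 * so that queue_set() returns 2 and is_ctrl_pkt() returns 1.
 */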
1815f7917c00SJeff Kirsher 
1816f7917c00SJeff Kirsher /**
1817f7917c00SJeff Kirsher  *	t3_offload_tx - send an offload packet
1818f7917c00SJeff Kirsher  *	@tdev: the offload device to send to
1819f7917c00SJeff Kirsher  *	@skb: the packet
1820f7917c00SJeff Kirsher  *
1821f7917c00SJeff Kirsher  *	Sends an offload packet.  We use the packet priority to select the
1822f7917c00SJeff Kirsher  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1823f7917c00SJeff Kirsher  *	should be sent as regular or control, bits 1-3 select the queue set.
1824f7917c00SJeff Kirsher  */
1825f7917c00SJeff Kirsher int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1826f7917c00SJeff Kirsher {
1827f7917c00SJeff Kirsher 	struct adapter *adap = tdev2adap(tdev);
1828f7917c00SJeff Kirsher 	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1829f7917c00SJeff Kirsher 
1830f7917c00SJeff Kirsher 	if (unlikely(is_ctrl_pkt(skb)))
1831f7917c00SJeff Kirsher 		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1832f7917c00SJeff Kirsher 
1833f7917c00SJeff Kirsher 	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1834f7917c00SJeff Kirsher }
1835f7917c00SJeff Kirsher 
1836f7917c00SJeff Kirsher /**
1837f7917c00SJeff Kirsher  *	offload_enqueue - add an offload packet to an SGE offload receive queue
1838f7917c00SJeff Kirsher  *	@q: the SGE response queue
1839f7917c00SJeff Kirsher  *	@skb: the packet
1840f7917c00SJeff Kirsher  *
1841f7917c00SJeff Kirsher  *	Add a new offload packet to an SGE response queue's offload packet
1842f7917c00SJeff Kirsher  *	queue.  If the packet is the first on the queue it schedules the RX
1843f7917c00SJeff Kirsher  *	softirq to process the queue.
1844f7917c00SJeff Kirsher  */
1845f7917c00SJeff Kirsher static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1846f7917c00SJeff Kirsher {
1847f7917c00SJeff Kirsher 	int was_empty = skb_queue_empty(&q->rx_queue);
1848f7917c00SJeff Kirsher 
1849f7917c00SJeff Kirsher 	__skb_queue_tail(&q->rx_queue, skb);
1850f7917c00SJeff Kirsher 
1851f7917c00SJeff Kirsher 	if (was_empty) {
1852f7917c00SJeff Kirsher 		struct sge_qset *qs = rspq_to_qset(q);
1853f7917c00SJeff Kirsher 
1854f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
1855f7917c00SJeff Kirsher 	}
1856f7917c00SJeff Kirsher }
1857f7917c00SJeff Kirsher 
1858f7917c00SJeff Kirsher /**
1859f7917c00SJeff Kirsher  *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1860f7917c00SJeff Kirsher  *	@tdev: the offload device that will be receiving the packets
1861f7917c00SJeff Kirsher  *	@q: the SGE response queue that assembled the bundle
1862f7917c00SJeff Kirsher  *	@skbs: the partial bundle
1863f7917c00SJeff Kirsher  *	@n: the number of packets in the bundle
1864f7917c00SJeff Kirsher  *
1865f7917c00SJeff Kirsher  *	Delivers a (partial) bundle of Rx offload packets to an offload device.
1866f7917c00SJeff Kirsher  */
1867f7917c00SJeff Kirsher static inline void deliver_partial_bundle(struct t3cdev *tdev,
1868f7917c00SJeff Kirsher 					  struct sge_rspq *q,
1869f7917c00SJeff Kirsher 					  struct sk_buff *skbs[], int n)
1870f7917c00SJeff Kirsher {
1871f7917c00SJeff Kirsher 	if (n) {
1872f7917c00SJeff Kirsher 		q->offload_bundles++;
1873f7917c00SJeff Kirsher 		tdev->recv(tdev, skbs, n);
1874f7917c00SJeff Kirsher 	}
1875f7917c00SJeff Kirsher }
1876f7917c00SJeff Kirsher 
1877f7917c00SJeff Kirsher /**
1878f7917c00SJeff Kirsher  *	ofld_poll - NAPI handler for offload packets in interrupt mode
1879f7917c00SJeff Kirsher  *	@dev: the network device doing the polling
1880f7917c00SJeff Kirsher  *	@budget: polling budget
1881f7917c00SJeff Kirsher  *
1882f7917c00SJeff Kirsher  *	The NAPI handler for offload packets when a response queue is serviced
1883f7917c00SJeff Kirsher  *	by the hard interrupt handler, i.e., when it's operating in non-polling
1884f7917c00SJeff Kirsher  *	mode.  Creates small packet batches and sends them through the offload
1885f7917c00SJeff Kirsher  *	receive handler.  Batches need to be of modest size as we do prefetches
1886f7917c00SJeff Kirsher  *	on the packets in each.
1887f7917c00SJeff Kirsher  */
1888f7917c00SJeff Kirsher static int ofld_poll(struct napi_struct *napi, int budget)
1889f7917c00SJeff Kirsher {
1890f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1891f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
1892f7917c00SJeff Kirsher 	struct adapter *adapter = qs->adap;
1893f7917c00SJeff Kirsher 	int work_done = 0;
1894f7917c00SJeff Kirsher 
1895f7917c00SJeff Kirsher 	while (work_done < budget) {
1896f7917c00SJeff Kirsher 		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1897f7917c00SJeff Kirsher 		struct sk_buff_head queue;
1898f7917c00SJeff Kirsher 		int ngathered;
1899f7917c00SJeff Kirsher 
1900f7917c00SJeff Kirsher 		spin_lock_irq(&q->lock);
1901f7917c00SJeff Kirsher 		__skb_queue_head_init(&queue);
1902f7917c00SJeff Kirsher 		skb_queue_splice_init(&q->rx_queue, &queue);
1903f7917c00SJeff Kirsher 		if (skb_queue_empty(&queue)) {
1904f7917c00SJeff Kirsher 			napi_complete(napi);
1905f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1906f7917c00SJeff Kirsher 			return work_done;
1907f7917c00SJeff Kirsher 		}
1908f7917c00SJeff Kirsher 		spin_unlock_irq(&q->lock);
1909f7917c00SJeff Kirsher 
1910f7917c00SJeff Kirsher 		ngathered = 0;
1911f7917c00SJeff Kirsher 		skb_queue_walk_safe(&queue, skb, tmp) {
1912f7917c00SJeff Kirsher 			if (work_done >= budget)
1913f7917c00SJeff Kirsher 				break;
1914f7917c00SJeff Kirsher 			work_done++;
1915f7917c00SJeff Kirsher 
1916f7917c00SJeff Kirsher 			__skb_unlink(skb, &queue);
1917f7917c00SJeff Kirsher 			prefetch(skb->data);
1918f7917c00SJeff Kirsher 			skbs[ngathered] = skb;
1919f7917c00SJeff Kirsher 			if (++ngathered == RX_BUNDLE_SIZE) {
1920f7917c00SJeff Kirsher 				q->offload_bundles++;
1921f7917c00SJeff Kirsher 				adapter->tdev.recv(&adapter->tdev, skbs,
1922f7917c00SJeff Kirsher 						   ngathered);
1923f7917c00SJeff Kirsher 				ngathered = 0;
1924f7917c00SJeff Kirsher 			}
1925f7917c00SJeff Kirsher 		}
1926f7917c00SJeff Kirsher 		if (!skb_queue_empty(&queue)) {
1927f7917c00SJeff Kirsher 			/* splice remaining packets back onto Rx queue */
1928f7917c00SJeff Kirsher 			spin_lock_irq(&q->lock);
1929f7917c00SJeff Kirsher 			skb_queue_splice(&queue, &q->rx_queue);
1930f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1931f7917c00SJeff Kirsher 		}
1932f7917c00SJeff Kirsher 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1933f7917c00SJeff Kirsher 	}
1934f7917c00SJeff Kirsher 
1935f7917c00SJeff Kirsher 	return work_done;
1936f7917c00SJeff Kirsher }
1937f7917c00SJeff Kirsher 
1938f7917c00SJeff Kirsher /**
1939f7917c00SJeff Kirsher  *	rx_offload - process a received offload packet
1940f7917c00SJeff Kirsher  *	@tdev: the offload device receiving the packet
1941f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
1942f7917c00SJeff Kirsher  *	@skb: the packet
1943f7917c00SJeff Kirsher  *	@rx_gather: a gather list of packets if we are building a bundle
1944f7917c00SJeff Kirsher  *	@gather_idx: index of the next available slot in the bundle
1945f7917c00SJeff Kirsher  *
1946f7917c00SJeff Kirsher  *	Process an ingress offload packet and add it to the offload ingress
1947f7917c00SJeff Kirsher  *	queue.  Returns the index of the next available slot in the bundle.
1948f7917c00SJeff Kirsher  */
1949f7917c00SJeff Kirsher static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1950f7917c00SJeff Kirsher 			     struct sk_buff *skb, struct sk_buff *rx_gather[],
1951f7917c00SJeff Kirsher 			     unsigned int gather_idx)
1952f7917c00SJeff Kirsher {
1953f7917c00SJeff Kirsher 	skb_reset_mac_header(skb);
1954f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
1955f7917c00SJeff Kirsher 	skb_reset_transport_header(skb);
1956f7917c00SJeff Kirsher 
1957f7917c00SJeff Kirsher 	if (rq->polling) {
1958f7917c00SJeff Kirsher 		rx_gather[gather_idx++] = skb;
1959f7917c00SJeff Kirsher 		if (gather_idx == RX_BUNDLE_SIZE) {
1960f7917c00SJeff Kirsher 			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1961f7917c00SJeff Kirsher 			gather_idx = 0;
1962f7917c00SJeff Kirsher 			rq->offload_bundles++;
1963f7917c00SJeff Kirsher 		}
1964f7917c00SJeff Kirsher 	} else
1965f7917c00SJeff Kirsher 		offload_enqueue(rq, skb);
1966f7917c00SJeff Kirsher 
1967f7917c00SJeff Kirsher 	return gather_idx;
1968f7917c00SJeff Kirsher }
1969f7917c00SJeff Kirsher 
1970f7917c00SJeff Kirsher /**
1971f7917c00SJeff Kirsher  *	restart_tx - check whether to restart suspended Tx queues
1972f7917c00SJeff Kirsher  *	@qs: the queue set to resume
1973f7917c00SJeff Kirsher  *
1974f7917c00SJeff Kirsher  *	Restarts suspended Tx queues of an SGE queue set if they have enough
1975f7917c00SJeff Kirsher  *	free resources to resume operation.
1976f7917c00SJeff Kirsher  */
1977f7917c00SJeff Kirsher static void restart_tx(struct sge_qset *qs)
1978f7917c00SJeff Kirsher {
1979f7917c00SJeff Kirsher 	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1980f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
1981f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1982f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].restarts++;
1983f7917c00SJeff Kirsher 		if (netif_running(qs->netdev))
1984f7917c00SJeff Kirsher 			netif_tx_wake_queue(qs->tx_q);
1985f7917c00SJeff Kirsher 	}
1986f7917c00SJeff Kirsher 
1987f7917c00SJeff Kirsher 	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1988f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1989f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1990f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].restarts++;
1991f7917c00SJeff Kirsher 		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1992f7917c00SJeff Kirsher 	}
1993f7917c00SJeff Kirsher 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1994f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1995f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1996f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].restarts++;
1997f7917c00SJeff Kirsher 		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1998f7917c00SJeff Kirsher 	}
1999f7917c00SJeff Kirsher }
2000f7917c00SJeff Kirsher 
2001f7917c00SJeff Kirsher /**
2002f7917c00SJeff Kirsher  *	cxgb3_arp_process - process an ARP request probing a private IP address
2003f7917c00SJeff Kirsher  *	@adapter: the adapter
2004f7917c00SJeff Kirsher  *	@skb: the skbuff containing the ARP request
2005f7917c00SJeff Kirsher  *
2006f7917c00SJeff Kirsher  *	Check if the ARP request is probing the private IP address
2007f7917c00SJeff Kirsher  *	dedicated to iSCSI, and generate an ARP reply if so.
2008f7917c00SJeff Kirsher  */
2009f7917c00SJeff Kirsher static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2010f7917c00SJeff Kirsher {
2011f7917c00SJeff Kirsher 	struct net_device *dev = skb->dev;
2012f7917c00SJeff Kirsher 	struct arphdr *arp;
2013f7917c00SJeff Kirsher 	unsigned char *arp_ptr;
2014f7917c00SJeff Kirsher 	unsigned char *sha;
2015f7917c00SJeff Kirsher 	__be32 sip, tip;
2016f7917c00SJeff Kirsher 
2017f7917c00SJeff Kirsher 	if (!dev)
2018f7917c00SJeff Kirsher 		return;
2019f7917c00SJeff Kirsher 
2020f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
2021f7917c00SJeff Kirsher 	arp = arp_hdr(skb);
2022f7917c00SJeff Kirsher 
2023f7917c00SJeff Kirsher 	if (arp->ar_op != htons(ARPOP_REQUEST))
2024f7917c00SJeff Kirsher 		return;
2025f7917c00SJeff Kirsher 
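	/*
	 * Walk the ARP payload by hand: sender MAC, sender IP, then skip the
	 * target MAC to reach the target IP.
	 */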
2026f7917c00SJeff Kirsher 	arp_ptr = (unsigned char *)(arp + 1);
2027f7917c00SJeff Kirsher 	sha = arp_ptr;
2028f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2029f7917c00SJeff Kirsher 	memcpy(&sip, arp_ptr, sizeof(sip));
2030f7917c00SJeff Kirsher 	arp_ptr += sizeof(sip);
2031f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2032f7917c00SJeff Kirsher 	memcpy(&tip, arp_ptr, sizeof(tip));
2033f7917c00SJeff Kirsher 
2034f7917c00SJeff Kirsher 	if (tip != pi->iscsi_ipv4addr)
2035f7917c00SJeff Kirsher 		return;
2036f7917c00SJeff Kirsher 
2037f7917c00SJeff Kirsher 	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2038f7917c00SJeff Kirsher 		 pi->iscsic.mac_addr, sha);
2039f7917c00SJeff Kirsher 
2040f7917c00SJeff Kirsher }
2041f7917c00SJeff Kirsher 
2042f7917c00SJeff Kirsher static inline int is_arp(struct sk_buff *skb)
2043f7917c00SJeff Kirsher {
2044f7917c00SJeff Kirsher 	return skb->protocol == htons(ETH_P_ARP);
2045f7917c00SJeff Kirsher }
2046f7917c00SJeff Kirsher 
2047f7917c00SJeff Kirsher static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2048f7917c00SJeff Kirsher 					struct sk_buff *skb)
2049f7917c00SJeff Kirsher {
2050f7917c00SJeff Kirsher 	if (is_arp(skb)) {
2051f7917c00SJeff Kirsher 		cxgb3_arp_process(pi, skb);
2052f7917c00SJeff Kirsher 		return;
2053f7917c00SJeff Kirsher 	}
2054f7917c00SJeff Kirsher 
2055f7917c00SJeff Kirsher 	if (pi->iscsic.recv)
2056f7917c00SJeff Kirsher 		pi->iscsic.recv(pi, skb);
2057f7917c00SJeff Kirsher 
2058f7917c00SJeff Kirsher }
2059f7917c00SJeff Kirsher 
2060f7917c00SJeff Kirsher /**
2061f7917c00SJeff Kirsher  *	rx_eth - process an ingress ethernet packet
2062f7917c00SJeff Kirsher  *	@adap: the adapter
2063f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
2064f7917c00SJeff Kirsher  *	@skb: the packet
2065f7917c00SJeff Kirsher  *	@pad: amount of padding at the start of the buffer
2066f7917c00SJeff Kirsher  *
2067f7917c00SJeff Kirsher  *	Process an ingress Ethernet packet and deliver it to the stack.
2068f7917c00SJeff Kirsher  *	The padding is 2 if the packet was delivered in an Rx buffer and 0
2069f7917c00SJeff Kirsher  *	if it was immediate data in a response.
2070f7917c00SJeff Kirsher  */
2071f7917c00SJeff Kirsher static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2072f7917c00SJeff Kirsher 		   struct sk_buff *skb, int pad, int lro)
2073f7917c00SJeff Kirsher {
2074f7917c00SJeff Kirsher 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2075f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(rq);
2076f7917c00SJeff Kirsher 	struct port_info *pi;
2077f7917c00SJeff Kirsher 
2078f7917c00SJeff Kirsher 	skb_pull(skb, sizeof(*p) + pad);
2079f7917c00SJeff Kirsher 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2080f7917c00SJeff Kirsher 	pi = netdev_priv(skb->dev);
2081f7917c00SJeff Kirsher 	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2082f7917c00SJeff Kirsher 	    p->csum == htons(0xffff) && !p->fragment) {
2083f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2084f7917c00SJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2085f7917c00SJeff Kirsher 	} else
2086f7917c00SJeff Kirsher 		skb_checksum_none_assert(skb);
2087f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2088f7917c00SJeff Kirsher 
2089f7917c00SJeff Kirsher 	if (p->vlan_valid) {
2090f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANEX]++;
209186a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2092f7917c00SJeff Kirsher 	}
2093f7917c00SJeff Kirsher 	if (rq->polling) {
2094f7917c00SJeff Kirsher 		if (lro)
2095f7917c00SJeff Kirsher 			napi_gro_receive(&qs->napi, skb);
2096f7917c00SJeff Kirsher 		else {
2097f7917c00SJeff Kirsher 			if (unlikely(pi->iscsic.flags))
2098f7917c00SJeff Kirsher 				cxgb3_process_iscsi_prov_pack(pi, skb);
2099f7917c00SJeff Kirsher 			netif_receive_skb(skb);
2100f7917c00SJeff Kirsher 		}
2101f7917c00SJeff Kirsher 	} else
2102f7917c00SJeff Kirsher 		netif_rx(skb);
2103f7917c00SJeff Kirsher }
2104f7917c00SJeff Kirsher 
2105f7917c00SJeff Kirsher static inline int is_eth_tcp(u32 rss)
2106f7917c00SJeff Kirsher {
2107f7917c00SJeff Kirsher 	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2108f7917c00SJeff Kirsher }
2109f7917c00SJeff Kirsher 
2110f7917c00SJeff Kirsher /**
2111f7917c00SJeff Kirsher  *	lro_add_page - add a page chunk to an LRO session
2112f7917c00SJeff Kirsher  *	@adap: the adapter
2113f7917c00SJeff Kirsher  *	@qs: the associated queue set
2114f7917c00SJeff Kirsher  *	@fl: the free list containing the page chunk to add
2115f7917c00SJeff Kirsher  *	@len: packet length
2116f7917c00SJeff Kirsher  *	@complete: indicates the last fragment of a frame
2117f7917c00SJeff Kirsher  *
2118f7917c00SJeff Kirsher  *	Add a received packet contained in a page chunk to an existing LRO
2119f7917c00SJeff Kirsher  *	session.
2120f7917c00SJeff Kirsher  */
2121f7917c00SJeff Kirsher static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2122f7917c00SJeff Kirsher 			 struct sge_fl *fl, int len, int complete)
2123f7917c00SJeff Kirsher {
2124f7917c00SJeff Kirsher 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2125f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2126f7917c00SJeff Kirsher 	struct sk_buff *skb = NULL;
2127f7917c00SJeff Kirsher 	struct cpl_rx_pkt *cpl;
2128f7917c00SJeff Kirsher 	struct skb_frag_struct *rx_frag;
2129f7917c00SJeff Kirsher 	int nr_frags;
2130f7917c00SJeff Kirsher 	int offset = 0;
2131f7917c00SJeff Kirsher 
2132f7917c00SJeff Kirsher 	if (!qs->nomem) {
2133f7917c00SJeff Kirsher 		skb = napi_get_frags(&qs->napi);
2134f7917c00SJeff Kirsher 		qs->nomem = !skb;
2135f7917c00SJeff Kirsher 	}
2136f7917c00SJeff Kirsher 
2137f7917c00SJeff Kirsher 	fl->credits--;
2138f7917c00SJeff Kirsher 
2139f7917c00SJeff Kirsher 	pci_dma_sync_single_for_cpu(adap->pdev,
2140f7917c00SJeff Kirsher 				    dma_unmap_addr(sd, dma_addr),
2141f7917c00SJeff Kirsher 				    fl->buf_size - SGE_PG_RSVD,
2142f7917c00SJeff Kirsher 				    PCI_DMA_FROMDEVICE);
2143f7917c00SJeff Kirsher 
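	/*
	 * pg_chunk.p_cnt counts the chunks carved from this page that are
	 * still outstanding.  The page is unmapped only once the last chunk
	 * has been consumed and the free list is no longer handing out
	 * chunks from it.
	 */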
2144f7917c00SJeff Kirsher 	(*sd->pg_chunk.p_cnt)--;
2145f7917c00SJeff Kirsher 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2146f7917c00SJeff Kirsher 		pci_unmap_page(adap->pdev,
2147f7917c00SJeff Kirsher 			       sd->pg_chunk.mapping,
2148f7917c00SJeff Kirsher 			       fl->alloc_size,
2149f7917c00SJeff Kirsher 			       PCI_DMA_FROMDEVICE);
2150f7917c00SJeff Kirsher 
2151f7917c00SJeff Kirsher 	if (!skb) {
2152f7917c00SJeff Kirsher 		put_page(sd->pg_chunk.page);
2153f7917c00SJeff Kirsher 		if (complete)
2154f7917c00SJeff Kirsher 			qs->nomem = 0;
2155f7917c00SJeff Kirsher 		return;
2156f7917c00SJeff Kirsher 	}
2157f7917c00SJeff Kirsher 
2158f7917c00SJeff Kirsher 	rx_frag = skb_shinfo(skb)->frags;
2159f7917c00SJeff Kirsher 	nr_frags = skb_shinfo(skb)->nr_frags;
2160f7917c00SJeff Kirsher 
2161f7917c00SJeff Kirsher 	if (!nr_frags) {
2162f7917c00SJeff Kirsher 		offset = 2 + sizeof(struct cpl_rx_pkt);
2163f7917c00SJeff Kirsher 		cpl = qs->lro_va = sd->pg_chunk.va + 2;
2164f7917c00SJeff Kirsher 
2165f7917c00SJeff Kirsher 		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2166f7917c00SJeff Kirsher 		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
2167f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2168f7917c00SJeff Kirsher 			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2169f7917c00SJeff Kirsher 		} else
2170f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_NONE;
2171f7917c00SJeff Kirsher 	} else
2172f7917c00SJeff Kirsher 		cpl = qs->lro_va;
2173f7917c00SJeff Kirsher 
2174f7917c00SJeff Kirsher 	len -= offset;
2175f7917c00SJeff Kirsher 
2176f7917c00SJeff Kirsher 	rx_frag += nr_frags;
21776a930b9fSIan Campbell 	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2178f7917c00SJeff Kirsher 	rx_frag->page_offset = sd->pg_chunk.offset + offset;
21799e903e08SEric Dumazet 	skb_frag_size_set(rx_frag, len);
2180f7917c00SJeff Kirsher 
2181f7917c00SJeff Kirsher 	skb->len += len;
2182f7917c00SJeff Kirsher 	skb->data_len += len;
2183f7917c00SJeff Kirsher 	skb->truesize += len;
2184f7917c00SJeff Kirsher 	skb_shinfo(skb)->nr_frags++;
2185f7917c00SJeff Kirsher 
2186f7917c00SJeff Kirsher 	if (!complete)
2187f7917c00SJeff Kirsher 		return;
2188f7917c00SJeff Kirsher 
2189f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2190f7917c00SJeff Kirsher 
219172073ad2SVipul Pandya 	if (cpl->vlan_valid) {
219272073ad2SVipul Pandya 		qs->port_stats[SGE_PSTAT_VLANEX]++;
219386a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
219472073ad2SVipul Pandya 	}
2195f7917c00SJeff Kirsher 	napi_gro_frags(&qs->napi);
2196f7917c00SJeff Kirsher }
2197f7917c00SJeff Kirsher 
2198f7917c00SJeff Kirsher /**
2199f7917c00SJeff Kirsher  *	handle_rsp_cntrl_info - handles control information in a response
2200f7917c00SJeff Kirsher  *	@qs: the queue set corresponding to the response
2201f7917c00SJeff Kirsher  *	@flags: the response control flags
2202f7917c00SJeff Kirsher  *
2203f7917c00SJeff Kirsher  *	Handles the control information of an SGE response, such as GTS
2204f7917c00SJeff Kirsher  *	indications and completion credits for the queue set's Tx queues.
2205f7917c00SJeff Kirsher  *	HW coalesces credits; we don't do any extra SW coalescing.
2206f7917c00SJeff Kirsher  */
2207f7917c00SJeff Kirsher static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2208f7917c00SJeff Kirsher {
2209f7917c00SJeff Kirsher 	unsigned int credits;
2210f7917c00SJeff Kirsher 
2211f7917c00SJeff Kirsher #if USE_GTS
2212f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ0_GTS)
2213f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2214f7917c00SJeff Kirsher #endif
2215f7917c00SJeff Kirsher 
2216f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ0_CR(flags);
2217f7917c00SJeff Kirsher 	if (credits)
2218f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].processed += credits;
2219f7917c00SJeff Kirsher 
2220f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ2_CR(flags);
2221f7917c00SJeff Kirsher 	if (credits)
2222f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].processed += credits;
2223f7917c00SJeff Kirsher 
2224f7917c00SJeff Kirsher #if USE_GTS
2225f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ1_GTS)
2226f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2227f7917c00SJeff Kirsher #endif
2228f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ1_CR(flags);
2229f7917c00SJeff Kirsher 	if (credits)
2230f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].processed += credits;
2231f7917c00SJeff Kirsher }
2232f7917c00SJeff Kirsher 
2233f7917c00SJeff Kirsher /**
2234f7917c00SJeff Kirsher  *	check_ring_db - check if we need to ring any doorbells
2235f7917c00SJeff Kirsher  *	@adap: the adapter
2236f7917c00SJeff Kirsher  *	@qs: the queue set whose Tx queues are to be examined
2237f7917c00SJeff Kirsher  *	@sleeping: indicates which Tx queue sent GTS
2238f7917c00SJeff Kirsher  *
2239f7917c00SJeff Kirsher  *	Checks if some of a queue set's Tx queues need to ring their doorbells
2240f7917c00SJeff Kirsher  *	to resume transmission after idling while they still have unprocessed
2241f7917c00SJeff Kirsher  *	descriptors.
2242f7917c00SJeff Kirsher  */
2243f7917c00SJeff Kirsher static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2244f7917c00SJeff Kirsher 			  unsigned int sleeping)
2245f7917c00SJeff Kirsher {
2246f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ0_GTS) {
2247f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
2248f7917c00SJeff Kirsher 
2249f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2250f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2251f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2252f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2253f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2254f7917c00SJeff Kirsher 		}
2255f7917c00SJeff Kirsher 	}
2256f7917c00SJeff Kirsher 
2257f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ1_GTS) {
2258f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2259f7917c00SJeff Kirsher 
2260f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2261f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2262f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2263f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2264f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2265f7917c00SJeff Kirsher 		}
2266f7917c00SJeff Kirsher 	}
2267f7917c00SJeff Kirsher }
2268f7917c00SJeff Kirsher 
2269f7917c00SJeff Kirsher /**
2270f7917c00SJeff Kirsher  *	is_new_response - check if a response is newly written
2271f7917c00SJeff Kirsher  *	@r: the response descriptor
2272f7917c00SJeff Kirsher  *	@q: the response queue
2273f7917c00SJeff Kirsher  *
2274f7917c00SJeff Kirsher  *	Returns true if a response descriptor contains a yet unprocessed
2275f7917c00SJeff Kirsher  *	response.
2276f7917c00SJeff Kirsher  */
2277f7917c00SJeff Kirsher static inline int is_new_response(const struct rsp_desc *r,
2278f7917c00SJeff Kirsher 				  const struct sge_rspq *q)
2279f7917c00SJeff Kirsher {
2280f7917c00SJeff Kirsher 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2281f7917c00SJeff Kirsher }
2282f7917c00SJeff Kirsher 
2283f7917c00SJeff Kirsher static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2284f7917c00SJeff Kirsher {
2285f7917c00SJeff Kirsher 	q->pg_skb = NULL;
2286f7917c00SJeff Kirsher 	q->rx_recycle_buf = 0;
2287f7917c00SJeff Kirsher }
2288f7917c00SJeff Kirsher 
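/*
 * RSPD_CTRL_MASK gathers every control field in a response's flags word:
 * the GTS indications and the completion-credit counts for the three Tx
 * queues.  The response-processing loops test it to decide whether
 * handle_rsp_cntrl_info() needs to run at all.
 */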
2289f7917c00SJeff Kirsher #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2290f7917c00SJeff Kirsher #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2291f7917c00SJeff Kirsher 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2292f7917c00SJeff Kirsher 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2293f7917c00SJeff Kirsher 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2294f7917c00SJeff Kirsher 
2295f7917c00SJeff Kirsher /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2296f7917c00SJeff Kirsher #define NOMEM_INTR_DELAY 2500
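/* i.e., a 250us holdoff (2500 * 0.1us) before the next interrupt. */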
2297f7917c00SJeff Kirsher 
2298f7917c00SJeff Kirsher /**
2299f7917c00SJeff Kirsher  *	process_responses - process responses from an SGE response queue
2300f7917c00SJeff Kirsher  *	@adap: the adapter
2301f7917c00SJeff Kirsher  *	@qs: the queue set to which the response queue belongs
2302f7917c00SJeff Kirsher  *	@budget: how many responses can be processed in this round
2303f7917c00SJeff Kirsher  *
2304f7917c00SJeff Kirsher  *	Process responses from an SGE response queue up to the supplied budget.
2305f7917c00SJeff Kirsher  *	Responses include received packets as well as credits and other events
2306f7917c00SJeff Kirsher  *	for the queues that belong to the response queue's queue set.
2307f7917c00SJeff Kirsher  *	A negative budget is effectively unlimited.
2308f7917c00SJeff Kirsher  *
2309f7917c00SJeff Kirsher  *	Additionally choose the interrupt holdoff time for the next interrupt
2310f7917c00SJeff Kirsher  *	on this queue.  If the system is under memory shortage use a fairly
2311f7917c00SJeff Kirsher  *	long delay to help recovery.
2312f7917c00SJeff Kirsher  */
2313f7917c00SJeff Kirsher static int process_responses(struct adapter *adap, struct sge_qset *qs,
2314f7917c00SJeff Kirsher 			     int budget)
2315f7917c00SJeff Kirsher {
2316f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2317f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2318f7917c00SJeff Kirsher 	int budget_left = budget;
2319f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2320f7917c00SJeff Kirsher 	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2321f7917c00SJeff Kirsher 	int ngathered = 0;
2322f7917c00SJeff Kirsher 
2323f7917c00SJeff Kirsher 	q->next_holdoff = q->holdoff_tmr;
2324f7917c00SJeff Kirsher 
2325f7917c00SJeff Kirsher 	while (likely(budget_left && is_new_response(r, q))) {
2326f7917c00SJeff Kirsher 		int packet_complete, eth, ethpad = 2;
2327f7917c00SJeff Kirsher 		int lro = !!(qs->netdev->features & NETIF_F_GRO);
2328f7917c00SJeff Kirsher 		struct sk_buff *skb = NULL;
2329f7917c00SJeff Kirsher 		u32 len, flags;
2330f7917c00SJeff Kirsher 		__be32 rss_hi, rss_lo;
2331f7917c00SJeff Kirsher 
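		/*
		 * Read the rest of the descriptor only after the generation
		 * bit check in is_new_response() above has seen it as valid.
		 */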
2332f7917c00SJeff Kirsher 		rmb();
2333f7917c00SJeff Kirsher 		eth = r->rss_hdr.opcode == CPL_RX_PKT;
2334f7917c00SJeff Kirsher 		rss_hi = *(const __be32 *)r;
2335f7917c00SJeff Kirsher 		rss_lo = r->rss_hdr.rss_hash_val;
2336f7917c00SJeff Kirsher 		flags = ntohl(r->flags);
2337f7917c00SJeff Kirsher 
2338f7917c00SJeff Kirsher 		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2339f7917c00SJeff Kirsher 			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2340f7917c00SJeff Kirsher 			if (!skb)
2341f7917c00SJeff Kirsher 				goto no_mem;
2342f7917c00SJeff Kirsher 
2343f7917c00SJeff Kirsher 			memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2344f7917c00SJeff Kirsher 			skb->data[0] = CPL_ASYNC_NOTIF;
2345f7917c00SJeff Kirsher 			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2346f7917c00SJeff Kirsher 			q->async_notif++;
2347f7917c00SJeff Kirsher 		} else if (flags & F_RSPD_IMM_DATA_VALID) {
2348f7917c00SJeff Kirsher 			skb = get_imm_packet(r);
2349f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2350f7917c00SJeff Kirsher no_mem:
2351f7917c00SJeff Kirsher 				q->next_holdoff = NOMEM_INTR_DELAY;
2352f7917c00SJeff Kirsher 				q->nomem++;
2353f7917c00SJeff Kirsher 				/* consume one credit since we tried */
2354f7917c00SJeff Kirsher 				budget_left--;
2355f7917c00SJeff Kirsher 				break;
2356f7917c00SJeff Kirsher 			}
2357f7917c00SJeff Kirsher 			q->imm_data++;
2358f7917c00SJeff Kirsher 			ethpad = 0;
2359f7917c00SJeff Kirsher 		} else if ((len = ntohl(r->len_cq)) != 0) {
2360f7917c00SJeff Kirsher 			struct sge_fl *fl;
2361f7917c00SJeff Kirsher 
2362f7917c00SJeff Kirsher 			lro &= eth && is_eth_tcp(rss_hi);
2363f7917c00SJeff Kirsher 
2364f7917c00SJeff Kirsher 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2365f7917c00SJeff Kirsher 			if (fl->use_pages) {
2366f7917c00SJeff Kirsher 				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2367f7917c00SJeff Kirsher 
2368f7917c00SJeff Kirsher 				prefetch(addr);
2369f7917c00SJeff Kirsher #if L1_CACHE_BYTES < 128
2370f7917c00SJeff Kirsher 				prefetch(addr + L1_CACHE_BYTES);
2371f7917c00SJeff Kirsher #endif
2372f7917c00SJeff Kirsher 				__refill_fl(adap, fl);
2373f7917c00SJeff Kirsher 				if (lro > 0) {
2374f7917c00SJeff Kirsher 					lro_add_page(adap, qs, fl,
2375f7917c00SJeff Kirsher 						     G_RSPD_LEN(len),
2376f7917c00SJeff Kirsher 						     flags & F_RSPD_EOP);
2377f7917c00SJeff Kirsher 					goto next_fl;
2378f7917c00SJeff Kirsher 				}
2379f7917c00SJeff Kirsher 
2380f7917c00SJeff Kirsher 				skb = get_packet_pg(adap, fl, q,
2381f7917c00SJeff Kirsher 						    G_RSPD_LEN(len),
2382f7917c00SJeff Kirsher 						    eth ?
2383f7917c00SJeff Kirsher 						    SGE_RX_DROP_THRES : 0);
2384f7917c00SJeff Kirsher 				q->pg_skb = skb;
2385f7917c00SJeff Kirsher 			} else
2386f7917c00SJeff Kirsher 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
2387f7917c00SJeff Kirsher 						 eth ? SGE_RX_DROP_THRES : 0);
2388f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2389f7917c00SJeff Kirsher 				if (!eth)
2390f7917c00SJeff Kirsher 					goto no_mem;
2391f7917c00SJeff Kirsher 				q->rx_drops++;
2392f7917c00SJeff Kirsher 			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2393f7917c00SJeff Kirsher 				__skb_pull(skb, 2);
2394f7917c00SJeff Kirsher next_fl:
2395f7917c00SJeff Kirsher 			if (++fl->cidx == fl->size)
2396f7917c00SJeff Kirsher 				fl->cidx = 0;
2397f7917c00SJeff Kirsher 		} else
2398f7917c00SJeff Kirsher 			q->pure_rsps++;
2399f7917c00SJeff Kirsher 
2400f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2401f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2402f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2403f7917c00SJeff Kirsher 		}
2404f7917c00SJeff Kirsher 
2405f7917c00SJeff Kirsher 		r++;
2406f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2407f7917c00SJeff Kirsher 			q->cidx = 0;
2408f7917c00SJeff Kirsher 			q->gen ^= 1;
2409f7917c00SJeff Kirsher 			r = q->desc;
2410f7917c00SJeff Kirsher 		}
2411f7917c00SJeff Kirsher 		prefetch(r);
2412f7917c00SJeff Kirsher 
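		/*
		 * Return accumulated response queue credits to the hardware
		 * once a quarter of the ring has been consumed.
		 */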
2413f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2414f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2415f7917c00SJeff Kirsher 			q->credits = 0;
2416f7917c00SJeff Kirsher 		}
2417f7917c00SJeff Kirsher 
2418f7917c00SJeff Kirsher 		packet_complete = flags &
2419f7917c00SJeff Kirsher 				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2420f7917c00SJeff Kirsher 				   F_RSPD_ASYNC_NOTIF);
2421f7917c00SJeff Kirsher 
2422f7917c00SJeff Kirsher 		if (skb != NULL && packet_complete) {
2423f7917c00SJeff Kirsher 			if (eth)
2424f7917c00SJeff Kirsher 				rx_eth(adap, q, skb, ethpad, lro);
2425f7917c00SJeff Kirsher 			else {
2426f7917c00SJeff Kirsher 				q->offload_pkts++;
2427f7917c00SJeff Kirsher 				/* Preserve the RSS info in csum & priority */
2428f7917c00SJeff Kirsher 				skb->csum = rss_hi;
2429f7917c00SJeff Kirsher 				skb->priority = rss_lo;
2430f7917c00SJeff Kirsher 				ngathered = rx_offload(&adap->tdev, q, skb,
2431f7917c00SJeff Kirsher 						       offload_skbs,
2432f7917c00SJeff Kirsher 						       ngathered);
2433f7917c00SJeff Kirsher 			}
2434f7917c00SJeff Kirsher 
2435f7917c00SJeff Kirsher 			if (flags & F_RSPD_EOP)
2436f7917c00SJeff Kirsher 				clear_rspq_bufstate(q);
2437f7917c00SJeff Kirsher 		}
2438f7917c00SJeff Kirsher 		--budget_left;
2439f7917c00SJeff Kirsher 	}
2440f7917c00SJeff Kirsher 
2441f7917c00SJeff Kirsher 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2442f7917c00SJeff Kirsher 
2443f7917c00SJeff Kirsher 	if (sleeping)
2444f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2445f7917c00SJeff Kirsher 
2446f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2447f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2448f7917c00SJeff Kirsher 		restart_tx(qs);
2449f7917c00SJeff Kirsher 
2450f7917c00SJeff Kirsher 	budget -= budget_left;
2451f7917c00SJeff Kirsher 	return budget;
2452f7917c00SJeff Kirsher }
2453f7917c00SJeff Kirsher 
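/*
 * A pure response carries no data: no immediate data, no async notification
 * and no free-list buffer (len_cq == 0); it only returns credits and other
 * control information.
 */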
2454f7917c00SJeff Kirsher static inline int is_pure_response(const struct rsp_desc *r)
2455f7917c00SJeff Kirsher {
2456f7917c00SJeff Kirsher 	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2457f7917c00SJeff Kirsher 
2458f7917c00SJeff Kirsher 	return (n | r->len_cq) == 0;
2459f7917c00SJeff Kirsher }
2460f7917c00SJeff Kirsher 
2461f7917c00SJeff Kirsher /**
2462f7917c00SJeff Kirsher  *	napi_rx_handler - the NAPI handler for Rx processing
2463f7917c00SJeff Kirsher  *	@napi: the napi instance
2464f7917c00SJeff Kirsher  *	@budget: how many packets we can process in this round
2465f7917c00SJeff Kirsher  *
2466f7917c00SJeff Kirsher  *	Handler for new data events when using NAPI.
2467f7917c00SJeff Kirsher  */
2468f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget)
2469f7917c00SJeff Kirsher {
2470f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2471f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2472f7917c00SJeff Kirsher 	int work_done = process_responses(adap, qs, budget);
2473f7917c00SJeff Kirsher 
2474f7917c00SJeff Kirsher 	if (likely(work_done < budget)) {
2475f7917c00SJeff Kirsher 		napi_complete(napi);
2476f7917c00SJeff Kirsher 
2477f7917c00SJeff Kirsher 		/*
2478f7917c00SJeff Kirsher 		 * Because we don't atomically flush the following
2479f7917c00SJeff Kirsher 		 * write it is possible that in very rare cases it can
2480f7917c00SJeff Kirsher 		 * reach the device in a way that races with a new
2481f7917c00SJeff Kirsher 		 * response being written plus an error interrupt
2482f7917c00SJeff Kirsher 		 * causing the NAPI interrupt handler below to return
2483f7917c00SJeff Kirsher 		 * unhandled status to the OS.  To protect against
2484f7917c00SJeff Kirsher 		 * this would require flushing the write and doing
2485f7917c00SJeff Kirsher 		 * both the write and the flush with interrupts off.
2486f7917c00SJeff Kirsher 		 * Way too expensive and unjustifiable given the
2487f7917c00SJeff Kirsher 		 * rarity of the race.
2488f7917c00SJeff Kirsher 		 *
2489f7917c00SJeff Kirsher 		 * The race cannot happen at all with MSI-X.
2490f7917c00SJeff Kirsher 		 */
2491f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2492f7917c00SJeff Kirsher 			     V_NEWTIMER(qs->rspq.next_holdoff) |
2493f7917c00SJeff Kirsher 			     V_NEWINDEX(qs->rspq.cidx));
2494f7917c00SJeff Kirsher 	}
2495f7917c00SJeff Kirsher 	return work_done;
2496f7917c00SJeff Kirsher }
2497f7917c00SJeff Kirsher 
2498f7917c00SJeff Kirsher /*
2499f7917c00SJeff Kirsher  * Returns true if the device is already scheduled for polling.
2500f7917c00SJeff Kirsher  */
2501f7917c00SJeff Kirsher static inline int napi_is_scheduled(struct napi_struct *napi)
2502f7917c00SJeff Kirsher {
2503f7917c00SJeff Kirsher 	return test_bit(NAPI_STATE_SCHED, &napi->state);
2504f7917c00SJeff Kirsher }
2505f7917c00SJeff Kirsher 
2506f7917c00SJeff Kirsher /**
2507f7917c00SJeff Kirsher  *	process_pure_responses - process pure responses from a response queue
2508f7917c00SJeff Kirsher  *	@adap: the adapter
2509f7917c00SJeff Kirsher  *	@qs: the queue set owning the response queue
2510f7917c00SJeff Kirsher  *	@r: the first pure response to process
2511f7917c00SJeff Kirsher  *
2512f7917c00SJeff Kirsher  *	A simpler version of process_responses() that handles only pure (i.e.,
2513f7917c00SJeff Kirsher  *	non data-carrying) responses.  Such responses are too light-weight to
2514f7917c00SJeff Kirsher  *	justify calling a softirq under NAPI, so we handle them specially in
2515f7917c00SJeff Kirsher  *	the interrupt handler.  The function is called with a pointer to a
2516f7917c00SJeff Kirsher  *	response, which the caller must ensure is a valid pure response.
2517f7917c00SJeff Kirsher  *
2518f7917c00SJeff Kirsher  *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2519f7917c00SJeff Kirsher  */
2520f7917c00SJeff Kirsher static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2521f7917c00SJeff Kirsher 				  struct rsp_desc *r)
2522f7917c00SJeff Kirsher {
2523f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2524f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2525f7917c00SJeff Kirsher 
2526f7917c00SJeff Kirsher 	do {
2527f7917c00SJeff Kirsher 		u32 flags = ntohl(r->flags);
2528f7917c00SJeff Kirsher 
2529f7917c00SJeff Kirsher 		r++;
2530f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2531f7917c00SJeff Kirsher 			q->cidx = 0;
2532f7917c00SJeff Kirsher 			q->gen ^= 1;
2533f7917c00SJeff Kirsher 			r = q->desc;
2534f7917c00SJeff Kirsher 		}
2535f7917c00SJeff Kirsher 		prefetch(r);
2536f7917c00SJeff Kirsher 
2537f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2538f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2539f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2540f7917c00SJeff Kirsher 		}
2541f7917c00SJeff Kirsher 
2542f7917c00SJeff Kirsher 		q->pure_rsps++;
2543f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2544f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2545f7917c00SJeff Kirsher 			q->credits = 0;
2546f7917c00SJeff Kirsher 		}
2547f7917c00SJeff Kirsher 		if (!is_new_response(r, q))
2548f7917c00SJeff Kirsher 			break;
2549f7917c00SJeff Kirsher 		rmb();
2550f7917c00SJeff Kirsher 	} while (is_pure_response(r));
2551f7917c00SJeff Kirsher 
2552f7917c00SJeff Kirsher 	if (sleeping)
2553f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2554f7917c00SJeff Kirsher 
2555f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2556f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2557f7917c00SJeff Kirsher 		restart_tx(qs);
2558f7917c00SJeff Kirsher 
2559f7917c00SJeff Kirsher 	return is_new_response(r, q);
2560f7917c00SJeff Kirsher }
2561f7917c00SJeff Kirsher 
2562f7917c00SJeff Kirsher /**
2563f7917c00SJeff Kirsher  *	handle_responses - decide what to do with new responses in NAPI mode
2564f7917c00SJeff Kirsher  *	@adap: the adapter
2565f7917c00SJeff Kirsher  *	@q: the response queue
2566f7917c00SJeff Kirsher  *
2567f7917c00SJeff Kirsher  *	This is used by the NAPI interrupt handlers to decide what to do with
2568f7917c00SJeff Kirsher  *	new SGE responses.  If there are no new responses it returns -1.  If
2569f7917c00SJeff Kirsher  *	there are new responses and they are pure (i.e., non-data carrying)
2570f7917c00SJeff Kirsher  *	it handles them straight in hard interrupt context as they are very
2571f7917c00SJeff Kirsher  *	cheap and don't deliver any packets.  Finally, if there are any data
2572f7917c00SJeff Kirsher  *	signaling responses it schedules the NAPI handler.  Returns 1 if it
2573f7917c00SJeff Kirsher  *	schedules NAPI, 0 if all new responses were pure.
2574f7917c00SJeff Kirsher  *
2575f7917c00SJeff Kirsher  *	The caller must ascertain NAPI is not already running.
2576f7917c00SJeff Kirsher  */
2577f7917c00SJeff Kirsher static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2578f7917c00SJeff Kirsher {
2579f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(q);
2580f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2581f7917c00SJeff Kirsher 
2582f7917c00SJeff Kirsher 	if (!is_new_response(r, q))
2583f7917c00SJeff Kirsher 		return -1;
2584f7917c00SJeff Kirsher 	rmb();
2585f7917c00SJeff Kirsher 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2586f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2587f7917c00SJeff Kirsher 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2588f7917c00SJeff Kirsher 		return 0;
2589f7917c00SJeff Kirsher 	}
2590f7917c00SJeff Kirsher 	napi_schedule(&qs->napi);
2591f7917c00SJeff Kirsher 	return 1;
2592f7917c00SJeff Kirsher }
2593f7917c00SJeff Kirsher 
2594f7917c00SJeff Kirsher /*
2595f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2596f7917c00SJeff Kirsher  * (i.e., response queue serviced in hard interrupt).
2597f7917c00SJeff Kirsher  */
2598f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2599f7917c00SJeff Kirsher {
2600f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2601f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2602f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2603f7917c00SJeff Kirsher 
2604f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2605f7917c00SJeff Kirsher 	if (process_responses(adap, qs, -1) == 0)
2606f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2607f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2608f7917c00SJeff Kirsher 		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2609f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2610f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2611f7917c00SJeff Kirsher }
2612f7917c00SJeff Kirsher 
2613f7917c00SJeff Kirsher /*
2614f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2615f7917c00SJeff Kirsher  * (i.e., response queue serviced by NAPI polling).
2616f7917c00SJeff Kirsher  */
2617f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2618f7917c00SJeff Kirsher {
2619f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2620f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2621f7917c00SJeff Kirsher 
2622f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2623f7917c00SJeff Kirsher 
2624f7917c00SJeff Kirsher 	if (handle_responses(qs->adap, q) < 0)
2625f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2626f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2627f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2628f7917c00SJeff Kirsher }
2629f7917c00SJeff Kirsher 
2630f7917c00SJeff Kirsher /*
2631f7917c00SJeff Kirsher  * The non-NAPI MSI interrupt handler.  This needs to handle data events from
2632f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2633f7917c00SJeff Kirsher  * the same MSI vector.  We use one SGE response queue per port in this mode
2634f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2635f7917c00SJeff Kirsher  */
2636f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi(int irq, void *cookie)
2637f7917c00SJeff Kirsher {
2638f7917c00SJeff Kirsher 	int new_packets = 0;
2639f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2640f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2641f7917c00SJeff Kirsher 
2642f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2643f7917c00SJeff Kirsher 
2644f7917c00SJeff Kirsher 	if (process_responses(adap, &adap->sge.qs[0], -1)) {
2645f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2646f7917c00SJeff Kirsher 			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2647f7917c00SJeff Kirsher 		new_packets = 1;
2648f7917c00SJeff Kirsher 	}
2649f7917c00SJeff Kirsher 
2650f7917c00SJeff Kirsher 	if (adap->params.nports == 2 &&
2651f7917c00SJeff Kirsher 	    process_responses(adap, &adap->sge.qs[1], -1)) {
2652f7917c00SJeff Kirsher 		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2653f7917c00SJeff Kirsher 
2654f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2655f7917c00SJeff Kirsher 			     V_NEWTIMER(q1->next_holdoff) |
2656f7917c00SJeff Kirsher 			     V_NEWINDEX(q1->cidx));
2657f7917c00SJeff Kirsher 		new_packets = 1;
2658f7917c00SJeff Kirsher 	}
2659f7917c00SJeff Kirsher 
2660f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2661f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2662f7917c00SJeff Kirsher 
2663f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2664f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2665f7917c00SJeff Kirsher }
2666f7917c00SJeff Kirsher 
2667f7917c00SJeff Kirsher static int rspq_check_napi(struct sge_qset *qs)
2668f7917c00SJeff Kirsher {
2669f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2670f7917c00SJeff Kirsher 
2671f7917c00SJeff Kirsher 	if (!napi_is_scheduled(&qs->napi) &&
2672f7917c00SJeff Kirsher 	    is_new_response(&q->desc[q->cidx], q)) {
2673f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
2674f7917c00SJeff Kirsher 		return 1;
2675f7917c00SJeff Kirsher 	}
2676f7917c00SJeff Kirsher 	return 0;
2677f7917c00SJeff Kirsher }
2678f7917c00SJeff Kirsher 
2679f7917c00SJeff Kirsher /*
2680f7917c00SJeff Kirsher  * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2681f7917c00SJeff Kirsher  * by NAPI polling).  Handles data events from SGE response queues as well as
2682f7917c00SJeff Kirsher  * error and other async events as they all use the same MSI vector.  We use
2683f7917c00SJeff Kirsher  * one SGE response queue per port in this mode and protect all response
2684f7917c00SJeff Kirsher  * queues with queue 0's lock.
2685f7917c00SJeff Kirsher  */
2686f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2687f7917c00SJeff Kirsher {
2688f7917c00SJeff Kirsher 	int new_packets;
2689f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2690f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2691f7917c00SJeff Kirsher 
2692f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2693f7917c00SJeff Kirsher 
2694f7917c00SJeff Kirsher 	new_packets = rspq_check_napi(&adap->sge.qs[0]);
2695f7917c00SJeff Kirsher 	if (adap->params.nports == 2)
2696f7917c00SJeff Kirsher 		new_packets += rspq_check_napi(&adap->sge.qs[1]);
2697f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2698f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2699f7917c00SJeff Kirsher 
2700f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2701f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2702f7917c00SJeff Kirsher }
2703f7917c00SJeff Kirsher 
2704f7917c00SJeff Kirsher /*
2705f7917c00SJeff Kirsher  * A helper function that processes responses and issues GTS.
2706f7917c00SJeff Kirsher  */
2707f7917c00SJeff Kirsher static inline int process_responses_gts(struct adapter *adap,
2708f7917c00SJeff Kirsher 					struct sge_rspq *rq)
2709f7917c00SJeff Kirsher {
2710f7917c00SJeff Kirsher 	int work;
2711f7917c00SJeff Kirsher 
2712f7917c00SJeff Kirsher 	work = process_responses(adap, rspq_to_qset(rq), -1);
2713f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2714f7917c00SJeff Kirsher 		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2715f7917c00SJeff Kirsher 	return work;
2716f7917c00SJeff Kirsher }
2717f7917c00SJeff Kirsher 
2718f7917c00SJeff Kirsher /*
2719f7917c00SJeff Kirsher  * The legacy INTx interrupt handler.  This needs to handle data events from
2720f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2721f7917c00SJeff Kirsher  * the same interrupt pin.  We use one SGE response queue per port in this mode
2722f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2723f7917c00SJeff Kirsher  */
2724f7917c00SJeff Kirsher static irqreturn_t t3_intr(int irq, void *cookie)
2725f7917c00SJeff Kirsher {
2726f7917c00SJeff Kirsher 	int work_done, w0, w1;
2727f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2728f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2729f7917c00SJeff Kirsher 	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2730f7917c00SJeff Kirsher 
2731f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2732f7917c00SJeff Kirsher 
2733f7917c00SJeff Kirsher 	w0 = is_new_response(&q0->desc[q0->cidx], q0);
2734f7917c00SJeff Kirsher 	w1 = adap->params.nports == 2 &&
2735f7917c00SJeff Kirsher 	    is_new_response(&q1->desc[q1->cidx], q1);
2736f7917c00SJeff Kirsher 
2737f7917c00SJeff Kirsher 	if (likely(w0 | w1)) {
2738f7917c00SJeff Kirsher 		t3_write_reg(adap, A_PL_CLI, 0);
2739f7917c00SJeff Kirsher 		t3_read_reg(adap, A_PL_CLI);	/* flush */
2740f7917c00SJeff Kirsher 
2741f7917c00SJeff Kirsher 		if (likely(w0))
2742f7917c00SJeff Kirsher 			process_responses_gts(adap, q0);
2743f7917c00SJeff Kirsher 
2744f7917c00SJeff Kirsher 		if (w1)
2745f7917c00SJeff Kirsher 			process_responses_gts(adap, q1);
2746f7917c00SJeff Kirsher 
2747f7917c00SJeff Kirsher 		work_done = w0 | w1;
2748f7917c00SJeff Kirsher 	} else
2749f7917c00SJeff Kirsher 		work_done = t3_slow_intr_handler(adap);
2750f7917c00SJeff Kirsher 
2751f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2752f7917c00SJeff Kirsher 	return IRQ_RETVAL(work_done != 0);
2753f7917c00SJeff Kirsher }
2754f7917c00SJeff Kirsher 
2755f7917c00SJeff Kirsher /*
2756f7917c00SJeff Kirsher  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2757f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2758f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2759f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2760f7917c00SJeff Kirsher  * queue 0's lock.
2761f7917c00SJeff Kirsher  */
2762f7917c00SJeff Kirsher static irqreturn_t t3b_intr(int irq, void *cookie)
2763f7917c00SJeff Kirsher {
2764f7917c00SJeff Kirsher 	u32 map;
2765f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2766f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2767f7917c00SJeff Kirsher 
2768f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2769f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2770f7917c00SJeff Kirsher 
2771f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2772f7917c00SJeff Kirsher 		return IRQ_NONE;
2773f7917c00SJeff Kirsher 
2774f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2775f7917c00SJeff Kirsher 
2776f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2777f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2778f7917c00SJeff Kirsher 
2779f7917c00SJeff Kirsher 	if (likely(map & 1))
2780f7917c00SJeff Kirsher 		process_responses_gts(adap, q0);
2781f7917c00SJeff Kirsher 
2782f7917c00SJeff Kirsher 	if (map & 2)
2783f7917c00SJeff Kirsher 		process_responses_gts(adap, &adap->sge.qs[1].rspq);
2784f7917c00SJeff Kirsher 
2785f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2786f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2787f7917c00SJeff Kirsher }
2788f7917c00SJeff Kirsher 
2789f7917c00SJeff Kirsher /*
2790f7917c00SJeff Kirsher  * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2791f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2792f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2793f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2794f7917c00SJeff Kirsher  * queue 0's lock.
2795f7917c00SJeff Kirsher  */
2796f7917c00SJeff Kirsher static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2797f7917c00SJeff Kirsher {
2798f7917c00SJeff Kirsher 	u32 map;
2799f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2800f7917c00SJeff Kirsher 	struct sge_qset *qs0 = &adap->sge.qs[0];
2801f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &qs0->rspq;
2802f7917c00SJeff Kirsher 
2803f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2804f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2805f7917c00SJeff Kirsher 
2806f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2807f7917c00SJeff Kirsher 		return IRQ_NONE;
2808f7917c00SJeff Kirsher 
2809f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2810f7917c00SJeff Kirsher 
2811f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2812f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2813f7917c00SJeff Kirsher 
2814f7917c00SJeff Kirsher 	if (likely(map & 1))
2815f7917c00SJeff Kirsher 		napi_schedule(&qs0->napi);
2816f7917c00SJeff Kirsher 
2817f7917c00SJeff Kirsher 	if (map & 2)
2818f7917c00SJeff Kirsher 		napi_schedule(&adap->sge.qs[1].napi);
2819f7917c00SJeff Kirsher 
2820f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2821f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2822f7917c00SJeff Kirsher }
2823f7917c00SJeff Kirsher 
2824f7917c00SJeff Kirsher /**
2825f7917c00SJeff Kirsher  *	t3_intr_handler - select the top-level interrupt handler
2826f7917c00SJeff Kirsher  *	@adap: the adapter
2827f7917c00SJeff Kirsher  *	@polling: whether using NAPI to service response queues
2828f7917c00SJeff Kirsher  *
2829f7917c00SJeff Kirsher  *	Selects the top-level interrupt handler based on the type of interrupts
2830f7917c00SJeff Kirsher  *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2831f7917c00SJeff Kirsher  *	response queues.
2832f7917c00SJeff Kirsher  */
2833f7917c00SJeff Kirsher irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2834f7917c00SJeff Kirsher {
2835f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX)
2836f7917c00SJeff Kirsher 		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2837f7917c00SJeff Kirsher 	if (adap->flags & USING_MSI)
2838f7917c00SJeff Kirsher 		return polling ? t3_intr_msi_napi : t3_intr_msi;
2839f7917c00SJeff Kirsher 	if (adap->params.rev > 0)
2840f7917c00SJeff Kirsher 		return polling ? t3b_intr_napi : t3b_intr;
2841f7917c00SJeff Kirsher 	return t3_intr;
2842f7917c00SJeff Kirsher }
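
/*
 * A sketch of typical use (the exact call lives in the driver's setup code,
 * so the names and flags below are illustrative rather than definitive):
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  adap->name, adap);
 */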
2843f7917c00SJeff Kirsher 
2844f7917c00SJeff Kirsher #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2845f7917c00SJeff Kirsher 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2846f7917c00SJeff Kirsher 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2847f7917c00SJeff Kirsher 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2848f7917c00SJeff Kirsher 		    F_HIRCQPARITYERROR)
2849f7917c00SJeff Kirsher #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2850f7917c00SJeff Kirsher #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2851f7917c00SJeff Kirsher 		      F_RSPQDISABLED)
2852f7917c00SJeff Kirsher 
2853f7917c00SJeff Kirsher /**
2854f7917c00SJeff Kirsher  *	t3_sge_err_intr_handler - SGE async event interrupt handler
2855f7917c00SJeff Kirsher  *	@adapter: the adapter
2856f7917c00SJeff Kirsher  *
2857f7917c00SJeff Kirsher  *	Interrupt handler for SGE asynchronous (non-data) events.
2858f7917c00SJeff Kirsher  */
2859f7917c00SJeff Kirsher void t3_sge_err_intr_handler(struct adapter *adapter)
2860f7917c00SJeff Kirsher {
2861f7917c00SJeff Kirsher 	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2862f7917c00SJeff Kirsher 				 ~F_FLEMPTY;
2863f7917c00SJeff Kirsher 
2864f7917c00SJeff Kirsher 	if (status & SGE_PARERR)
2865f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2866f7917c00SJeff Kirsher 			 status & SGE_PARERR);
2867f7917c00SJeff Kirsher 	if (status & SGE_FRAMINGERR)
2868f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2869f7917c00SJeff Kirsher 			 status & SGE_FRAMINGERR);
2870f7917c00SJeff Kirsher 
2871f7917c00SJeff Kirsher 	if (status & F_RSPQCREDITOVERFOW)
2872f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
2873f7917c00SJeff Kirsher 
2874f7917c00SJeff Kirsher 	if (status & F_RSPQDISABLED) {
2875f7917c00SJeff Kirsher 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2876f7917c00SJeff Kirsher 
2877f7917c00SJeff Kirsher 		CH_ALERT(adapter,
2878f7917c00SJeff Kirsher 			 "packet delivered to disabled response queue "
2879f7917c00SJeff Kirsher 			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2880f7917c00SJeff Kirsher 	}
2881f7917c00SJeff Kirsher 
2882f7917c00SJeff Kirsher 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2883f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_drop_task);
2884f7917c00SJeff Kirsher 
2885f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2886f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_full_task);
2887f7917c00SJeff Kirsher 
2888f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2889f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_empty_task);
2890f7917c00SJeff Kirsher 
2891f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2892f7917c00SJeff Kirsher 	if (status & SGE_FATALERR)
2893f7917c00SJeff Kirsher 		t3_fatal_err(adapter);
2894f7917c00SJeff Kirsher }
2895f7917c00SJeff Kirsher 
2896f7917c00SJeff Kirsher /**
2897f7917c00SJeff Kirsher  *	sge_timer_tx - perform periodic maintenance of an SGE qset
2898f7917c00SJeff Kirsher  *	@data: the SGE queue set to maintain
2899f7917c00SJeff Kirsher  *
2900f7917c00SJeff Kirsher  *	Runs periodically from a timer to perform maintenance of an SGE queue
2901f7917c00SJeff Kirsher  *	set.  Its single task is to clean up any completed Tx descriptors
2902f7917c00SJeff Kirsher  *	that may still be pending.
2904f7917c00SJeff Kirsher  *	Normal descriptor cleanup happens when new packets are added to a Tx
2905f7917c00SJeff Kirsher  *	queue so this timer is relatively infrequent and does any cleanup only
2906f7917c00SJeff Kirsher  *	if the Tx queue has not seen any new packets in a while.  We make a
2907f7917c00SJeff Kirsher  *	best effort attempt to reclaim descriptors, in that we don't wait
2908f7917c00SJeff Kirsher  *	around if we cannot get a queue's lock (which most likely is because
2909f7917c00SJeff Kirsher  *	someone else is queueing new packets and so will also handle the clean
2910f7917c00SJeff Kirsher  *	up).  Since control queues use immediate data exclusively we don't
2911f7917c00SJeff Kirsher  *	bother cleaning them up here.
2912f7917c00SJeff Kirsher  *
2913f7917c00SJeff Kirsher  */
2914f7917c00SJeff Kirsher static void sge_timer_tx(unsigned long data)
2915f7917c00SJeff Kirsher {
2916f7917c00SJeff Kirsher 	struct sge_qset *qs = (struct sge_qset *)data;
2917f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2918f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2919f7917c00SJeff Kirsher 	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2920f7917c00SJeff Kirsher 	unsigned long next_period;
2921f7917c00SJeff Kirsher 
2922f7917c00SJeff Kirsher 	if (__netif_tx_trylock(qs->tx_q)) {
2923f7917c00SJeff Kirsher 		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2924f7917c00SJeff Kirsher 						    TX_RECLAIM_TIMER_CHUNK);
2925f7917c00SJeff Kirsher 		__netif_tx_unlock(qs->tx_q);
2926f7917c00SJeff Kirsher 	}
2927f7917c00SJeff Kirsher 
2928f7917c00SJeff Kirsher 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2929f7917c00SJeff Kirsher 		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2930f7917c00SJeff Kirsher 						     TX_RECLAIM_TIMER_CHUNK);
2931f7917c00SJeff Kirsher 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
2932f7917c00SJeff Kirsher 	}
2933f7917c00SJeff Kirsher 
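	/*
	 * Re-arm sooner the more descriptors the chunked reclaim above had
	 * to process.
	 */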
2934f7917c00SJeff Kirsher 	next_period = TX_RECLAIM_PERIOD >>
2935f7917c00SJeff Kirsher 		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2936f7917c00SJeff Kirsher 		       TX_RECLAIM_TIMER_CHUNK);
2937f7917c00SJeff Kirsher 	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2938f7917c00SJeff Kirsher }
2939f7917c00SJeff Kirsher 
294049ce9c2cSBen Hutchings /**
2941f7917c00SJeff Kirsher  *	sge_timer_rx - perform periodic maintenance of an SGE qset
2942f7917c00SJeff Kirsher  *	@data: the SGE queue set to maintain
2943f7917c00SJeff Kirsher  *
2944f7917c00SJeff Kirsher  *	a) Replenishes Rx queues that have run out due to memory shortage.
2945f7917c00SJeff Kirsher  *	Normally new Rx buffers are added when existing ones are consumed but
2946f7917c00SJeff Kirsher  *	when out of memory a queue can become empty.  We try to add only a few
2947f7917c00SJeff Kirsher  *	buffers here; the queue will be replenished fully as these new buffers
2948f7917c00SJeff Kirsher  *	are used up, provided the memory shortage has subsided.
2949f7917c00SJeff Kirsher  *
2950f7917c00SJeff Kirsher  *	b) Return coalesced response queue credits in case a response queue is
2951f7917c00SJeff Kirsher  *	starved.
2952f7917c00SJeff Kirsher  *
2953f7917c00SJeff Kirsher  */
2954f7917c00SJeff Kirsher static void sge_timer_rx(unsigned long data)
2955f7917c00SJeff Kirsher {
2956f7917c00SJeff Kirsher 	spinlock_t *lock;
2957f7917c00SJeff Kirsher 	struct sge_qset *qs = (struct sge_qset *)data;
2958f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2959f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2960f7917c00SJeff Kirsher 	u32 status;
2961f7917c00SJeff Kirsher 
2962f7917c00SJeff Kirsher 	lock = adap->params.rev > 0 ?
2963f7917c00SJeff Kirsher 	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2964f7917c00SJeff Kirsher 
2965f7917c00SJeff Kirsher 	if (!spin_trylock_irq(lock))
2966f7917c00SJeff Kirsher 		goto out;
2967f7917c00SJeff Kirsher 
2968f7917c00SJeff Kirsher 	if (napi_is_scheduled(&qs->napi))
2969f7917c00SJeff Kirsher 		goto unlock;
2970f7917c00SJeff Kirsher 
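	/*
	 * On older chip revisions a response queue that has run out of
	 * credits is flagged in SG_RSPQ_FL_STATUS; hand back one of the
	 * coalesced credits to restart it.
	 */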
2971f7917c00SJeff Kirsher 	if (adap->params.rev < 4) {
2972f7917c00SJeff Kirsher 		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2973f7917c00SJeff Kirsher 
2974f7917c00SJeff Kirsher 		if (status & (1 << qs->rspq.cntxt_id)) {
2975f7917c00SJeff Kirsher 			qs->rspq.starved++;
2976f7917c00SJeff Kirsher 			if (qs->rspq.credits) {
2977f7917c00SJeff Kirsher 				qs->rspq.credits--;
2978f7917c00SJeff Kirsher 				refill_rspq(adap, &qs->rspq, 1);
2979f7917c00SJeff Kirsher 				qs->rspq.restarted++;
2980f7917c00SJeff Kirsher 				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2981f7917c00SJeff Kirsher 					     1 << qs->rspq.cntxt_id);
2982f7917c00SJeff Kirsher 			}
2983f7917c00SJeff Kirsher 		}
2984f7917c00SJeff Kirsher 	}
2985f7917c00SJeff Kirsher 
2986f7917c00SJeff Kirsher 	if (qs->fl[0].credits < qs->fl[0].size)
2987f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[0]);
2988f7917c00SJeff Kirsher 	if (qs->fl[1].credits < qs->fl[1].size)
2989f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[1]);
2990f7917c00SJeff Kirsher 
2991f7917c00SJeff Kirsher unlock:
2992f7917c00SJeff Kirsher 	spin_unlock_irq(lock);
2993f7917c00SJeff Kirsher out:
2994f7917c00SJeff Kirsher 	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
2995f7917c00SJeff Kirsher }
2996f7917c00SJeff Kirsher 
2997f7917c00SJeff Kirsher /**
2998f7917c00SJeff Kirsher  *	t3_update_qset_coalesce - update coalescing settings for a queue set
2999f7917c00SJeff Kirsher  *	@qs: the SGE queue set
3000f7917c00SJeff Kirsher  *	@p: new queue set parameters
3001f7917c00SJeff Kirsher  *
3002f7917c00SJeff Kirsher  *	Update the coalescing settings for an SGE queue set.  Nothing is done
3003f7917c00SJeff Kirsher  *	if the queue set is not initialized yet.
3004f7917c00SJeff Kirsher  */
3005f7917c00SJeff Kirsher void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3006f7917c00SJeff Kirsher {
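	/* The holdoff timer counts in 0.1 us units, hence the factor of 10. */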
3007f7917c00SJeff Kirsher 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
3008f7917c00SJeff Kirsher 	qs->rspq.polling = p->polling;
3009f7917c00SJeff Kirsher 	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
3010f7917c00SJeff Kirsher }
3011f7917c00SJeff Kirsher 
3012f7917c00SJeff Kirsher /**
3013f7917c00SJeff Kirsher  *	t3_sge_alloc_qset - initialize an SGE queue set
3014f7917c00SJeff Kirsher  *	@adapter: the adapter
3015f7917c00SJeff Kirsher  *	@id: the queue set id
3016f7917c00SJeff Kirsher  *	@nports: how many Ethernet ports will be using this queue set
3017f7917c00SJeff Kirsher  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
3018f7917c00SJeff Kirsher  *	@p: configuration parameters for this queue set
3019f7917c00SJeff Kirsher  *	@ntxq: number of Tx queues for the queue set
3020f7917c00SJeff Kirsher  *	@netdev: net device associated with this queue set
3021f7917c00SJeff Kirsher  *	@netdevq: net device TX queue associated with this queue set
3022f7917c00SJeff Kirsher  *
3023f7917c00SJeff Kirsher  *	Allocate resources and initialize an SGE queue set.  A queue set
3024f7917c00SJeff Kirsher  *	comprises a response queue, two Rx free-buffer queues, and up to 3
3025f7917c00SJeff Kirsher  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
3026f7917c00SJeff Kirsher  *	queue, offload queue, and control queue.
3027f7917c00SJeff Kirsher  */
3028f7917c00SJeff Kirsher int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3029f7917c00SJeff Kirsher 		      int irq_vec_idx, const struct qset_params *p,
3030f7917c00SJeff Kirsher 		      int ntxq, struct net_device *dev,
3031f7917c00SJeff Kirsher 		      struct netdev_queue *netdevq)
3032f7917c00SJeff Kirsher {
3033f7917c00SJeff Kirsher 	int i, avail, ret = -ENOMEM;
3034f7917c00SJeff Kirsher 	struct sge_qset *q = &adapter->sge.qs[id];
3035f7917c00SJeff Kirsher 
3036f7917c00SJeff Kirsher 	init_qset_cntxt(q, id);
3037f7917c00SJeff Kirsher 	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
3038f7917c00SJeff Kirsher 	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
3039f7917c00SJeff Kirsher 
3040f7917c00SJeff Kirsher 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3041f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3042f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3043f7917c00SJeff Kirsher 				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
3044f7917c00SJeff Kirsher 	if (!q->fl[0].desc)
3045f7917c00SJeff Kirsher 		goto err;
3046f7917c00SJeff Kirsher 
3047f7917c00SJeff Kirsher 	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3048f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3049f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3050f7917c00SJeff Kirsher 				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
3051f7917c00SJeff Kirsher 	if (!q->fl[1].desc)
3052f7917c00SJeff Kirsher 		goto err;
3053f7917c00SJeff Kirsher 
3054f7917c00SJeff Kirsher 	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3055f7917c00SJeff Kirsher 				  sizeof(struct rsp_desc), 0,
3056f7917c00SJeff Kirsher 				  &q->rspq.phys_addr, NULL);
3057f7917c00SJeff Kirsher 	if (!q->rspq.desc)
3058f7917c00SJeff Kirsher 		goto err;
3059f7917c00SJeff Kirsher 
3060f7917c00SJeff Kirsher 	for (i = 0; i < ntxq; ++i) {
3061f7917c00SJeff Kirsher 		/*
3062f7917c00SJeff Kirsher 		 * The control queue always uses immediate data so does not
3063f7917c00SJeff Kirsher 		 * need to keep track of any sk_buffs.
3064f7917c00SJeff Kirsher 		 */
3065f7917c00SJeff Kirsher 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3066f7917c00SJeff Kirsher 
3067f7917c00SJeff Kirsher 		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3068f7917c00SJeff Kirsher 					    sizeof(struct tx_desc), sz,
3069f7917c00SJeff Kirsher 					    &q->txq[i].phys_addr,
3070f7917c00SJeff Kirsher 					    &q->txq[i].sdesc);
3071f7917c00SJeff Kirsher 		if (!q->txq[i].desc)
3072f7917c00SJeff Kirsher 			goto err;
3073f7917c00SJeff Kirsher 
3074f7917c00SJeff Kirsher 		q->txq[i].gen = 1;
3075f7917c00SJeff Kirsher 		q->txq[i].size = p->txq_size[i];
3076f7917c00SJeff Kirsher 		spin_lock_init(&q->txq[i].lock);
3077f7917c00SJeff Kirsher 		skb_queue_head_init(&q->txq[i].sendq);
3078f7917c00SJeff Kirsher 	}
3079f7917c00SJeff Kirsher 
3080f7917c00SJeff Kirsher 	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
3081f7917c00SJeff Kirsher 		     (unsigned long)q);
3082f7917c00SJeff Kirsher 	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
3083f7917c00SJeff Kirsher 		     (unsigned long)q);
3084f7917c00SJeff Kirsher 
3085f7917c00SJeff Kirsher 	q->fl[0].gen = q->fl[1].gen = 1;
3086f7917c00SJeff Kirsher 	q->fl[0].size = p->fl_size;
3087f7917c00SJeff Kirsher 	q->fl[1].size = p->jumbo_size;
3088f7917c00SJeff Kirsher 
3089f7917c00SJeff Kirsher 	q->rspq.gen = 1;
3090f7917c00SJeff Kirsher 	q->rspq.size = p->rspq_size;
3091f7917c00SJeff Kirsher 	spin_lock_init(&q->rspq.lock);
3092f7917c00SJeff Kirsher 	skb_queue_head_init(&q->rspq.rx_queue);
3093f7917c00SJeff Kirsher 
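	/*
	 * Stop the Ethernet Tx queue while fewer descriptors remain than a
	 * maximally fragmented packet from each of the ports sharing the
	 * queue set could require.
	 */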
3094f7917c00SJeff Kirsher 	q->txq[TXQ_ETH].stop_thres = nports *
3095f7917c00SJeff Kirsher 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3096f7917c00SJeff Kirsher 
3097f7917c00SJeff Kirsher #if FL0_PG_CHUNK_SIZE > 0
3098f7917c00SJeff Kirsher 	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3099f7917c00SJeff Kirsher #else
3100f7917c00SJeff Kirsher 	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3101f7917c00SJeff Kirsher #endif
3102f7917c00SJeff Kirsher #if FL1_PG_CHUNK_SIZE > 0
3103f7917c00SJeff Kirsher 	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3104f7917c00SJeff Kirsher #else
3105f7917c00SJeff Kirsher 	q->fl[1].buf_size = is_offload(adapter) ?
3106f7917c00SJeff Kirsher 		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3107f7917c00SJeff Kirsher 		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3108f7917c00SJeff Kirsher #endif
3109f7917c00SJeff Kirsher 
3110f7917c00SJeff Kirsher 	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3111f7917c00SJeff Kirsher 	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3112f7917c00SJeff Kirsher 	q->fl[0].order = FL0_PG_ORDER;
3113f7917c00SJeff Kirsher 	q->fl[1].order = FL1_PG_ORDER;
3114f7917c00SJeff Kirsher 	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3115f7917c00SJeff Kirsher 	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3116f7917c00SJeff Kirsher 
3117f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->sge.reg_lock);
3118f7917c00SJeff Kirsher 
3119f7917c00SJeff Kirsher 	/* FL threshold comparison uses < */
3120f7917c00SJeff Kirsher 	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3121f7917c00SJeff Kirsher 				   q->rspq.phys_addr, q->rspq.size,
3122f7917c00SJeff Kirsher 				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3123f7917c00SJeff Kirsher 	if (ret)
3124f7917c00SJeff Kirsher 		goto err_unlock;
3125f7917c00SJeff Kirsher 
3126f7917c00SJeff Kirsher 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3127f7917c00SJeff Kirsher 		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3128f7917c00SJeff Kirsher 					  q->fl[i].phys_addr, q->fl[i].size,
3129f7917c00SJeff Kirsher 					  q->fl[i].buf_size - SGE_PG_RSVD,
3130f7917c00SJeff Kirsher 					  p->cong_thres, 1, 0);
3131f7917c00SJeff Kirsher 		if (ret)
3132f7917c00SJeff Kirsher 			goto err_unlock;
3133f7917c00SJeff Kirsher 	}
3134f7917c00SJeff Kirsher 
3135f7917c00SJeff Kirsher 	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3136f7917c00SJeff Kirsher 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3137f7917c00SJeff Kirsher 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3138f7917c00SJeff Kirsher 				 1, 0);
3139f7917c00SJeff Kirsher 	if (ret)
3140f7917c00SJeff Kirsher 		goto err_unlock;
3141f7917c00SJeff Kirsher 
3142f7917c00SJeff Kirsher 	if (ntxq > 1) {
3143f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3144f7917c00SJeff Kirsher 					 USE_GTS, SGE_CNTXT_OFLD, id,
3145f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].phys_addr,
3146f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
3147f7917c00SJeff Kirsher 		if (ret)
3148f7917c00SJeff Kirsher 			goto err_unlock;
3149f7917c00SJeff Kirsher 	}
3150f7917c00SJeff Kirsher 
3151f7917c00SJeff Kirsher 	if (ntxq > 2) {
3152f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3153f7917c00SJeff Kirsher 					 SGE_CNTXT_CTRL, id,
3154f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].phys_addr,
3155f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].size,
3156f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].token, 1, 0);
3157f7917c00SJeff Kirsher 		if (ret)
3158f7917c00SJeff Kirsher 			goto err_unlock;
3159f7917c00SJeff Kirsher 	}
3160f7917c00SJeff Kirsher 
3161f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3162f7917c00SJeff Kirsher 
3163f7917c00SJeff Kirsher 	q->adap = adapter;
3164f7917c00SJeff Kirsher 	q->netdev = dev;
3165f7917c00SJeff Kirsher 	q->tx_q = netdevq;
3166f7917c00SJeff Kirsher 	t3_update_qset_coalesce(q, p);
3167f7917c00SJeff Kirsher 
3168f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3169f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3170f7917c00SJeff Kirsher 	if (!avail) {
3171f7917c00SJeff Kirsher 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3172f7917c00SJeff Kirsher 		goto err;
3173f7917c00SJeff Kirsher 	}
3174f7917c00SJeff Kirsher 	if (avail < q->fl[0].size)
3175f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3176f7917c00SJeff Kirsher 			avail);
3177f7917c00SJeff Kirsher 
3178f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3179f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3180f7917c00SJeff Kirsher 	if (avail < q->fl[1].size)
3181f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3182f7917c00SJeff Kirsher 			avail);
3183f7917c00SJeff Kirsher 	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3184f7917c00SJeff Kirsher 
3185f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3186f7917c00SJeff Kirsher 		     V_NEWTIMER(q->rspq.holdoff_tmr));
3187f7917c00SJeff Kirsher 
3188f7917c00SJeff Kirsher 	return 0;
3189f7917c00SJeff Kirsher 
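/*
 * Error handling: err_unlock drops the SGE register lock taken above and
 * then falls through to err, which releases everything allocated so far
 * via t3_free_qset().
 */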
3190f7917c00SJeff Kirsher err_unlock:
3191f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3192f7917c00SJeff Kirsher err:
3193f7917c00SJeff Kirsher 	t3_free_qset(adapter, q);
3194f7917c00SJeff Kirsher 	return ret;
3195f7917c00SJeff Kirsher }
3196f7917c00SJeff Kirsher 
3197f7917c00SJeff Kirsher /**
3198f7917c00SJeff Kirsher  *	t3_start_sge_timers - start SGE timer callbacks
3199f7917c00SJeff Kirsher  *	@adap: the adapter
3200f7917c00SJeff Kirsher  *
3201f7917c00SJeff Kirsher  *	Starts each SGE queue set's timer callback
3202f7917c00SJeff Kirsher  */
3203f7917c00SJeff Kirsher void t3_start_sge_timers(struct adapter *adap)
3204f7917c00SJeff Kirsher {
3205f7917c00SJeff Kirsher 	int i;
3206f7917c00SJeff Kirsher 
3207f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3208f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3209f7917c00SJeff Kirsher 
3210f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
3211f7917c00SJeff Kirsher 			mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3212f7917c00SJeff Kirsher 
3213f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
3214f7917c00SJeff Kirsher 			mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3215f7917c00SJeff Kirsher 	}
3216f7917c00SJeff Kirsher }
3217f7917c00SJeff Kirsher 
3218f7917c00SJeff Kirsher /**
3219f7917c00SJeff Kirsher  *	t3_stop_sge_timers - stop SGE timer callbacks
3220f7917c00SJeff Kirsher  *	@adap: the adapter
3221f7917c00SJeff Kirsher  *
3222f7917c00SJeff Kirsher  *	Stops each SGE queue set's timer callback
3223f7917c00SJeff Kirsher  */
3224f7917c00SJeff Kirsher void t3_stop_sge_timers(struct adapter *adap)
3225f7917c00SJeff Kirsher {
3226f7917c00SJeff Kirsher 	int i;
3227f7917c00SJeff Kirsher 
3228f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3229f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3230f7917c00SJeff Kirsher 
3231f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
3232f7917c00SJeff Kirsher 			del_timer_sync(&q->tx_reclaim_timer);
3233f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
3234f7917c00SJeff Kirsher 			del_timer_sync(&q->rx_reclaim_timer);
3235f7917c00SJeff Kirsher 	}
3236f7917c00SJeff Kirsher }
3237f7917c00SJeff Kirsher 
3238f7917c00SJeff Kirsher /**
3239f7917c00SJeff Kirsher  *	t3_free_sge_resources - free SGE resources
3240f7917c00SJeff Kirsher  *	@adap: the adapter
3241f7917c00SJeff Kirsher  *
3242f7917c00SJeff Kirsher  *	Frees resources used by the SGE queue sets.
3243f7917c00SJeff Kirsher  */
3244f7917c00SJeff Kirsher void t3_free_sge_resources(struct adapter *adap)
3245f7917c00SJeff Kirsher {
3246f7917c00SJeff Kirsher 	int i;
3247f7917c00SJeff Kirsher 
3248f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i)
3249f7917c00SJeff Kirsher 		t3_free_qset(adap, &adap->sge.qs[i]);
3250f7917c00SJeff Kirsher }
3251f7917c00SJeff Kirsher 
3252f7917c00SJeff Kirsher /**
3253f7917c00SJeff Kirsher  *	t3_sge_start - enable SGE
3254f7917c00SJeff Kirsher  *	@adap: the adapter
3255f7917c00SJeff Kirsher  *
3256f7917c00SJeff Kirsher  *	Enables the SGE for DMAs.  This is the last step in starting packet
3257f7917c00SJeff Kirsher  *	transfers.
3258f7917c00SJeff Kirsher  */
3259f7917c00SJeff Kirsher void t3_sge_start(struct adapter *adap)
3260f7917c00SJeff Kirsher {
3261f7917c00SJeff Kirsher 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3262f7917c00SJeff Kirsher }
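
/*
 * A minimal sketch of the expected bring-up order (argument and field names
 * here are illustrative; the driver's probe/open paths are authoritative):
 *
 *	t3_sge_prep(adap, &adap->params.sge);	   once, at probe time
 *	t3_sge_init(adap, &adap->params.sge);	   after every chip reset
 *	... t3_sge_alloc_qset() for each queue set ...
 *	t3_start_sge_timers(adap);		   arm the reclaim timers
 *	t3_sge_start(adap);			   last step: enable SGE DMA
 */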
3263f7917c00SJeff Kirsher 
3264f7917c00SJeff Kirsher /**
3265f7917c00SJeff Kirsher  *	t3_sge_stop - disable SGE operation
3266f7917c00SJeff Kirsher  *	@adap: the adapter
3267f7917c00SJeff Kirsher  *
3268f7917c00SJeff Kirsher  *	Disables the DMA engine.  This can be called in emergencies (e.g.,
3269f7917c00SJeff Kirsher  *	from error interrupts) or from normal process context.  In the latter
3270f7917c00SJeff Kirsher  *	case it also disables any pending queue restart tasklets.  Note that
3271f7917c00SJeff Kirsher  *	if it is called in interrupt context it cannot disable the restart
3272f7917c00SJeff Kirsher  *	tasklets as it cannot wait; however, the tasklets will have no effect
3273f7917c00SJeff Kirsher  *	since the doorbells are disabled, and the driver will call this again
3274f7917c00SJeff Kirsher  *	later from process context, at which time the tasklets will be stopped
3275f7917c00SJeff Kirsher  *	if they are still running.
3276f7917c00SJeff Kirsher  */
3277f7917c00SJeff Kirsher void t3_sge_stop(struct adapter *adap)
3278f7917c00SJeff Kirsher {
3279f7917c00SJeff Kirsher 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3280f7917c00SJeff Kirsher 	if (!in_interrupt()) {
3281f7917c00SJeff Kirsher 		int i;
3282f7917c00SJeff Kirsher 
3283f7917c00SJeff Kirsher 		for (i = 0; i < SGE_QSETS; ++i) {
3284f7917c00SJeff Kirsher 			struct sge_qset *qs = &adap->sge.qs[i];
3285f7917c00SJeff Kirsher 
3286f7917c00SJeff Kirsher 			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3287f7917c00SJeff Kirsher 			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3288f7917c00SJeff Kirsher 		}
3289f7917c00SJeff Kirsher 	}
3290f7917c00SJeff Kirsher }
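
/*
 * The corresponding teardown is roughly the reverse (again just a sketch;
 * the actual driver shutdown paths are authoritative):
 *
 *	t3_sge_stop(adap);		   quiesce DMA and restart tasklets
 *	t3_stop_sge_timers(adap);	   cancel the reclaim timers
 *	t3_free_sge_resources(adap);	   release the rings and buffers
 */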
3291f7917c00SJeff Kirsher 
3292f7917c00SJeff Kirsher /**
3293f7917c00SJeff Kirsher  *	t3_sge_init - initialize SGE
3294f7917c00SJeff Kirsher  *	@adap: the adapter
3295f7917c00SJeff Kirsher  *	@p: the SGE parameters
3296f7917c00SJeff Kirsher  *
3297f7917c00SJeff Kirsher  *	Performs SGE initialization needed every time after a chip reset.
3298f7917c00SJeff Kirsher  *	We do not initialize any of the queue sets here, instead the driver
3299f7917c00SJeff Kirsher  *	top-level must request those individually.  We also do not enable DMA
3300f7917c00SJeff Kirsher  *	here, that should be done after the queues have been set up.
3301f7917c00SJeff Kirsher  */
3302f7917c00SJeff Kirsher void t3_sge_init(struct adapter *adap, struct sge_params *p)
3303f7917c00SJeff Kirsher {
3304f7917c00SJeff Kirsher 	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3305f7917c00SJeff Kirsher 
3306f7917c00SJeff Kirsher 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3307f7917c00SJeff Kirsher 	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3308f7917c00SJeff Kirsher 	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3309f7917c00SJeff Kirsher 	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3310f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 1
3311f7917c00SJeff Kirsher 	ctrl |= F_EGRGENCTRL;
3312f7917c00SJeff Kirsher #endif
3313f7917c00SJeff Kirsher 	if (adap->params.rev > 0) {
3314f7917c00SJeff Kirsher 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
3315f7917c00SJeff Kirsher 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3316f7917c00SJeff Kirsher 	}
3317f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
3318f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3319f7917c00SJeff Kirsher 		     V_LORCQDRBTHRSH(512));
3320f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3321f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3322f7917c00SJeff Kirsher 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3323f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3324f7917c00SJeff Kirsher 		     adap->params.rev < T3_REV_C ? 1000 : 500);
3325f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3326f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3327f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3328f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3329f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3330f7917c00SJeff Kirsher }
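
/*
 * Worked example of the arithmetic above, assuming a 4KB-page kernel and a
 * power-of-two BAR2 (illustrative only): with PAGE_SHIFT == 12,
 * V_HOSTPAGESIZE(PAGE_SHIFT - 11) writes 1; a BAR2 of 2^N * 4KB gives
 * ffs(len >> 12) == N + 1, so V_USERSPACESIZE(ups - 1) writes N.  Both
 * fields thus appear to encode sizes as powers of two.
 */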
3331f7917c00SJeff Kirsher 
3332f7917c00SJeff Kirsher /**
3333f7917c00SJeff Kirsher  *	t3_sge_prep - one-time SGE initialization
3334f7917c00SJeff Kirsher  *	@adap: the associated adapter
3335f7917c00SJeff Kirsher  *	@p: SGE parameters
3336f7917c00SJeff Kirsher  *
3337f7917c00SJeff Kirsher  *	Performs one-time initialization of the SGE software state.  This
3338f7917c00SJeff Kirsher  *	includes determining defaults for the assorted SGE parameters, which
3339f7917c00SJeff Kirsher  *	administrators can change until they are used to initialize the SGE.
3340f7917c00SJeff Kirsher  */
3341f7917c00SJeff Kirsher void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3342f7917c00SJeff Kirsher {
3343f7917c00SJeff Kirsher 	int i;
3344f7917c00SJeff Kirsher 
3345f7917c00SJeff Kirsher 	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3346f7917c00SJeff Kirsher 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3347f7917c00SJeff Kirsher 
3348f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3349f7917c00SJeff Kirsher 		struct qset_params *q = p->qset + i;
3350f7917c00SJeff Kirsher 
3351f7917c00SJeff Kirsher 		q->polling = adap->params.rev > 0;
3352f7917c00SJeff Kirsher 		q->coalesce_usecs = 5;
3353f7917c00SJeff Kirsher 		q->rspq_size = 1024;
3354f7917c00SJeff Kirsher 		q->fl_size = 1024;
3355f7917c00SJeff Kirsher 		q->jumbo_size = 512;
3356f7917c00SJeff Kirsher 		q->txq_size[TXQ_ETH] = 1024;
3357f7917c00SJeff Kirsher 		q->txq_size[TXQ_OFLD] = 1024;
3358f7917c00SJeff Kirsher 		q->txq_size[TXQ_CTRL] = 256;
3359f7917c00SJeff Kirsher 		q->cong_thres = 0;
3360f7917c00SJeff Kirsher 	}
3361f7917c00SJeff Kirsher 
3362f7917c00SJeff Kirsher 	spin_lock_init(&adap->sge.reg_lock);
3363f7917c00SJeff Kirsher }
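
/*
 * The max_pkt_size default above is chosen so that a received frame, its
 * preceding CPL header and the trailing skb_shared_info all fit in a single
 * 16KB buffer.  The per-qset defaults (1024-entry response queue, free list
 * and Ethernet/offload TX queues, a 512-entry jumbo free list, a 256-entry
 * control queue, 5us coalescing) remain changeable, e.g. through ethtool,
 * until the queue sets are created.
 */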
3364