/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

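/*
 * Bytes reserved at the end of each free-list page-chunk buffer.  The page's
 * SW reference count (p_cnt) lives in this reserved area, and it is excluded
 * from the DMA syncs of received data.
 */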
#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U
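/*
 * The two chunk values bound how many Tx descriptors a single reclaim pass
 * may free: a larger batch from the periodic reclaim timer and a smaller one
 * from the transmit path, keeping each pass (and the queue lock hold) short.
 */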

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

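/*
 * Small packets can be delivered entirely within a response entry through the
 * 47 bytes of immediate data; the trailing byte carries the interrupt and
 * generation bits.
 */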
struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
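/* e.g. with SGE_NUM_GENBITS == 2, a 16-flit WR maps to 2 descriptors above */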

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
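	/* ensure our reads of the returned response entries complete before
	 * HW is allowed to reuse them
	 */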
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The compiler
 *	optimizes away the unmapping code when this returns false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, the information necessary to unmap an
 *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 *	descriptors (the physical addresses of the various data buffers), and
 *	the SW descriptor state (assorted indices).  The send functions
 *	initialize the indices for the first packet descriptor so we can unmap
 *	the buffers held in the first Tx descriptor here, and we have enough
 *	information at this point to set the state for the next Tx descriptor.
 *
 *	Note that it is possible to clean up the first descriptor of a packet
 *	before the send routines have written the next descriptors, but this
 *	race does not cause any problem.  We just end up writing the unmapping
 *	info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

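	/*
	 * Walk the SGL one address at a time: j selects addr[0]/addr[1]
	 * within the current sg_ent, and curflit tracks the flit position so
	 * we stop once the SGL spills past this descriptor (WR_FLITS).
	 */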
	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop) {
				dev_consume_skb_any(d->skb);
				d->skb = NULL;
			}
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@chunk: maximum number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			pci_unmap_page(pdev,
				       d->pg_chunk.mapping,
				       q->alloc_size, PCI_DMA_FROMDEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];


		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va:  buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	dma_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
			__free_pages(q->pg_chunk.page, order);
			q->pg_chunk.page = NULL;
			return -EIO;
		}
		q->pg_chunk.mapping = mapping;
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

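	/*
	 * The first buffer carved from a page initializes the SW reference
	 * count kept in the page itself; later buffers from the same page
	 * just bump it.  clear_rx_desc() drops the count and unmaps the page
	 * once it reaches zero.
	 */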
	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
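	/* batch doorbell writes: tell HW about new buffers only once the
	 * pending count reaches a quarter of the list's current credits
	 */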
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must ensure that
 *	@n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			dma_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			pci_dma_sync_single_for_device(adap->pdev, mapping,
						q->buf_size - SGE_PG_RSVD,
						PCI_DMA_FROMDEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adap: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	dma_wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t * phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	return p;
}

/**
 *	t3_reset_qset - reset a sge qset
 *	@q: the queue set
 *
 *	Reset the qset structure.  The NAPI structure is preserved in the
 *	event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}


/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
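	/* each sg_ent packs two address/length pairs into 3 flits, so a full
	 * pair costs 3 flits and a trailing odd entry costs 2
	 */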
	return (3 * n) / 2 + (n & 1);
}

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@q: the queue
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise we attach the original buffer as a page fragment to a fresh
 *	sk_buff.  If a positive drop threshold is supplied packets are dropped
 *	and their buffers recycled if (a) the number of remaining buffers is
 *	under the threshold and the packet is too big to copy, or (b) there's
 *	no system memory.
 *
 * 	Note: this function is similar to @get_packet but deals with Rx buffers
 * 	that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);

	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
						       len,
						       PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
				    PCI_DMA_FROMDEVICE);
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		pci_unmap_page(adap->pdev,
			       sd->pg_chunk.mapping,
			       fl->alloc_size,
			       PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 * 	Returns the number of Tx descriptors needed for the given Ethernet
 * 	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

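	/* two flits cover the WR + CPL_TX_PKT header; LSO packets need one
	 * more flit for the larger LSO CPL header
	 */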
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

/*	map_skb - map a packet main body and its page fragments
 *	@pdev: the PCI device
 *	@skb: the packet
 *	@addr: placeholder to save the mapped addresses
 *
 *	Map the main body of an sk_buff and its page fragments, if any.
 */
static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	if (skb_headlen(skb)) {
		*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
				       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto out_err;
		addr++;
	}

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
					 DMA_TO_DEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto unwind;
		addr++;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
			       DMA_TO_DEVICE);

	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
out_err:
	return -ENOMEM;
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@addr: the list of the mapped addresses
 *
 *	Copies the scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int write_sgl(const struct sk_buff *skb,
				     struct sg_ent *sgp, unsigned char *start,
				     unsigned int len, const dma_addr_t *addr)
{
	unsigned int i, j = 0, k = 0, nfrags;

	if (len) {
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[j++] = cpu_to_be64(addr[k++]);
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(addr[k++]);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race:
 *	the HW may go to sleep just after we check, but in that case the
 *	interrupt handler will detect the outstanding Tx packet and ring the
 *	doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

1066f7917c00SJeff Kirsher static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1067f7917c00SJeff Kirsher {
1068f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 2
1069f7917c00SJeff Kirsher 	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1070f7917c00SJeff Kirsher #endif
1071f7917c00SJeff Kirsher }
1072f7917c00SJeff Kirsher 
1073f7917c00SJeff Kirsher /**
1074f7917c00SJeff Kirsher  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1075f7917c00SJeff Kirsher  *	@ndesc: number of Tx descriptors spanned by the SGL
1076f7917c00SJeff Kirsher  *	@skb: the packet corresponding to the WR
1077f7917c00SJeff Kirsher  *	@d: first Tx descriptor to be written
1078f7917c00SJeff Kirsher  *	@pidx: index of the first of the above descriptors
1079f7917c00SJeff Kirsher  *	@q: the SGE Tx queue
1080f7917c00SJeff Kirsher  *	@sgl: the SGL
1081f7917c00SJeff Kirsher  *	@flits: number of flits to the start of the SGL in the first descriptor
1082f7917c00SJeff Kirsher  *	@sgl_flits: the SGL size in flits
1083f7917c00SJeff Kirsher  *	@gen: the Tx descriptor generation
1084f7917c00SJeff Kirsher  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1085f7917c00SJeff Kirsher  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1086f7917c00SJeff Kirsher  *
1087f7917c00SJeff Kirsher  *	Write a work request header and an associated SGL.  If the SGL is
1088f7917c00SJeff Kirsher  *	small enough to fit into one Tx descriptor it has already been written
1089f7917c00SJeff Kirsher  *	and we just need to write the WR header.  Otherwise we distribute the
1090f7917c00SJeff Kirsher  *	SGL across the number of descriptors it spans.
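 *
 *	When the SGL spans several descriptors, each descriptor after the
 *	first gets its own one-flit WR header, and the generation bit of the
 *	first descriptor is written last so the hardware never sees a
 *	partially written request.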
1091f7917c00SJeff Kirsher  */
1092f7917c00SJeff Kirsher static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1093f7917c00SJeff Kirsher 			     struct tx_desc *d, unsigned int pidx,
1094f7917c00SJeff Kirsher 			     const struct sge_txq *q,
1095f7917c00SJeff Kirsher 			     const struct sg_ent *sgl,
1096f7917c00SJeff Kirsher 			     unsigned int flits, unsigned int sgl_flits,
1097f7917c00SJeff Kirsher 			     unsigned int gen, __be32 wr_hi,
1098f7917c00SJeff Kirsher 			     __be32 wr_lo)
1099f7917c00SJeff Kirsher {
1100f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1101f7917c00SJeff Kirsher 	struct tx_sw_desc *sd = &q->sdesc[pidx];
1102f7917c00SJeff Kirsher 
1103f7917c00SJeff Kirsher 	sd->skb = skb;
1104f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1105f7917c00SJeff Kirsher 		sd->fragidx = 0;
1106f7917c00SJeff Kirsher 		sd->addr_idx = 0;
1107f7917c00SJeff Kirsher 		sd->sflit = flits;
1108f7917c00SJeff Kirsher 	}
1109f7917c00SJeff Kirsher 
1110f7917c00SJeff Kirsher 	if (likely(ndesc == 1)) {
1111f7917c00SJeff Kirsher 		sd->eop = 1;
1112f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1113f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1114019be1cfSAlexander Duyck 		dma_wmb();
1115f7917c00SJeff Kirsher 		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1116f7917c00SJeff Kirsher 				   V_WR_GEN(gen)) | wr_lo;
1117f7917c00SJeff Kirsher 		wr_gen2(d, gen);
1118f7917c00SJeff Kirsher 	} else {
1119f7917c00SJeff Kirsher 		unsigned int ogen = gen;
1120f7917c00SJeff Kirsher 		const u64 *fp = (const u64 *)sgl;
1121f7917c00SJeff Kirsher 		struct work_request_hdr *wp = wrp;
1122f7917c00SJeff Kirsher 
1123f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1124f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1125f7917c00SJeff Kirsher 
1126f7917c00SJeff Kirsher 		while (sgl_flits) {
1127f7917c00SJeff Kirsher 			unsigned int avail = WR_FLITS - flits;
1128f7917c00SJeff Kirsher 
1129f7917c00SJeff Kirsher 			if (avail > sgl_flits)
1130f7917c00SJeff Kirsher 				avail = sgl_flits;
1131f7917c00SJeff Kirsher 			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1132f7917c00SJeff Kirsher 			sgl_flits -= avail;
1133f7917c00SJeff Kirsher 			ndesc--;
1134f7917c00SJeff Kirsher 			if (!sgl_flits)
1135f7917c00SJeff Kirsher 				break;
1136f7917c00SJeff Kirsher 
1137f7917c00SJeff Kirsher 			fp += avail;
1138f7917c00SJeff Kirsher 			d++;
1139f7917c00SJeff Kirsher 			sd->eop = 0;
1140f7917c00SJeff Kirsher 			sd++;
1141f7917c00SJeff Kirsher 			if (++pidx == q->size) {
1142f7917c00SJeff Kirsher 				pidx = 0;
1143f7917c00SJeff Kirsher 				gen ^= 1;
1144f7917c00SJeff Kirsher 				d = q->desc;
1145f7917c00SJeff Kirsher 				sd = q->sdesc;
1146f7917c00SJeff Kirsher 			}
1147f7917c00SJeff Kirsher 
1148f7917c00SJeff Kirsher 			sd->skb = skb;
1149f7917c00SJeff Kirsher 			wrp = (struct work_request_hdr *)d;
1150f7917c00SJeff Kirsher 			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1151f7917c00SJeff Kirsher 					   V_WR_SGLSFLT(1)) | wr_hi;
1152f7917c00SJeff Kirsher 			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1153f7917c00SJeff Kirsher 							sgl_flits + 1)) |
1154f7917c00SJeff Kirsher 					   V_WR_GEN(gen)) | wr_lo;
1155f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1156f7917c00SJeff Kirsher 			flits = 1;
1157f7917c00SJeff Kirsher 		}
1158f7917c00SJeff Kirsher 		sd->eop = 1;
1159f7917c00SJeff Kirsher 		wrp->wr_hi |= htonl(F_WR_EOP);
1160019be1cfSAlexander Duyck 		dma_wmb();
1161f7917c00SJeff Kirsher 		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1162f7917c00SJeff Kirsher 		wr_gen2((struct tx_desc *)wp, ogen);
1163f7917c00SJeff Kirsher 		WARN_ON(ndesc != 0);
1164f7917c00SJeff Kirsher 	}
1165f7917c00SJeff Kirsher }
1166f7917c00SJeff Kirsher 
1167f7917c00SJeff Kirsher /**
1168f7917c00SJeff Kirsher  *	write_tx_pkt_wr - write a TX_PKT work request
1169f7917c00SJeff Kirsher  *	@adap: the adapter
1170f7917c00SJeff Kirsher  *	@skb: the packet to send
1171f7917c00SJeff Kirsher  *	@pi: the egress interface
1172f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1173f7917c00SJeff Kirsher  *	@gen: the generation value to use
1174f7917c00SJeff Kirsher  *	@q: the Tx queue
1175f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1176f7917c00SJeff Kirsher  *	@compl: the value of the COMPL bit to use
1177d0ea5cbdSJesse Brandeburg  *	@addr: the list of DMA-mapped addresses for the packet's buffers
1178f7917c00SJeff Kirsher  *
1179f7917c00SJeff Kirsher  *	Generate a TX_PKT work request to send the supplied packet.
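 *
 *	Packets small enough to fit in the work request itself are copied
 *	into the descriptor as immediate data; larger packets are described
 *	by a scatter/gather list built from @addr.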
1180f7917c00SJeff Kirsher  */
1181f7917c00SJeff Kirsher static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1182f7917c00SJeff Kirsher 			    const struct port_info *pi,
1183f7917c00SJeff Kirsher 			    unsigned int pidx, unsigned int gen,
1184f7917c00SJeff Kirsher 			    struct sge_txq *q, unsigned int ndesc,
1185c69fe407SArjun Vynipadath 			    unsigned int compl, const dma_addr_t *addr)
1186f7917c00SJeff Kirsher {
1187f7917c00SJeff Kirsher 	unsigned int flits, sgl_flits, cntrl, tso_info;
1188f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1189f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1190f7917c00SJeff Kirsher 	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1191f7917c00SJeff Kirsher 
1192f7917c00SJeff Kirsher 	cpl->len = htonl(skb->len);
1193f7917c00SJeff Kirsher 	cntrl = V_TXPKT_INTF(pi->port_id);
1194f7917c00SJeff Kirsher 
1195df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1196df8a39deSJiri Pirko 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1197f7917c00SJeff Kirsher 
1198f7917c00SJeff Kirsher 	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1199f7917c00SJeff Kirsher 	if (tso_info) {
1200f7917c00SJeff Kirsher 		int eth_type;
1201f7917c00SJeff Kirsher 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1202f7917c00SJeff Kirsher 
1203f7917c00SJeff Kirsher 		d->flit[2] = 0;
1204f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1205f7917c00SJeff Kirsher 		hdr->cntrl = htonl(cntrl);
1206f7917c00SJeff Kirsher 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
1207f7917c00SJeff Kirsher 		    CPL_ETH_II : CPL_ETH_II_VLAN;
1208f7917c00SJeff Kirsher 		tso_info |= V_LSO_ETH_TYPE(eth_type) |
1209f7917c00SJeff Kirsher 		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1210f7917c00SJeff Kirsher 		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1211f7917c00SJeff Kirsher 		hdr->lso_info = htonl(tso_info);
1212f7917c00SJeff Kirsher 		flits = 3;
1213f7917c00SJeff Kirsher 	} else {
1214f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1215f7917c00SJeff Kirsher 		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
1216f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1217f7917c00SJeff Kirsher 		cpl->cntrl = htonl(cntrl);
1218f7917c00SJeff Kirsher 
1219f7917c00SJeff Kirsher 		if (skb->len <= WR_LEN - sizeof(*cpl)) {
1220f7917c00SJeff Kirsher 			q->sdesc[pidx].skb = NULL;
1221f7917c00SJeff Kirsher 			if (!skb->data_len)
1222f7917c00SJeff Kirsher 				skb_copy_from_linear_data(skb, &d->flit[2],
1223f7917c00SJeff Kirsher 							  skb->len);
1224f7917c00SJeff Kirsher 			else
1225f7917c00SJeff Kirsher 				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1226f7917c00SJeff Kirsher 
1227f7917c00SJeff Kirsher 			flits = (skb->len + 7) / 8 + 2;
1228f7917c00SJeff Kirsher 			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1229f7917c00SJeff Kirsher 					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1230f7917c00SJeff Kirsher 					      | F_WR_SOP | F_WR_EOP | compl);
1231019be1cfSAlexander Duyck 			dma_wmb();
1232f7917c00SJeff Kirsher 			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1233f7917c00SJeff Kirsher 					      V_WR_TID(q->token));
1234f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1235f9ec8131SEric W. Biederman 			dev_consume_skb_any(skb);
1236f7917c00SJeff Kirsher 			return;
1237f7917c00SJeff Kirsher 		}
1238f7917c00SJeff Kirsher 
1239f7917c00SJeff Kirsher 		flits = 2;
1240f7917c00SJeff Kirsher 	}
1241f7917c00SJeff Kirsher 
1242f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1243c69fe407SArjun Vynipadath 	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1244f7917c00SJeff Kirsher 
1245f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1246f7917c00SJeff Kirsher 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1247f7917c00SJeff Kirsher 			 htonl(V_WR_TID(q->token)));
1248f7917c00SJeff Kirsher }
1249f7917c00SJeff Kirsher 
1250f7917c00SJeff Kirsher static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1251f7917c00SJeff Kirsher 				    struct sge_qset *qs, struct sge_txq *q)
1252f7917c00SJeff Kirsher {
1253f7917c00SJeff Kirsher 	netif_tx_stop_queue(txq);
1254f7917c00SJeff Kirsher 	set_bit(TXQ_ETH, &qs->txq_stopped);
1255f7917c00SJeff Kirsher 	q->stops++;
1256f7917c00SJeff Kirsher }
1257f7917c00SJeff Kirsher 
1258f7917c00SJeff Kirsher /**
1259aeed744aSYang Shen  *	t3_eth_xmit - add a packet to the Ethernet Tx queue
1260f7917c00SJeff Kirsher  *	@skb: the packet
1261f7917c00SJeff Kirsher  *	@dev: the egress net device
1262f7917c00SJeff Kirsher  *
1263f7917c00SJeff Kirsher  *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
1264f7917c00SJeff Kirsher  */
1265f7917c00SJeff Kirsher netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1266f7917c00SJeff Kirsher {
1267f7917c00SJeff Kirsher 	int qidx;
1268f7917c00SJeff Kirsher 	unsigned int ndesc, pidx, credits, gen, compl;
1269f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(dev);
1270f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1271f7917c00SJeff Kirsher 	struct netdev_queue *txq;
1272f7917c00SJeff Kirsher 	struct sge_qset *qs;
1273f7917c00SJeff Kirsher 	struct sge_txq *q;
1274c69fe407SArjun Vynipadath 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1275f7917c00SJeff Kirsher 
1276f7917c00SJeff Kirsher 	/*
1277f7917c00SJeff Kirsher 	 * The chip min packet length is 9 octets but play safe and reject
1278f7917c00SJeff Kirsher 	 * anything shorter than an Ethernet header.
1279f7917c00SJeff Kirsher 	 */
1280f7917c00SJeff Kirsher 	if (unlikely(skb->len < ETH_HLEN)) {
1281f9ec8131SEric W. Biederman 		dev_kfree_skb_any(skb);
1282f7917c00SJeff Kirsher 		return NETDEV_TX_OK;
1283f7917c00SJeff Kirsher 	}
1284f7917c00SJeff Kirsher 
1285f7917c00SJeff Kirsher 	qidx = skb_get_queue_mapping(skb);
1286f7917c00SJeff Kirsher 	qs = &pi->qs[qidx];
1287f7917c00SJeff Kirsher 	q = &qs->txq[TXQ_ETH];
1288f7917c00SJeff Kirsher 	txq = netdev_get_tx_queue(dev, qidx);
1289f7917c00SJeff Kirsher 
1290f7917c00SJeff Kirsher 	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1291f7917c00SJeff Kirsher 
1292f7917c00SJeff Kirsher 	credits = q->size - q->in_use;
1293f7917c00SJeff Kirsher 	ndesc = calc_tx_descs(skb);
1294f7917c00SJeff Kirsher 
1295f7917c00SJeff Kirsher 	if (unlikely(credits < ndesc)) {
1296f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1297f7917c00SJeff Kirsher 		dev_err(&adap->pdev->dev,
1298f7917c00SJeff Kirsher 			"%s: Tx ring %u full while queue awake!\n",
1299f7917c00SJeff Kirsher 			dev->name, q->cntxt_id & 7);
1300f7917c00SJeff Kirsher 		return NETDEV_TX_BUSY;
1301f7917c00SJeff Kirsher 	}
1302f7917c00SJeff Kirsher 
1303c69fe407SArjun Vynipadath 	/* If the packet can't be sent as immediate data, map it for DMA */
1304c69fe407SArjun Vynipadath 	if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
1305c69fe407SArjun Vynipadath 		if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1306c69fe407SArjun Vynipadath 			dev_kfree_skb(skb);
1307c69fe407SArjun Vynipadath 			return NETDEV_TX_OK;
1308c69fe407SArjun Vynipadath 		}
1309c69fe407SArjun Vynipadath 	}
1310c69fe407SArjun Vynipadath 
1311f7917c00SJeff Kirsher 	q->in_use += ndesc;
1312f7917c00SJeff Kirsher 	if (unlikely(credits - ndesc < q->stop_thres)) {
1313f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1314f7917c00SJeff Kirsher 
1315f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1316f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1317f7917c00SJeff Kirsher 			q->restarts++;
1318f7917c00SJeff Kirsher 			netif_tx_start_queue(txq);
1319f7917c00SJeff Kirsher 		}
1320f7917c00SJeff Kirsher 	}
1321f7917c00SJeff Kirsher 
1322f7917c00SJeff Kirsher 	gen = q->gen;
1323f7917c00SJeff Kirsher 	q->unacked += ndesc;
1324f7917c00SJeff Kirsher 	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1325f7917c00SJeff Kirsher 	q->unacked &= 7;
1326f7917c00SJeff Kirsher 	pidx = q->pidx;
1327f7917c00SJeff Kirsher 	q->pidx += ndesc;
1328f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1329f7917c00SJeff Kirsher 		q->pidx -= q->size;
1330f7917c00SJeff Kirsher 		q->gen ^= 1;
1331f7917c00SJeff Kirsher 	}
1332f7917c00SJeff Kirsher 
1333f7917c00SJeff Kirsher 	/* update port statistics */
1334bc6c47b5SVipul Pandya 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1335f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1336f7917c00SJeff Kirsher 	if (skb_shinfo(skb)->gso_size)
1337f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TSO]++;
1338df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1339f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANINS]++;
1340f7917c00SJeff Kirsher 
1341f7917c00SJeff Kirsher 	/*
1342f7917c00SJeff Kirsher 	 * We do not use Tx completion interrupts to free DMAd Tx packets.
1343f7917c00SJeff Kirsher 	 * This is good for performance but means that we rely on new Tx
1344f7917c00SJeff Kirsher 	 * packets arriving to run the destructors of completed packets,
1345f7917c00SJeff Kirsher 	 * which open up space in their sockets' send queues.  Sometimes
1346f7917c00SJeff Kirsher 	 * we do not get such new packets causing Tx to stall.  A single
1347f7917c00SJeff Kirsher 	 * UDP transmitter is a good example of this situation.  We have
1348f7917c00SJeff Kirsher 	 * a clean up timer that periodically reclaims completed packets
1349f7917c00SJeff Kirsher 	 * but it doesn't run often enough (nor do we want it to) to prevent
1350f7917c00SJeff Kirsher 	 * lengthy stalls.  A solution to this problem is to run the
1351f7917c00SJeff Kirsher 	 * destructor early, after the packet is queued but before it's DMAd.
1352f7917c00SJeff Kirsher 	 * A downside is that we lie to socket memory accounting, but the amount
1353f7917c00SJeff Kirsher 	 * of extra memory is reasonable (limited by the number of Tx
1354f7917c00SJeff Kirsher 	 * descriptors), the packets do actually get freed quickly by new
1355f7917c00SJeff Kirsher 	 * packets almost always, and for protocols like TCP that wait for
1356f7917c00SJeff Kirsher 	 * acks to really free up the data the extra memory is even less.
1357f7917c00SJeff Kirsher 	 * On the positive side we run the destructors on the sending CPU
1358f7917c00SJeff Kirsher 	 * rather than on a potentially different completing CPU, usually a
1359f7917c00SJeff Kirsher 	 * good thing.  We also run them without holding our Tx queue lock,
1360f7917c00SJeff Kirsher 	 * unlike what reclaim_completed_tx() would otherwise do.
1361f7917c00SJeff Kirsher 	 *
1362f7917c00SJeff Kirsher 	 * Run the destructor before telling the DMA engine about the packet
1363f7917c00SJeff Kirsher 	 * to make sure it doesn't complete and get freed prematurely.
1364f7917c00SJeff Kirsher 	 */
1365f7917c00SJeff Kirsher 	if (likely(!skb_shared(skb)))
1366f7917c00SJeff Kirsher 		skb_orphan(skb);
1367f7917c00SJeff Kirsher 
1368c69fe407SArjun Vynipadath 	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1369f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1370f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1371f7917c00SJeff Kirsher }
1372f7917c00SJeff Kirsher 
1373f7917c00SJeff Kirsher /**
1374f7917c00SJeff Kirsher  *	write_imm - write a packet into a Tx descriptor as immediate data
1375f7917c00SJeff Kirsher  *	@d: the Tx descriptor to write
1376f7917c00SJeff Kirsher  *	@skb: the packet
1377f7917c00SJeff Kirsher  *	@len: the length of packet data to write as immediate data
1378f7917c00SJeff Kirsher  *	@gen: the generation bit value to write
1379f7917c00SJeff Kirsher  *
1380f7917c00SJeff Kirsher  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1381f7917c00SJeff Kirsher  *	contains a work request at its beginning.  We must write the packet
1382f7917c00SJeff Kirsher  *	carefully so the SGE doesn't read it accidentally before it's written
1383f7917c00SJeff Kirsher  *	in its entirety.
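 *
 *	Concretely, the body and wr_hi are copied first and, only after a
 *	dma_wmb(), wr_lo (which carries the generation bit) is written, so
 *	the SGE cannot treat the descriptor as valid before its contents are
 *	visible.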
1384f7917c00SJeff Kirsher  */
1385f7917c00SJeff Kirsher static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1386f7917c00SJeff Kirsher 			     unsigned int len, unsigned int gen)
1387f7917c00SJeff Kirsher {
1388f7917c00SJeff Kirsher 	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1389f7917c00SJeff Kirsher 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1390f7917c00SJeff Kirsher 
1391f7917c00SJeff Kirsher 	if (likely(!skb->data_len))
1392f7917c00SJeff Kirsher 		memcpy(&to[1], &from[1], len - sizeof(*from));
1393f7917c00SJeff Kirsher 	else
1394f7917c00SJeff Kirsher 		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1395f7917c00SJeff Kirsher 
1396f7917c00SJeff Kirsher 	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1397f7917c00SJeff Kirsher 					V_WR_BCNTLFLT(len & 7));
1398019be1cfSAlexander Duyck 	dma_wmb();
1399f7917c00SJeff Kirsher 	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1400f7917c00SJeff Kirsher 					V_WR_LEN((len + 7) / 8));
1401f7917c00SJeff Kirsher 	wr_gen2(d, gen);
1402f7917c00SJeff Kirsher 	kfree_skb(skb);
1403f7917c00SJeff Kirsher }
1404f7917c00SJeff Kirsher 
1405f7917c00SJeff Kirsher /**
1406f7917c00SJeff Kirsher  *	check_desc_avail - check descriptor availability on a send queue
1407f7917c00SJeff Kirsher  *	@adap: the adapter
1408f7917c00SJeff Kirsher  *	@q: the send queue
1409f7917c00SJeff Kirsher  *	@skb: the packet needing the descriptors
1410f7917c00SJeff Kirsher  *	@ndesc: the number of Tx descriptors needed
1411f7917c00SJeff Kirsher  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1412f7917c00SJeff Kirsher  *
1413f7917c00SJeff Kirsher  *	Checks if the requested number of Tx descriptors is available on an
1414f7917c00SJeff Kirsher  *	SGE send queue.  If the queue is already suspended or not enough
1415f7917c00SJeff Kirsher  *	descriptors are available the packet is queued for later transmission.
1416f7917c00SJeff Kirsher  *	Must be called with the Tx queue locked.
1417f7917c00SJeff Kirsher  *
1418f7917c00SJeff Kirsher  *	Returns 0 if enough descriptors are available, 1 if there aren't
1419f7917c00SJeff Kirsher  *	enough descriptors and the packet has been queued, and 2 if the caller
1420f7917c00SJeff Kirsher  *	needs to retry because there weren't enough descriptors at the
1421f7917c00SJeff Kirsher  *	beginning of the call but some freed up in the meantime.
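 *
 *	On a return of 2 the callers below (ctrl_xmit() and ofld_xmit())
 *	simply retry from their reclaim step via their "again" labels.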
1422f7917c00SJeff Kirsher  */
1423f7917c00SJeff Kirsher static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1424f7917c00SJeff Kirsher 				   struct sk_buff *skb, unsigned int ndesc,
1425f7917c00SJeff Kirsher 				   unsigned int qid)
1426f7917c00SJeff Kirsher {
1427f7917c00SJeff Kirsher 	if (unlikely(!skb_queue_empty(&q->sendq))) {
1428f7917c00SJeff Kirsher 	      addq_exit:__skb_queue_tail(&q->sendq, skb);
1429f7917c00SJeff Kirsher 		return 1;
1430f7917c00SJeff Kirsher 	}
1431f7917c00SJeff Kirsher 	if (unlikely(q->size - q->in_use < ndesc)) {
1432f7917c00SJeff Kirsher 		struct sge_qset *qs = txq_to_qset(q, qid);
1433f7917c00SJeff Kirsher 
1434f7917c00SJeff Kirsher 		set_bit(qid, &qs->txq_stopped);
14354e857c58SPeter Zijlstra 		smp_mb__after_atomic();
1436f7917c00SJeff Kirsher 
1437f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1438f7917c00SJeff Kirsher 		    test_and_clear_bit(qid, &qs->txq_stopped))
1439f7917c00SJeff Kirsher 			return 2;
1440f7917c00SJeff Kirsher 
1441f7917c00SJeff Kirsher 		q->stops++;
1442f7917c00SJeff Kirsher 		goto addq_exit;
1443f7917c00SJeff Kirsher 	}
1444f7917c00SJeff Kirsher 	return 0;
1445f7917c00SJeff Kirsher }
1446f7917c00SJeff Kirsher 
1447f7917c00SJeff Kirsher /**
1448f7917c00SJeff Kirsher  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1449f7917c00SJeff Kirsher  *	@q: the SGE control Tx queue
1450f7917c00SJeff Kirsher  *
1451f7917c00SJeff Kirsher  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1452f7917c00SJeff Kirsher  *	that send only immediate data (presently just the control queues) and
1453f7917c00SJeff Kirsher  *	thus do not have any sk_buffs to release.
1454f7917c00SJeff Kirsher  */
1455f7917c00SJeff Kirsher static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1456f7917c00SJeff Kirsher {
1457f7917c00SJeff Kirsher 	unsigned int reclaim = q->processed - q->cleaned;
1458f7917c00SJeff Kirsher 
1459f7917c00SJeff Kirsher 	q->in_use -= reclaim;
1460f7917c00SJeff Kirsher 	q->cleaned += reclaim;
1461f7917c00SJeff Kirsher }
1462f7917c00SJeff Kirsher 
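/* A packet can be sent as immediate data if it fits entirely in one WR. */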
1463f7917c00SJeff Kirsher static inline int immediate(const struct sk_buff *skb)
1464f7917c00SJeff Kirsher {
1465f7917c00SJeff Kirsher 	return skb->len <= WR_LEN;
1466f7917c00SJeff Kirsher }
1467f7917c00SJeff Kirsher 
1468f7917c00SJeff Kirsher /**
1469f7917c00SJeff Kirsher  *	ctrl_xmit - send a packet through an SGE control Tx queue
1470f7917c00SJeff Kirsher  *	@adap: the adapter
1471f7917c00SJeff Kirsher  *	@q: the control queue
1472f7917c00SJeff Kirsher  *	@skb: the packet
1473f7917c00SJeff Kirsher  *
1474f7917c00SJeff Kirsher  *	Send a packet through an SGE control Tx queue.  Packets sent through
1475f7917c00SJeff Kirsher  *	a control queue must fit entirely as immediate data in a single Tx
1476f7917c00SJeff Kirsher  *	descriptor and have no page fragments.
1477f7917c00SJeff Kirsher  */
1478f7917c00SJeff Kirsher static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1479f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1480f7917c00SJeff Kirsher {
1481f7917c00SJeff Kirsher 	int ret;
1482f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1483f7917c00SJeff Kirsher 
1484f7917c00SJeff Kirsher 	if (unlikely(!immediate(skb))) {
1485f7917c00SJeff Kirsher 		WARN_ON(1);
1486f7917c00SJeff Kirsher 		dev_kfree_skb(skb);
1487f7917c00SJeff Kirsher 		return NET_XMIT_SUCCESS;
1488f7917c00SJeff Kirsher 	}
1489f7917c00SJeff Kirsher 
1490f7917c00SJeff Kirsher 	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1491f7917c00SJeff Kirsher 	wrp->wr_lo = htonl(V_WR_TID(q->token));
1492f7917c00SJeff Kirsher 
1493f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1494f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1495f7917c00SJeff Kirsher 
1496f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1497f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1498f7917c00SJeff Kirsher 		if (ret == 1) {
1499f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1500f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1501f7917c00SJeff Kirsher 		}
1502f7917c00SJeff Kirsher 		goto again;
1503f7917c00SJeff Kirsher 	}
1504f7917c00SJeff Kirsher 
1505f7917c00SJeff Kirsher 	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1506f7917c00SJeff Kirsher 
1507f7917c00SJeff Kirsher 	q->in_use++;
1508f7917c00SJeff Kirsher 	if (++q->pidx >= q->size) {
1509f7917c00SJeff Kirsher 		q->pidx = 0;
1510f7917c00SJeff Kirsher 		q->gen ^= 1;
1511f7917c00SJeff Kirsher 	}
1512f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1513f7917c00SJeff Kirsher 	wmb();
1514f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_KDOORBELL,
1515f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1516f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1517f7917c00SJeff Kirsher }
1518f7917c00SJeff Kirsher 
1519f7917c00SJeff Kirsher /**
1520f7917c00SJeff Kirsher  *	restart_ctrlq - restart a suspended control queue
15215e0b8928SÍñigo Huguet  *	@w: pointer to the work associated with this handler
1522f7917c00SJeff Kirsher  *
1523f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx control queue.
1524f7917c00SJeff Kirsher  */
15255e0b8928SÍñigo Huguet static void restart_ctrlq(struct work_struct *w)
1526f7917c00SJeff Kirsher {
1527f7917c00SJeff Kirsher 	struct sk_buff *skb;
15285e0b8928SÍñigo Huguet 	struct sge_qset *qs = container_of(w, struct sge_qset,
15295e0b8928SÍñigo Huguet 					   txq[TXQ_CTRL].qresume_task);
1530f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1531f7917c00SJeff Kirsher 
1532f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1533f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1534f7917c00SJeff Kirsher 
1535f7917c00SJeff Kirsher 	while (q->in_use < q->size &&
1536f7917c00SJeff Kirsher 	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
1537f7917c00SJeff Kirsher 
1538f7917c00SJeff Kirsher 		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1539f7917c00SJeff Kirsher 
1540f7917c00SJeff Kirsher 		if (++q->pidx >= q->size) {
1541f7917c00SJeff Kirsher 			q->pidx = 0;
1542f7917c00SJeff Kirsher 			q->gen ^= 1;
1543f7917c00SJeff Kirsher 		}
1544f7917c00SJeff Kirsher 		q->in_use++;
1545f7917c00SJeff Kirsher 	}
1546f7917c00SJeff Kirsher 
1547f7917c00SJeff Kirsher 	if (!skb_queue_empty(&q->sendq)) {
1548f7917c00SJeff Kirsher 		set_bit(TXQ_CTRL, &qs->txq_stopped);
15494e857c58SPeter Zijlstra 		smp_mb__after_atomic();
1550f7917c00SJeff Kirsher 
1551f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1552f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1553f7917c00SJeff Kirsher 			goto again;
1554f7917c00SJeff Kirsher 		q->stops++;
1555f7917c00SJeff Kirsher 	}
1556f7917c00SJeff Kirsher 
1557f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1558f7917c00SJeff Kirsher 	wmb();
1559f7917c00SJeff Kirsher 	t3_write_reg(qs->adap, A_SG_KDOORBELL,
1560f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1561f7917c00SJeff Kirsher }
1562f7917c00SJeff Kirsher 
1563f7917c00SJeff Kirsher /*
1564f7917c00SJeff Kirsher  * Send a management message through control queue 0
1565f7917c00SJeff Kirsher  */
1566f7917c00SJeff Kirsher int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1567f7917c00SJeff Kirsher {
1568f7917c00SJeff Kirsher 	int ret;
1569f7917c00SJeff Kirsher 	local_bh_disable();
1570f7917c00SJeff Kirsher 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1571f7917c00SJeff Kirsher 	local_bh_enable();
1572f7917c00SJeff Kirsher 
1573f7917c00SJeff Kirsher 	return ret;
1574f7917c00SJeff Kirsher }
1575f7917c00SJeff Kirsher 
1576f7917c00SJeff Kirsher /**
1577f7917c00SJeff Kirsher  *	deferred_unmap_destructor - unmap a packet when it is freed
1578f7917c00SJeff Kirsher  *	@skb: the packet
1579f7917c00SJeff Kirsher  *
1580f7917c00SJeff Kirsher  *	This is the packet destructor used for Tx packets that need to remain
1581f7917c00SJeff Kirsher  *	mapped until they are freed rather than until their Tx descriptors are
1582f7917c00SJeff Kirsher  *	freed.
1583f7917c00SJeff Kirsher  */
1584f7917c00SJeff Kirsher static void deferred_unmap_destructor(struct sk_buff *skb)
1585f7917c00SJeff Kirsher {
1586f7917c00SJeff Kirsher 	int i;
1587f7917c00SJeff Kirsher 	const dma_addr_t *p;
1588f7917c00SJeff Kirsher 	const struct skb_shared_info *si;
1589f7917c00SJeff Kirsher 	const struct deferred_unmap_info *dui;
1590f7917c00SJeff Kirsher 
1591f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1592f7917c00SJeff Kirsher 	p = dui->addr;
1593f7917c00SJeff Kirsher 
159415dd16c2SLi RongQing 	if (skb_tail_pointer(skb) - skb_transport_header(skb))
1595be8b678cSSimon Horman 		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
1596be8b678cSSimon Horman 				 skb_transport_header(skb), PCI_DMA_TODEVICE);
1597f7917c00SJeff Kirsher 
1598f7917c00SJeff Kirsher 	si = skb_shinfo(skb);
1599f7917c00SJeff Kirsher 	for (i = 0; i < si->nr_frags; i++)
16009e903e08SEric Dumazet 		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
1601f7917c00SJeff Kirsher 			       PCI_DMA_TODEVICE);
1602f7917c00SJeff Kirsher }
1603f7917c00SJeff Kirsher 
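/*
 * Record the DMA addresses of an SGL in the skb's headroom (as a
 * struct deferred_unmap_info) so that deferred_unmap_destructor() can
 * unmap the buffers when the skb is eventually freed.
 */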
1604f7917c00SJeff Kirsher static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1605f7917c00SJeff Kirsher 				     const struct sg_ent *sgl, int sgl_flits)
1606f7917c00SJeff Kirsher {
1607f7917c00SJeff Kirsher 	dma_addr_t *p;
1608f7917c00SJeff Kirsher 	struct deferred_unmap_info *dui;
1609f7917c00SJeff Kirsher 
1610f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1611f7917c00SJeff Kirsher 	dui->pdev = pdev;
1612f7917c00SJeff Kirsher 	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1613f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[0]);
1614f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[1]);
1615f7917c00SJeff Kirsher 	}
1616f7917c00SJeff Kirsher 	if (sgl_flits)
1617f7917c00SJeff Kirsher 		*p = be64_to_cpu(sgl->addr[0]);
1618f7917c00SJeff Kirsher }
1619f7917c00SJeff Kirsher 
1620f7917c00SJeff Kirsher /**
1621f7917c00SJeff Kirsher  *	write_ofld_wr - write an offload work request
1622f7917c00SJeff Kirsher  *	@adap: the adapter
1623f7917c00SJeff Kirsher  *	@skb: the packet to send
1624f7917c00SJeff Kirsher  *	@q: the Tx queue
1625f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1626f7917c00SJeff Kirsher  *	@gen: the generation value to use
1627f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1628d0ea5cbdSJesse Brandeburg  *	@addr: the list of DMA-mapped addresses for the packet's buffers
1629f7917c00SJeff Kirsher  *
1630f7917c00SJeff Kirsher  *	Write an offload work request to send the supplied packet.  The packet
1631f7917c00SJeff Kirsher  *	data already carry the work request with most fields populated.
1632f7917c00SJeff Kirsher  */
1633f7917c00SJeff Kirsher static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1634f7917c00SJeff Kirsher 			  struct sge_txq *q, unsigned int pidx,
1635c69fe407SArjun Vynipadath 			  unsigned int gen, unsigned int ndesc,
1636c69fe407SArjun Vynipadath 			  const dma_addr_t *addr)
1637f7917c00SJeff Kirsher {
1638f7917c00SJeff Kirsher 	unsigned int sgl_flits, flits;
1639f7917c00SJeff Kirsher 	struct work_request_hdr *from;
1640f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1641f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1642f7917c00SJeff Kirsher 
1643f7917c00SJeff Kirsher 	if (immediate(skb)) {
1644f7917c00SJeff Kirsher 		q->sdesc[pidx].skb = NULL;
1645f7917c00SJeff Kirsher 		write_imm(d, skb, skb->len, gen);
1646f7917c00SJeff Kirsher 		return;
1647f7917c00SJeff Kirsher 	}
1648f7917c00SJeff Kirsher 
1649f7917c00SJeff Kirsher 	/* Only TX_DATA builds SGLs */
1650f7917c00SJeff Kirsher 
1651f7917c00SJeff Kirsher 	from = (struct work_request_hdr *)skb->data;
1652f7917c00SJeff Kirsher 	memcpy(&d->flit[1], &from[1],
1653f7917c00SJeff Kirsher 	       skb_transport_offset(skb) - sizeof(*from));
1654f7917c00SJeff Kirsher 
1655f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;
1656f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1657c69fe407SArjun Vynipadath 	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1658c69fe407SArjun Vynipadath 			      skb_tail_pointer(skb) - skb_transport_header(skb),
1659c69fe407SArjun Vynipadath 			      addr);
1660f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1661f7917c00SJeff Kirsher 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1662f7917c00SJeff Kirsher 		skb->destructor = deferred_unmap_destructor;
1663f7917c00SJeff Kirsher 	}
1664f7917c00SJeff Kirsher 
1665f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1666f7917c00SJeff Kirsher 			 gen, from->wr_hi, from->wr_lo);
1667f7917c00SJeff Kirsher }
1668f7917c00SJeff Kirsher 
1669f7917c00SJeff Kirsher /**
1670f7917c00SJeff Kirsher  *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1671f7917c00SJeff Kirsher  *	@skb: the packet
1672f7917c00SJeff Kirsher  *
1673f7917c00SJeff Kirsher  * 	Returns the number of Tx descriptors needed for the given offload
1674f7917c00SJeff Kirsher  * 	packet.  These packets are already fully constructed.
1675f7917c00SJeff Kirsher  */
1676f7917c00SJeff Kirsher static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1677f7917c00SJeff Kirsher {
1678f7917c00SJeff Kirsher 	unsigned int flits, cnt;
1679f7917c00SJeff Kirsher 
1680f7917c00SJeff Kirsher 	if (skb->len <= WR_LEN)
1681f7917c00SJeff Kirsher 		return 1;	/* packet fits as immediate data */
1682f7917c00SJeff Kirsher 
1683f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;	/* headers */
1684f7917c00SJeff Kirsher 	cnt = skb_shinfo(skb)->nr_frags;
1685be8b678cSSimon Horman 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1686f7917c00SJeff Kirsher 		cnt++;
1687f7917c00SJeff Kirsher 	return flits_to_desc(flits + sgl_len(cnt));
1688f7917c00SJeff Kirsher }
1689f7917c00SJeff Kirsher 
1690f7917c00SJeff Kirsher /**
1691f7917c00SJeff Kirsher  *	ofld_xmit - send a packet through an offload queue
1692f7917c00SJeff Kirsher  *	@adap: the adapter
1693f7917c00SJeff Kirsher  *	@q: the Tx offload queue
1694f7917c00SJeff Kirsher  *	@skb: the packet
1695f7917c00SJeff Kirsher  *
1696f7917c00SJeff Kirsher  *	Send an offload packet through an SGE offload queue.
1697f7917c00SJeff Kirsher  */
1698f7917c00SJeff Kirsher static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1699f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1700f7917c00SJeff Kirsher {
1701f7917c00SJeff Kirsher 	int ret;
1702f7917c00SJeff Kirsher 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1703f7917c00SJeff Kirsher 
1704f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1705f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1706f7917c00SJeff Kirsher 
1707f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1708f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1709f7917c00SJeff Kirsher 		if (ret == 1) {
1710f7917c00SJeff Kirsher 			skb->priority = ndesc;	/* save for restart */
1711f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1712f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1713f7917c00SJeff Kirsher 		}
1714f7917c00SJeff Kirsher 		goto again;
1715f7917c00SJeff Kirsher 	}
1716f7917c00SJeff Kirsher 
1717c69fe407SArjun Vynipadath 	if (!immediate(skb) &&
1718c69fe407SArjun Vynipadath 	    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1719c69fe407SArjun Vynipadath 		spin_unlock(&q->lock);
1720c69fe407SArjun Vynipadath 		return NET_XMIT_SUCCESS;
1721c69fe407SArjun Vynipadath 	}
1722c69fe407SArjun Vynipadath 
1723f7917c00SJeff Kirsher 	gen = q->gen;
1724f7917c00SJeff Kirsher 	q->in_use += ndesc;
1725f7917c00SJeff Kirsher 	pidx = q->pidx;
1726f7917c00SJeff Kirsher 	q->pidx += ndesc;
1727f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1728f7917c00SJeff Kirsher 		q->pidx -= q->size;
1729f7917c00SJeff Kirsher 		q->gen ^= 1;
1730f7917c00SJeff Kirsher 	}
1731f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1732f7917c00SJeff Kirsher 
1733c69fe407SArjun Vynipadath 	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1734f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1735f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1736f7917c00SJeff Kirsher }
1737f7917c00SJeff Kirsher 
1738f7917c00SJeff Kirsher /**
1739f7917c00SJeff Kirsher  *	restart_offloadq - restart a suspended offload queue
17405e0b8928SÍñigo Huguet  *	@w: pointer to the work associated with this handler
1741f7917c00SJeff Kirsher  *
1742f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx offload queue.
1743f7917c00SJeff Kirsher  */
17445e0b8928SÍñigo Huguet static void restart_offloadq(struct work_struct *w)
1745f7917c00SJeff Kirsher {
1746f7917c00SJeff Kirsher 	struct sk_buff *skb;
17475e0b8928SÍñigo Huguet 	struct sge_qset *qs = container_of(w, struct sge_qset,
17485e0b8928SÍñigo Huguet 					   txq[TXQ_OFLD].qresume_task);
1749f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
1750f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(qs->netdev);
1751f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1752c69fe407SArjun Vynipadath 	unsigned int written = 0;
1753f7917c00SJeff Kirsher 
1754f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1755f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1756f7917c00SJeff Kirsher 
1757f7917c00SJeff Kirsher 	while ((skb = skb_peek(&q->sendq)) != NULL) {
1758f7917c00SJeff Kirsher 		unsigned int gen, pidx;
1759f7917c00SJeff Kirsher 		unsigned int ndesc = skb->priority;
1760f7917c00SJeff Kirsher 
1761f7917c00SJeff Kirsher 		if (unlikely(q->size - q->in_use < ndesc)) {
1762f7917c00SJeff Kirsher 			set_bit(TXQ_OFLD, &qs->txq_stopped);
17634e857c58SPeter Zijlstra 			smp_mb__after_atomic();
1764f7917c00SJeff Kirsher 
1765f7917c00SJeff Kirsher 			if (should_restart_tx(q) &&
1766f7917c00SJeff Kirsher 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1767f7917c00SJeff Kirsher 				goto again;
1768f7917c00SJeff Kirsher 			q->stops++;
1769f7917c00SJeff Kirsher 			break;
1770f7917c00SJeff Kirsher 		}
1771f7917c00SJeff Kirsher 
1772c69fe407SArjun Vynipadath 		if (!immediate(skb) &&
1773c69fe407SArjun Vynipadath 		    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1774c69fe407SArjun Vynipadath 			break;
1775c69fe407SArjun Vynipadath 
1776f7917c00SJeff Kirsher 		gen = q->gen;
1777f7917c00SJeff Kirsher 		q->in_use += ndesc;
1778f7917c00SJeff Kirsher 		pidx = q->pidx;
1779f7917c00SJeff Kirsher 		q->pidx += ndesc;
1780c69fe407SArjun Vynipadath 		written += ndesc;
1781f7917c00SJeff Kirsher 		if (q->pidx >= q->size) {
1782f7917c00SJeff Kirsher 			q->pidx -= q->size;
1783f7917c00SJeff Kirsher 			q->gen ^= 1;
1784f7917c00SJeff Kirsher 		}
1785f7917c00SJeff Kirsher 		__skb_unlink(skb, &q->sendq);
1786f7917c00SJeff Kirsher 		spin_unlock(&q->lock);
1787f7917c00SJeff Kirsher 
1788c69fe407SArjun Vynipadath 		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1789c69fe407SArjun Vynipadath 			      (dma_addr_t *)skb->head);
1790f7917c00SJeff Kirsher 		spin_lock(&q->lock);
1791f7917c00SJeff Kirsher 	}
1792f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1793f7917c00SJeff Kirsher 
1794f7917c00SJeff Kirsher #if USE_GTS
1795f7917c00SJeff Kirsher 	set_bit(TXQ_RUNNING, &q->flags);
1796f7917c00SJeff Kirsher 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
1797f7917c00SJeff Kirsher #endif
1798f7917c00SJeff Kirsher 	wmb();
1799c69fe407SArjun Vynipadath 	if (likely(written))
1800f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_KDOORBELL,
1801f7917c00SJeff Kirsher 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1802f7917c00SJeff Kirsher }
1803f7917c00SJeff Kirsher 
1804f7917c00SJeff Kirsher /**
1805f7917c00SJeff Kirsher  *	queue_set - return the queue set a packet should use
1806f7917c00SJeff Kirsher  *	@skb: the packet
1807f7917c00SJeff Kirsher  *
1808f7917c00SJeff Kirsher  *	Maps a packet to the SGE queue set it should use.  The desired queue
1809f7917c00SJeff Kirsher  *	set is carried in bits 1-3 in the packet's priority.
1810f7917c00SJeff Kirsher  */
1811f7917c00SJeff Kirsher static inline int queue_set(const struct sk_buff *skb)
1812f7917c00SJeff Kirsher {
1813f7917c00SJeff Kirsher 	return skb->priority >> 1;
1814f7917c00SJeff Kirsher }
1815f7917c00SJeff Kirsher 
1816f7917c00SJeff Kirsher /**
1817f7917c00SJeff Kirsher  *	is_ctrl_pkt - return whether an offload packet is a control packet
1818f7917c00SJeff Kirsher  *	@skb: the packet
1819f7917c00SJeff Kirsher  *
1820f7917c00SJeff Kirsher  *	Determines whether an offload packet should use an OFLD or a CTRL
1821f7917c00SJeff Kirsher  *	Tx queue.  This is indicated by bit 0 in the packet's priority.
1822f7917c00SJeff Kirsher  */
1823f7917c00SJeff Kirsher static inline int is_ctrl_pkt(const struct sk_buff *skb)
1824f7917c00SJeff Kirsher {
1825f7917c00SJeff Kirsher 	return skb->priority & 1;
1826f7917c00SJeff Kirsher }
1827f7917c00SJeff Kirsher 
1828f7917c00SJeff Kirsher /**
1829f7917c00SJeff Kirsher  *	t3_offload_tx - send an offload packet
1830f7917c00SJeff Kirsher  *	@tdev: the offload device to send to
1831f7917c00SJeff Kirsher  *	@skb: the packet
1832f7917c00SJeff Kirsher  *
1833f7917c00SJeff Kirsher  *	Sends an offload packet.  We use the packet priority to select the
1834f7917c00SJeff Kirsher  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1835f7917c00SJeff Kirsher  *	should be sent as regular or control, bits 1-3 select the queue set.
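 *
 *	For example, a priority of ((2 << 1) | 1) sends the packet through
 *	the control queue of queue set 2, while a priority of (2 << 1) uses
 *	that queue set's offload queue.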
1836f7917c00SJeff Kirsher  */
1837f7917c00SJeff Kirsher int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1838f7917c00SJeff Kirsher {
1839f7917c00SJeff Kirsher 	struct adapter *adap = tdev2adap(tdev);
1840f7917c00SJeff Kirsher 	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1841f7917c00SJeff Kirsher 
1842f7917c00SJeff Kirsher 	if (unlikely(is_ctrl_pkt(skb)))
1843f7917c00SJeff Kirsher 		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1844f7917c00SJeff Kirsher 
1845f7917c00SJeff Kirsher 	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1846f7917c00SJeff Kirsher }
1847f7917c00SJeff Kirsher 
1848f7917c00SJeff Kirsher /**
1849f7917c00SJeff Kirsher  *	offload_enqueue - add an offload packet to an SGE offload receive queue
1850f7917c00SJeff Kirsher  *	@q: the SGE response queue
1851f7917c00SJeff Kirsher  *	@skb: the packet
1852f7917c00SJeff Kirsher  *
1853f7917c00SJeff Kirsher  *	Add a new offload packet to an SGE response queue's offload packet
1854f7917c00SJeff Kirsher  *	queue.  If the packet is the first on the queue it schedules the RX
1855f7917c00SJeff Kirsher  *	softirq to process the queue.
1856f7917c00SJeff Kirsher  */
1857f7917c00SJeff Kirsher static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1858f7917c00SJeff Kirsher {
1859f7917c00SJeff Kirsher 	int was_empty = skb_queue_empty(&q->rx_queue);
1860f7917c00SJeff Kirsher 
1861f7917c00SJeff Kirsher 	__skb_queue_tail(&q->rx_queue, skb);
1862f7917c00SJeff Kirsher 
1863f7917c00SJeff Kirsher 	if (was_empty) {
1864f7917c00SJeff Kirsher 		struct sge_qset *qs = rspq_to_qset(q);
1865f7917c00SJeff Kirsher 
1866f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
1867f7917c00SJeff Kirsher 	}
1868f7917c00SJeff Kirsher }
1869f7917c00SJeff Kirsher 
1870f7917c00SJeff Kirsher /**
1871f7917c00SJeff Kirsher  *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1872f7917c00SJeff Kirsher  *	@tdev: the offload device that will be receiving the packets
1873f7917c00SJeff Kirsher  *	@q: the SGE response queue that assembled the bundle
1874f7917c00SJeff Kirsher  *	@skbs: the partial bundle
1875f7917c00SJeff Kirsher  *	@n: the number of packets in the bundle
1876f7917c00SJeff Kirsher  *
1877f7917c00SJeff Kirsher  *	Delivers a (partial) bundle of Rx offload packets to an offload device.
1878f7917c00SJeff Kirsher  */
1879f7917c00SJeff Kirsher static inline void deliver_partial_bundle(struct t3cdev *tdev,
1880f7917c00SJeff Kirsher 					  struct sge_rspq *q,
1881f7917c00SJeff Kirsher 					  struct sk_buff *skbs[], int n)
1882f7917c00SJeff Kirsher {
1883f7917c00SJeff Kirsher 	if (n) {
1884f7917c00SJeff Kirsher 		q->offload_bundles++;
1885f7917c00SJeff Kirsher 		tdev->recv(tdev, skbs, n);
1886f7917c00SJeff Kirsher 	}
1887f7917c00SJeff Kirsher }
1888f7917c00SJeff Kirsher 
1889f7917c00SJeff Kirsher /**
1890f7917c00SJeff Kirsher  *	ofld_poll - NAPI handler for offload packets in interrupt mode
1891d0ea5cbdSJesse Brandeburg  *	@napi: the NAPI instance doing the polling
1892f7917c00SJeff Kirsher  *	@budget: polling budget
1893f7917c00SJeff Kirsher  *
1894f7917c00SJeff Kirsher  *	The NAPI handler for offload packets when a response queue is serviced
1895f7917c00SJeff Kirsher  *	by the hard interrupt handler, i.e., when it's operating in non-polling
1896f7917c00SJeff Kirsher  *	mode.  Creates small packet batches and sends them through the offload
1897f7917c00SJeff Kirsher  *	receive handler.  Batches need to be of modest size as we do prefetches
1898f7917c00SJeff Kirsher  *	on the packets in each.
1899f7917c00SJeff Kirsher  */
1900f7917c00SJeff Kirsher static int ofld_poll(struct napi_struct *napi, int budget)
1901f7917c00SJeff Kirsher {
1902f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1903f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
1904f7917c00SJeff Kirsher 	struct adapter *adapter = qs->adap;
1905f7917c00SJeff Kirsher 	int work_done = 0;
1906f7917c00SJeff Kirsher 
1907f7917c00SJeff Kirsher 	while (work_done < budget) {
1908f7917c00SJeff Kirsher 		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1909f7917c00SJeff Kirsher 		struct sk_buff_head queue;
1910f7917c00SJeff Kirsher 		int ngathered;
1911f7917c00SJeff Kirsher 
1912f7917c00SJeff Kirsher 		spin_lock_irq(&q->lock);
1913f7917c00SJeff Kirsher 		__skb_queue_head_init(&queue);
1914f7917c00SJeff Kirsher 		skb_queue_splice_init(&q->rx_queue, &queue);
1915f7917c00SJeff Kirsher 		if (skb_queue_empty(&queue)) {
19166ad20165SEric Dumazet 			napi_complete_done(napi, work_done);
1917f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1918f7917c00SJeff Kirsher 			return work_done;
1919f7917c00SJeff Kirsher 		}
1920f7917c00SJeff Kirsher 		spin_unlock_irq(&q->lock);
1921f7917c00SJeff Kirsher 
1922f7917c00SJeff Kirsher 		ngathered = 0;
1923f7917c00SJeff Kirsher 		skb_queue_walk_safe(&queue, skb, tmp) {
1924f7917c00SJeff Kirsher 			if (work_done >= budget)
1925f7917c00SJeff Kirsher 				break;
1926f7917c00SJeff Kirsher 			work_done++;
1927f7917c00SJeff Kirsher 
1928f7917c00SJeff Kirsher 			__skb_unlink(skb, &queue);
1929f7917c00SJeff Kirsher 			prefetch(skb->data);
1930f7917c00SJeff Kirsher 			skbs[ngathered] = skb;
1931f7917c00SJeff Kirsher 			if (++ngathered == RX_BUNDLE_SIZE) {
1932f7917c00SJeff Kirsher 				q->offload_bundles++;
1933f7917c00SJeff Kirsher 				adapter->tdev.recv(&adapter->tdev, skbs,
1934f7917c00SJeff Kirsher 						   ngathered);
1935f7917c00SJeff Kirsher 				ngathered = 0;
1936f7917c00SJeff Kirsher 			}
1937f7917c00SJeff Kirsher 		}
1938f7917c00SJeff Kirsher 		if (!skb_queue_empty(&queue)) {
1939f7917c00SJeff Kirsher 			/* splice remaining packets back onto Rx queue */
1940f7917c00SJeff Kirsher 			spin_lock_irq(&q->lock);
1941f7917c00SJeff Kirsher 			skb_queue_splice(&queue, &q->rx_queue);
1942f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1943f7917c00SJeff Kirsher 		}
1944f7917c00SJeff Kirsher 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1945f7917c00SJeff Kirsher 	}
1946f7917c00SJeff Kirsher 
1947f7917c00SJeff Kirsher 	return work_done;
1948f7917c00SJeff Kirsher }
1949f7917c00SJeff Kirsher 
1950f7917c00SJeff Kirsher /**
1951f7917c00SJeff Kirsher  *	rx_offload - process a received offload packet
1952f7917c00SJeff Kirsher  *	@tdev: the offload device receiving the packet
1953f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
1954f7917c00SJeff Kirsher  *	@skb: the packet
1955f7917c00SJeff Kirsher  *	@rx_gather: a gather list of packets if we are building a bundle
1956f7917c00SJeff Kirsher  *	@gather_idx: index of the next available slot in the bundle
1957f7917c00SJeff Kirsher  *
1958f7917c00SJeff Kirsher  *	Process an ingress offload packet and add it to the offload ingress
1959f7917c00SJeff Kirsher  *	queue. 	Returns the index of the next available slot in the bundle.
1960f7917c00SJeff Kirsher  */
1961f7917c00SJeff Kirsher static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1962f7917c00SJeff Kirsher 			     struct sk_buff *skb, struct sk_buff *rx_gather[],
1963f7917c00SJeff Kirsher 			     unsigned int gather_idx)
1964f7917c00SJeff Kirsher {
1965f7917c00SJeff Kirsher 	skb_reset_mac_header(skb);
1966f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
1967f7917c00SJeff Kirsher 	skb_reset_transport_header(skb);
1968f7917c00SJeff Kirsher 
1969f7917c00SJeff Kirsher 	if (rq->polling) {
1970f7917c00SJeff Kirsher 		rx_gather[gather_idx++] = skb;
1971f7917c00SJeff Kirsher 		if (gather_idx == RX_BUNDLE_SIZE) {
1972f7917c00SJeff Kirsher 			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1973f7917c00SJeff Kirsher 			gather_idx = 0;
1974f7917c00SJeff Kirsher 			rq->offload_bundles++;
1975f7917c00SJeff Kirsher 		}
1976f7917c00SJeff Kirsher 	} else
1977f7917c00SJeff Kirsher 		offload_enqueue(rq, skb);
1978f7917c00SJeff Kirsher 
1979f7917c00SJeff Kirsher 	return gather_idx;
1980f7917c00SJeff Kirsher }
1981f7917c00SJeff Kirsher 
1982f7917c00SJeff Kirsher /**
1983f7917c00SJeff Kirsher  *	restart_tx - check whether to restart suspended Tx queues
1984f7917c00SJeff Kirsher  *	@qs: the queue set to resume
1985f7917c00SJeff Kirsher  *
1986f7917c00SJeff Kirsher  *	Restarts suspended Tx queues of an SGE queue set if they have enough
1987f7917c00SJeff Kirsher  *	free resources to resume operation.
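 *
 *	The Ethernet queue is woken directly, while the offload and control
 *	queues are resumed from the driver's workqueue because draining their
 *	pending sendq can take a while.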
1988f7917c00SJeff Kirsher  */
1989f7917c00SJeff Kirsher static void restart_tx(struct sge_qset *qs)
1990f7917c00SJeff Kirsher {
1991f7917c00SJeff Kirsher 	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1992f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
1993f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1994f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].restarts++;
1995f7917c00SJeff Kirsher 		if (netif_running(qs->netdev))
1996f7917c00SJeff Kirsher 			netif_tx_wake_queue(qs->tx_q);
1997f7917c00SJeff Kirsher 	}
1998f7917c00SJeff Kirsher 
1999f7917c00SJeff Kirsher 	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
2000f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2001f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2002f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].restarts++;
20035e0b8928SÍñigo Huguet 
20045e0b8928SÍñigo Huguet 		/* The work can be quite lengthy so we use the driver's own queue */
20055e0b8928SÍñigo Huguet 		queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
2006f7917c00SJeff Kirsher 	}
2007f7917c00SJeff Kirsher 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2008f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2009f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2010f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].restarts++;
20115e0b8928SÍñigo Huguet 
20125e0b8928SÍñigo Huguet 		/* The work can be quite lengthy so we use driver's own queue */
20135e0b8928SÍñigo Huguet 		/* The work can be quite lengthy so we use the driver's own queue */
2014f7917c00SJeff Kirsher 	}
2015f7917c00SJeff Kirsher }
2016f7917c00SJeff Kirsher 
2017f7917c00SJeff Kirsher /**
2018f7917c00SJeff Kirsher  *	cxgb3_arp_process - process an ARP request probing a private IP address
2019d0ea5cbdSJesse Brandeburg  *	@pi: the port info
2020f7917c00SJeff Kirsher  *	@skb: the skbuff containing the ARP request
2021f7917c00SJeff Kirsher  *
2022f7917c00SJeff Kirsher  *	Check if the ARP request is probing the private IP address
2023f7917c00SJeff Kirsher  *	dedicated to iSCSI and generate an ARP reply if so.
2024f7917c00SJeff Kirsher  */
2025f7917c00SJeff Kirsher static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2026f7917c00SJeff Kirsher {
2027f7917c00SJeff Kirsher 	struct net_device *dev = skb->dev;
2028f7917c00SJeff Kirsher 	struct arphdr *arp;
2029f7917c00SJeff Kirsher 	unsigned char *arp_ptr;
2030f7917c00SJeff Kirsher 	unsigned char *sha;
2031f7917c00SJeff Kirsher 	__be32 sip, tip;
2032f7917c00SJeff Kirsher 
2033f7917c00SJeff Kirsher 	if (!dev)
2034f7917c00SJeff Kirsher 		return;
2035f7917c00SJeff Kirsher 
2036f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
2037f7917c00SJeff Kirsher 	arp = arp_hdr(skb);
2038f7917c00SJeff Kirsher 
2039f7917c00SJeff Kirsher 	if (arp->ar_op != htons(ARPOP_REQUEST))
2040f7917c00SJeff Kirsher 		return;
2041f7917c00SJeff Kirsher 
2042f7917c00SJeff Kirsher 	arp_ptr = (unsigned char *)(arp + 1);
2043f7917c00SJeff Kirsher 	sha = arp_ptr;
2044f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2045f7917c00SJeff Kirsher 	memcpy(&sip, arp_ptr, sizeof(sip));
2046f7917c00SJeff Kirsher 	arp_ptr += sizeof(sip);
2047f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2048f7917c00SJeff Kirsher 	memcpy(&tip, arp_ptr, sizeof(tip));
2049f7917c00SJeff Kirsher 
2050f7917c00SJeff Kirsher 	if (tip != pi->iscsi_ipv4addr)
2051f7917c00SJeff Kirsher 		return;
2052f7917c00SJeff Kirsher 
2053f7917c00SJeff Kirsher 	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2054f7917c00SJeff Kirsher 		 pi->iscsic.mac_addr, sha);
2055f7917c00SJeff Kirsher 
2056f7917c00SJeff Kirsher }
2057f7917c00SJeff Kirsher 
2058f7917c00SJeff Kirsher static inline int is_arp(struct sk_buff *skb)
2059f7917c00SJeff Kirsher {
2060f7917c00SJeff Kirsher 	return skb->protocol == htons(ETH_P_ARP);
2061f7917c00SJeff Kirsher }
2062f7917c00SJeff Kirsher 
2063f7917c00SJeff Kirsher static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2064f7917c00SJeff Kirsher 					struct sk_buff *skb)
2065f7917c00SJeff Kirsher {
2066f7917c00SJeff Kirsher 	if (is_arp(skb)) {
2067f7917c00SJeff Kirsher 		cxgb3_arp_process(pi, skb);
2068f7917c00SJeff Kirsher 		return;
2069f7917c00SJeff Kirsher 	}
2070f7917c00SJeff Kirsher 
2071f7917c00SJeff Kirsher 	if (pi->iscsic.recv)
2072f7917c00SJeff Kirsher 		pi->iscsic.recv(pi, skb);
2073f7917c00SJeff Kirsher 
2074f7917c00SJeff Kirsher }
2075f7917c00SJeff Kirsher 
2076f7917c00SJeff Kirsher /**
2077f7917c00SJeff Kirsher  *	rx_eth - process an ingress ethernet packet
2078f7917c00SJeff Kirsher  *	@adap: the adapter
2079f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
2080f7917c00SJeff Kirsher  *	@skb: the packet
2081d0ea5cbdSJesse Brandeburg  *	@pad: padding before the packet data (2 for Rx buffers, 0 for immediate data)
2082d0ea5cbdSJesse Brandeburg  *	@lro: whether the packet should be passed up through GRO
2083f7917c00SJeff Kirsher  *
2084f7917c00SJeff Kirsher  *	Process an ingress ethernet packet and deliver it to the stack.
2085f7917c00SJeff Kirsher  *	The padding is 2 if the packet was delivered in an Rx buffer and 0
2086f7917c00SJeff Kirsher  *	if it was immediate data in a response.
2087f7917c00SJeff Kirsher  */
2088f7917c00SJeff Kirsher static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2089f7917c00SJeff Kirsher 		   struct sk_buff *skb, int pad, int lro)
2090f7917c00SJeff Kirsher {
2091f7917c00SJeff Kirsher 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2092f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(rq);
2093f7917c00SJeff Kirsher 	struct port_info *pi;
2094f7917c00SJeff Kirsher 
2095f7917c00SJeff Kirsher 	skb_pull(skb, sizeof(*p) + pad);
2096f7917c00SJeff Kirsher 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2097f7917c00SJeff Kirsher 	pi = netdev_priv(skb->dev);
2098f7917c00SJeff Kirsher 	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2099f7917c00SJeff Kirsher 	    p->csum == htons(0xffff) && !p->fragment) {
2100f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2101f7917c00SJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2102f7917c00SJeff Kirsher 	} else
2103f7917c00SJeff Kirsher 		skb_checksum_none_assert(skb);
2104f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2105f7917c00SJeff Kirsher 
2106f7917c00SJeff Kirsher 	if (p->vlan_valid) {
2107f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANEX]++;
210886a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2109f7917c00SJeff Kirsher 	}
2110f7917c00SJeff Kirsher 	if (rq->polling) {
2111f7917c00SJeff Kirsher 		if (lro)
2112f7917c00SJeff Kirsher 			napi_gro_receive(&qs->napi, skb);
2113f7917c00SJeff Kirsher 		else {
2114f7917c00SJeff Kirsher 			if (unlikely(pi->iscsic.flags))
2115f7917c00SJeff Kirsher 				cxgb3_process_iscsi_prov_pack(pi, skb);
2116f7917c00SJeff Kirsher 			netif_receive_skb(skb);
2117f7917c00SJeff Kirsher 		}
2118f7917c00SJeff Kirsher 	} else
2119f7917c00SJeff Kirsher 		netif_rx(skb);
2120f7917c00SJeff Kirsher }
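
/*
 * A note on the checksum test in rx_eth() above: the driver only trusts
 * the hardware's Rx checksum when csum_valid is set, the reported csum
 * is 0xffff and the packet is not a fragment; in every other case the
 * skb is left unmarked so the stack verifies the checksum in software.
 */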
2121f7917c00SJeff Kirsher 
2122f7917c00SJeff Kirsher static inline int is_eth_tcp(u32 rss)
2123f7917c00SJeff Kirsher {
2124f7917c00SJeff Kirsher 	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2125f7917c00SJeff Kirsher }
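
/*
 * process_responses() uses this test to gate the GRO path: only frames
 * that the hardware hashed as a 4-tuple (i.e. classified as TCP) are
 * fed to napi_gro_receive()/napi_gro_frags(); everything else takes the
 * plain netif_receive_skb()/netif_rx() path in rx_eth().
 */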
2126f7917c00SJeff Kirsher 
2127f7917c00SJeff Kirsher /**
2128f7917c00SJeff Kirsher  *	lro_add_page - add a page chunk to an LRO session
2129f7917c00SJeff Kirsher  *	@adap: the adapter
2130f7917c00SJeff Kirsher  *	@qs: the associated queue set
2131f7917c00SJeff Kirsher  *	@fl: the free list containing the page chunk to add
2132f7917c00SJeff Kirsher  *	@len: packet length
2133f7917c00SJeff Kirsher  *	@complete: Indicates the last fragment of a frame
2134f7917c00SJeff Kirsher  *
2135f7917c00SJeff Kirsher  *	Add a received packet contained in a page chunk to an existing LRO
2136f7917c00SJeff Kirsher  *	session.
2137f7917c00SJeff Kirsher  */
2138f7917c00SJeff Kirsher static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2139f7917c00SJeff Kirsher 			 struct sge_fl *fl, int len, int complete)
2140f7917c00SJeff Kirsher {
2141f7917c00SJeff Kirsher 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2142f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2143f7917c00SJeff Kirsher 	struct sk_buff *skb = NULL;
2144f7917c00SJeff Kirsher 	struct cpl_rx_pkt *cpl;
2145d7840976SMatthew Wilcox (Oracle) 	skb_frag_t *rx_frag;
2146f7917c00SJeff Kirsher 	int nr_frags;
2147f7917c00SJeff Kirsher 	int offset = 0;
2148f7917c00SJeff Kirsher 
2149f7917c00SJeff Kirsher 	if (!qs->nomem) {
2150f7917c00SJeff Kirsher 		skb = napi_get_frags(&qs->napi);
2151f7917c00SJeff Kirsher 		qs->nomem = !skb;
2152f7917c00SJeff Kirsher 	}
2153f7917c00SJeff Kirsher 
2154f7917c00SJeff Kirsher 	fl->credits--;
2155f7917c00SJeff Kirsher 
2156f7917c00SJeff Kirsher 	pci_dma_sync_single_for_cpu(adap->pdev,
2157f7917c00SJeff Kirsher 				    dma_unmap_addr(sd, dma_addr),
2158f7917c00SJeff Kirsher 				    fl->buf_size - SGE_PG_RSVD,
2159f7917c00SJeff Kirsher 				    PCI_DMA_FROMDEVICE);
2160f7917c00SJeff Kirsher 
2161f7917c00SJeff Kirsher 	(*sd->pg_chunk.p_cnt)--;
2162f7917c00SJeff Kirsher 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2163f7917c00SJeff Kirsher 		pci_unmap_page(adap->pdev,
2164f7917c00SJeff Kirsher 			       sd->pg_chunk.mapping,
2165f7917c00SJeff Kirsher 			       fl->alloc_size,
2166f7917c00SJeff Kirsher 			       PCI_DMA_FROMDEVICE);
2167f7917c00SJeff Kirsher 
2168f7917c00SJeff Kirsher 	if (!skb) {
2169f7917c00SJeff Kirsher 		put_page(sd->pg_chunk.page);
2170f7917c00SJeff Kirsher 		if (complete)
2171f7917c00SJeff Kirsher 			qs->nomem = 0;
2172f7917c00SJeff Kirsher 		return;
2173f7917c00SJeff Kirsher 	}
2174f7917c00SJeff Kirsher 
2175f7917c00SJeff Kirsher 	rx_frag = skb_shinfo(skb)->frags;
2176f7917c00SJeff Kirsher 	nr_frags = skb_shinfo(skb)->nr_frags;
2177f7917c00SJeff Kirsher 
2178f7917c00SJeff Kirsher 	if (!nr_frags) {
2179f7917c00SJeff Kirsher 		offset = 2 + sizeof(struct cpl_rx_pkt);
2180f7917c00SJeff Kirsher 		cpl = qs->lro_va = sd->pg_chunk.va + 2;
2181f7917c00SJeff Kirsher 
2182f7917c00SJeff Kirsher 		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2183f7917c00SJeff Kirsher 		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
2184f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2185f7917c00SJeff Kirsher 			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2186f7917c00SJeff Kirsher 		} else
2187f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_NONE;
2188f7917c00SJeff Kirsher 	} else
2189f7917c00SJeff Kirsher 		cpl = qs->lro_va;
2190f7917c00SJeff Kirsher 
2191f7917c00SJeff Kirsher 	len -= offset;
2192f7917c00SJeff Kirsher 
2193f7917c00SJeff Kirsher 	rx_frag += nr_frags;
21946a930b9fSIan Campbell 	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2195b54c9d5bSJonathan Lemon 	skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
21969e903e08SEric Dumazet 	skb_frag_size_set(rx_frag, len);
2197f7917c00SJeff Kirsher 
2198f7917c00SJeff Kirsher 	skb->len += len;
2199f7917c00SJeff Kirsher 	skb->data_len += len;
2200f7917c00SJeff Kirsher 	skb->truesize += len;
2201f7917c00SJeff Kirsher 	skb_shinfo(skb)->nr_frags++;
2202f7917c00SJeff Kirsher 
2203f7917c00SJeff Kirsher 	if (!complete)
2204f7917c00SJeff Kirsher 		return;
2205f7917c00SJeff Kirsher 
2206f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2207f7917c00SJeff Kirsher 
220872073ad2SVipul Pandya 	if (cpl->vlan_valid) {
220972073ad2SVipul Pandya 		qs->port_stats[SGE_PSTAT_VLANEX]++;
221086a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
221172073ad2SVipul Pandya 	}
2212f7917c00SJeff Kirsher 	napi_gro_frags(&qs->napi);
2213f7917c00SJeff Kirsher }
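
/*
 * Flow of the page-based GRO path above: the first fragment of a frame
 * carries the CPL header, so the CPL is read from the page chunk (at
 * offset 2, past the alignment pad) and cached in qs->lro_va for the
 * frame's remaining fragments.  Each fragment is attached to the skb
 * returned by napi_get_frags() as one page frag, and the assembled skb
 * is handed to napi_gro_frags() only when the final ("complete")
 * fragment arrives.
 */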
2214f7917c00SJeff Kirsher 
2215f7917c00SJeff Kirsher /**
2216f7917c00SJeff Kirsher  *	handle_rsp_cntrl_info - handles control information in a response
2217f7917c00SJeff Kirsher  *	@qs: the queue set corresponding to the response
2218f7917c00SJeff Kirsher  *	@flags: the response control flags
2219f7917c00SJeff Kirsher  *
2220f7917c00SJeff Kirsher  *	Handles the control information of an SGE response, such as GTS
2221f7917c00SJeff Kirsher  *	indications and completion credits for the queue set's Tx queues.
2222f7917c00SJeff Kirsher  *	HW coalesces credits; we don't do any extra SW coalescing.
2223f7917c00SJeff Kirsher  */
2224f7917c00SJeff Kirsher static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2225f7917c00SJeff Kirsher {
2226f7917c00SJeff Kirsher 	unsigned int credits;
2227f7917c00SJeff Kirsher 
2228f7917c00SJeff Kirsher #if USE_GTS
2229f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ0_GTS)
2230f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2231f7917c00SJeff Kirsher #endif
2232f7917c00SJeff Kirsher 
2233f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ0_CR(flags);
2234f7917c00SJeff Kirsher 	if (credits)
2235f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].processed += credits;
2236f7917c00SJeff Kirsher 
2237f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ2_CR(flags);
2238f7917c00SJeff Kirsher 	if (credits)
2239f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].processed += credits;
2240f7917c00SJeff Kirsher 
2241f7917c00SJeff Kirsher # if USE_GTS
2242f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ1_GTS)
2243f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2244f7917c00SJeff Kirsher # endif
2245f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ1_CR(flags);
2246f7917c00SJeff Kirsher 	if (credits)
2247f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].processed += credits;
2248f7917c00SJeff Kirsher }
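
/*
 * Mapping of the credit fields above: the hardware's TXQ0 completion
 * credits belong to the Ethernet Tx queue, TXQ1 to the offload queue
 * and TXQ2 to the control queue, which is why the TXQ2 field updates
 * qs->txq[TXQ_CTRL].
 */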
2249f7917c00SJeff Kirsher 
2250f7917c00SJeff Kirsher /**
2251f7917c00SJeff Kirsher  *	check_ring_db - check if we need to ring any doorbells
2252d0ea5cbdSJesse Brandeburg  *	@adap: the adapter
2253f7917c00SJeff Kirsher  *	@qs: the queue set whose Tx queues are to be examined
2254f7917c00SJeff Kirsher  *	@sleeping: indicates which Tx queue sent GTS
2255f7917c00SJeff Kirsher  *
2256f7917c00SJeff Kirsher  *	Checks if some of a queue set's Tx queues need to ring their doorbells
2257f7917c00SJeff Kirsher  *	to resume transmission after idling while they still have unprocessed
2258f7917c00SJeff Kirsher  *	descriptors.
2259f7917c00SJeff Kirsher  */
2260f7917c00SJeff Kirsher static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2261f7917c00SJeff Kirsher 			  unsigned int sleeping)
2262f7917c00SJeff Kirsher {
2263f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ0_GTS) {
2264f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
2265f7917c00SJeff Kirsher 
2266f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2267f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2268f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2269f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2270f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2271f7917c00SJeff Kirsher 		}
2272f7917c00SJeff Kirsher 	}
2273f7917c00SJeff Kirsher 
2274f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ1_GTS) {
2275f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2276f7917c00SJeff Kirsher 
2277f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2278f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2279f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2280f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2281f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2282f7917c00SJeff Kirsher 		}
2283f7917c00SJeff Kirsher 	}
2284f7917c00SJeff Kirsher }
2285f7917c00SJeff Kirsher 
2286f7917c00SJeff Kirsher /**
2287f7917c00SJeff Kirsher  *	is_new_response - check if a response is newly written
2288f7917c00SJeff Kirsher  *	@r: the response descriptor
2289f7917c00SJeff Kirsher  *	@q: the response queue
2290f7917c00SJeff Kirsher  *
2291f7917c00SJeff Kirsher  *	Returns true if a response descriptor contains a yet unprocessed
2292f7917c00SJeff Kirsher  *	response.
2293f7917c00SJeff Kirsher  */
2294f7917c00SJeff Kirsher static inline int is_new_response(const struct rsp_desc *r,
2295f7917c00SJeff Kirsher 				  const struct sge_rspq *q)
2296f7917c00SJeff Kirsher {
2297f7917c00SJeff Kirsher 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2298f7917c00SJeff Kirsher }
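
/*
 * Ownership of response descriptors is tracked with a generation bit:
 * q->gen is toggled each time the consumer index wraps around the ring
 * (see process_responses()), so a descriptor whose generation field
 * matches q->gen has been written by hardware since software last
 * visited this slot and may be processed once the dma_rmb() has been
 * issued.
 */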
2299f7917c00SJeff Kirsher 
2300f7917c00SJeff Kirsher static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2301f7917c00SJeff Kirsher {
2302f7917c00SJeff Kirsher 	q->pg_skb = NULL;
2303f7917c00SJeff Kirsher 	q->rx_recycle_buf = 0;
2304f7917c00SJeff Kirsher }
2305f7917c00SJeff Kirsher 
2306f7917c00SJeff Kirsher #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2307f7917c00SJeff Kirsher #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2308f7917c00SJeff Kirsher 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2309f7917c00SJeff Kirsher 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2310f7917c00SJeff Kirsher 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2311f7917c00SJeff Kirsher 
2312f7917c00SJeff Kirsher /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2313f7917c00SJeff Kirsher #define NOMEM_INTR_DELAY 2500
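
/*
 * In the 0.1 us timer unit noted above, NOMEM_INTR_DELAY of 2500
 * corresponds to roughly 250 us before the next interrupt, giving the
 * memory allocator some time to recover.
 */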
2314f7917c00SJeff Kirsher 
2315f7917c00SJeff Kirsher /**
2316f7917c00SJeff Kirsher  *	process_responses - process responses from an SGE response queue
2317f7917c00SJeff Kirsher  *	@adap: the adapter
2318f7917c00SJeff Kirsher  *	@qs: the queue set to which the response queue belongs
2319f7917c00SJeff Kirsher  *	@budget: how many responses can be processed in this round
2320f7917c00SJeff Kirsher  *
2321f7917c00SJeff Kirsher  *	Process responses from an SGE response queue up to the supplied budget.
2322f7917c00SJeff Kirsher  *	Responses include received packets as well as credits and other events
2323f7917c00SJeff Kirsher  *	for the queues that belong to the response queue's queue set.
2324f7917c00SJeff Kirsher  *	A negative budget is effectively unlimited.
2325f7917c00SJeff Kirsher  *
2326f7917c00SJeff Kirsher  *	Additionally choose the interrupt holdoff time for the next interrupt
2327f7917c00SJeff Kirsher  *	on this queue.  If the system is under memory shortage use a fairly
2328f7917c00SJeff Kirsher  *	long delay to help recovery.
2329f7917c00SJeff Kirsher  */
2330f7917c00SJeff Kirsher static int process_responses(struct adapter *adap, struct sge_qset *qs,
2331f7917c00SJeff Kirsher 			     int budget)
2332f7917c00SJeff Kirsher {
2333f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2334f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2335f7917c00SJeff Kirsher 	int budget_left = budget;
2336f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2337f7917c00SJeff Kirsher 	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2338f7917c00SJeff Kirsher 	int ngathered = 0;
2339f7917c00SJeff Kirsher 
2340f7917c00SJeff Kirsher 	q->next_holdoff = q->holdoff_tmr;
2341f7917c00SJeff Kirsher 
2342f7917c00SJeff Kirsher 	while (likely(budget_left && is_new_response(r, q))) {
2343f7917c00SJeff Kirsher 		int packet_complete, eth, ethpad = 2;
2344f7917c00SJeff Kirsher 		int lro = !!(qs->netdev->features & NETIF_F_GRO);
2345f7917c00SJeff Kirsher 		struct sk_buff *skb = NULL;
2346f7917c00SJeff Kirsher 		u32 len, flags;
2347f7917c00SJeff Kirsher 		__be32 rss_hi, rss_lo;
2348f7917c00SJeff Kirsher 
2349019be1cfSAlexander Duyck 		dma_rmb();
2350f7917c00SJeff Kirsher 		eth = r->rss_hdr.opcode == CPL_RX_PKT;
2351f7917c00SJeff Kirsher 		rss_hi = *(const __be32 *)r;
2352f7917c00SJeff Kirsher 		rss_lo = r->rss_hdr.rss_hash_val;
2353f7917c00SJeff Kirsher 		flags = ntohl(r->flags);
2354f7917c00SJeff Kirsher 
2355f7917c00SJeff Kirsher 		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2356f7917c00SJeff Kirsher 			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2357f7917c00SJeff Kirsher 			if (!skb)
2358f7917c00SJeff Kirsher 				goto no_mem;
2359f7917c00SJeff Kirsher 
2360de77b966Syuan linyu 			__skb_put_data(skb, r, AN_PKT_SIZE);
2361f7917c00SJeff Kirsher 			skb->data[0] = CPL_ASYNC_NOTIF;
2362f7917c00SJeff Kirsher 			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2363f7917c00SJeff Kirsher 			q->async_notif++;
2364f7917c00SJeff Kirsher 		} else if (flags & F_RSPD_IMM_DATA_VALID) {
2365f7917c00SJeff Kirsher 			skb = get_imm_packet(r);
2366f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2367f7917c00SJeff Kirsher no_mem:
2368f7917c00SJeff Kirsher 				q->next_holdoff = NOMEM_INTR_DELAY;
2369f7917c00SJeff Kirsher 				q->nomem++;
2370f7917c00SJeff Kirsher 				/* consume one credit since we tried */
2371f7917c00SJeff Kirsher 				budget_left--;
2372f7917c00SJeff Kirsher 				break;
2373f7917c00SJeff Kirsher 			}
2374f7917c00SJeff Kirsher 			q->imm_data++;
2375f7917c00SJeff Kirsher 			ethpad = 0;
2376f7917c00SJeff Kirsher 		} else if ((len = ntohl(r->len_cq)) != 0) {
2377f7917c00SJeff Kirsher 			struct sge_fl *fl;
2378f7917c00SJeff Kirsher 
2379f7917c00SJeff Kirsher 			lro &= eth && is_eth_tcp(rss_hi);
2380f7917c00SJeff Kirsher 
2381f7917c00SJeff Kirsher 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2382f7917c00SJeff Kirsher 			if (fl->use_pages) {
2383f7917c00SJeff Kirsher 				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2384f7917c00SJeff Kirsher 
2385f468f21bSTariq Toukan 				net_prefetch(addr);
2386f7917c00SJeff Kirsher 				__refill_fl(adap, fl);
2387f7917c00SJeff Kirsher 				if (lro > 0) {
2388f7917c00SJeff Kirsher 					lro_add_page(adap, qs, fl,
2389f7917c00SJeff Kirsher 						     G_RSPD_LEN(len),
2390f7917c00SJeff Kirsher 						     flags & F_RSPD_EOP);
2391f7917c00SJeff Kirsher 					goto next_fl;
2392f7917c00SJeff Kirsher 				}
2393f7917c00SJeff Kirsher 
2394f7917c00SJeff Kirsher 				skb = get_packet_pg(adap, fl, q,
2395f7917c00SJeff Kirsher 						    G_RSPD_LEN(len),
2396f7917c00SJeff Kirsher 						    eth ?
2397f7917c00SJeff Kirsher 						    SGE_RX_DROP_THRES : 0);
2398f7917c00SJeff Kirsher 				q->pg_skb = skb;
2399f7917c00SJeff Kirsher 			} else
2400f7917c00SJeff Kirsher 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
2401f7917c00SJeff Kirsher 						 eth ? SGE_RX_DROP_THRES : 0);
2402f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2403f7917c00SJeff Kirsher 				if (!eth)
2404f7917c00SJeff Kirsher 					goto no_mem;
2405f7917c00SJeff Kirsher 				q->rx_drops++;
2406f7917c00SJeff Kirsher 			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2407f7917c00SJeff Kirsher 				__skb_pull(skb, 2);
2408f7917c00SJeff Kirsher next_fl:
2409f7917c00SJeff Kirsher 			if (++fl->cidx == fl->size)
2410f7917c00SJeff Kirsher 				fl->cidx = 0;
2411f7917c00SJeff Kirsher 		} else
2412f7917c00SJeff Kirsher 			q->pure_rsps++;
2413f7917c00SJeff Kirsher 
2414f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2415f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2416f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2417f7917c00SJeff Kirsher 		}
2418f7917c00SJeff Kirsher 
2419f7917c00SJeff Kirsher 		r++;
2420f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2421f7917c00SJeff Kirsher 			q->cidx = 0;
2422f7917c00SJeff Kirsher 			q->gen ^= 1;
2423f7917c00SJeff Kirsher 			r = q->desc;
2424f7917c00SJeff Kirsher 		}
2425f7917c00SJeff Kirsher 		prefetch(r);
2426f7917c00SJeff Kirsher 
2427f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2428f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2429f7917c00SJeff Kirsher 			q->credits = 0;
2430f7917c00SJeff Kirsher 		}
2431f7917c00SJeff Kirsher 
2432f7917c00SJeff Kirsher 		packet_complete = flags &
2433f7917c00SJeff Kirsher 				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2434f7917c00SJeff Kirsher 				   F_RSPD_ASYNC_NOTIF);
2435f7917c00SJeff Kirsher 
2436f7917c00SJeff Kirsher 		if (skb != NULL && packet_complete) {
2437f7917c00SJeff Kirsher 			if (eth)
2438f7917c00SJeff Kirsher 				rx_eth(adap, q, skb, ethpad, lro);
2439f7917c00SJeff Kirsher 			else {
2440f7917c00SJeff Kirsher 				q->offload_pkts++;
2441f7917c00SJeff Kirsher 				/* Preserve the RSS info in csum & priority */
2442f7917c00SJeff Kirsher 				skb->csum = rss_hi;
2443f7917c00SJeff Kirsher 				skb->priority = rss_lo;
2444f7917c00SJeff Kirsher 				ngathered = rx_offload(&adap->tdev, q, skb,
2445f7917c00SJeff Kirsher 						       offload_skbs,
2446f7917c00SJeff Kirsher 						       ngathered);
2447f7917c00SJeff Kirsher 			}
2448f7917c00SJeff Kirsher 
2449f7917c00SJeff Kirsher 			if (flags & F_RSPD_EOP)
2450f7917c00SJeff Kirsher 				clear_rspq_bufstate(q);
2451f7917c00SJeff Kirsher 		}
2452f7917c00SJeff Kirsher 		--budget_left;
2453f7917c00SJeff Kirsher 	}
2454f7917c00SJeff Kirsher 
2455f7917c00SJeff Kirsher 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2456f7917c00SJeff Kirsher 
2457f7917c00SJeff Kirsher 	if (sleeping)
2458f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2459f7917c00SJeff Kirsher 
2460f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2461f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2462f7917c00SJeff Kirsher 		restart_tx(qs);
2463f7917c00SJeff Kirsher 
2464f7917c00SJeff Kirsher 	budget -= budget_left;
2465f7917c00SJeff Kirsher 	return budget;
2466f7917c00SJeff Kirsher }
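
/*
 * Return value note: process_responses() returns the number of
 * responses it consumed (the budget minus what was left over).  The
 * NAPI handler reports this as its work_done, while the non-NAPI
 * handlers call it with a budget of -1 (effectively unlimited) and use
 * a zero return to detect interrupts that delivered no work.
 */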
2467f7917c00SJeff Kirsher 
2468f7917c00SJeff Kirsher static inline int is_pure_response(const struct rsp_desc *r)
2469f7917c00SJeff Kirsher {
2470f7917c00SJeff Kirsher 	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2471f7917c00SJeff Kirsher 
2472f7917c00SJeff Kirsher 	return (n | r->len_cq) == 0;
2473f7917c00SJeff Kirsher }
2474f7917c00SJeff Kirsher 
2475f7917c00SJeff Kirsher /**
2476f7917c00SJeff Kirsher  *	napi_rx_handler - the NAPI handler for Rx processing
2477f7917c00SJeff Kirsher  *	@napi: the napi instance
2478f7917c00SJeff Kirsher  *	@budget: how many packets we can process in this round
2479f7917c00SJeff Kirsher  *
2480f7917c00SJeff Kirsher  *	Handler for new data events when using NAPI.
2481f7917c00SJeff Kirsher  */
2482f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget)
2483f7917c00SJeff Kirsher {
2484f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2485f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2486f7917c00SJeff Kirsher 	int work_done = process_responses(adap, qs, budget);
2487f7917c00SJeff Kirsher 
2488f7917c00SJeff Kirsher 	if (likely(work_done < budget)) {
24896ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
2490f7917c00SJeff Kirsher 
2491f7917c00SJeff Kirsher 		/*
2492f7917c00SJeff Kirsher 		 * Because we don't atomically flush the following
2493f7917c00SJeff Kirsher 		 * write it is possible that in very rare cases it can
2494f7917c00SJeff Kirsher 		 * reach the device in a way that races with a new
2495f7917c00SJeff Kirsher 		 * response being written plus an error interrupt
2496f7917c00SJeff Kirsher 		 * causing the NAPI interrupt handler below to return
2497f7917c00SJeff Kirsher 		 * unhandled status to the OS.  To protect against
2498f7917c00SJeff Kirsher 		 * this would require flushing the write and doing
2499f7917c00SJeff Kirsher 		 * both the write and the flush with interrupts off.
2500f7917c00SJeff Kirsher 		 * Way too expensive and unjustifiable given the
2501f7917c00SJeff Kirsher 		 * rarity of the race.
2502f7917c00SJeff Kirsher 		 *
2503f7917c00SJeff Kirsher 		 * The race cannot happen at all with MSI-X.
2504f7917c00SJeff Kirsher 		 */
2505f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2506f7917c00SJeff Kirsher 			     V_NEWTIMER(qs->rspq.next_holdoff) |
2507f7917c00SJeff Kirsher 			     V_NEWINDEX(qs->rspq.cidx));
2508f7917c00SJeff Kirsher 	}
2509f7917c00SJeff Kirsher 	return work_done;
2510f7917c00SJeff Kirsher }
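
/*
 * The A_SG_GTS write above hands the updated consumer index and the
 * chosen holdoff timer back to the SGE once polling is finished, in
 * effect re-arming interrupt delivery for this response queue (subject
 * to the rare race described in the comment inside napi_rx_handler()).
 */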
2511f7917c00SJeff Kirsher 
2512f7917c00SJeff Kirsher /*
2513f7917c00SJeff Kirsher  * Returns true if the device is already scheduled for polling.
2514f7917c00SJeff Kirsher  */
2515f7917c00SJeff Kirsher static inline int napi_is_scheduled(struct napi_struct *napi)
2516f7917c00SJeff Kirsher {
2517f7917c00SJeff Kirsher 	return test_bit(NAPI_STATE_SCHED, &napi->state);
2518f7917c00SJeff Kirsher }
2519f7917c00SJeff Kirsher 
2520f7917c00SJeff Kirsher /**
2521f7917c00SJeff Kirsher  *	process_pure_responses - process pure responses from a response queue
2522f7917c00SJeff Kirsher  *	@adap: the adapter
2523f7917c00SJeff Kirsher  *	@qs: the queue set owning the response queue
2524f7917c00SJeff Kirsher  *	@r: the first pure response to process
2525f7917c00SJeff Kirsher  *
2526f7917c00SJeff Kirsher  *	A simpler version of process_responses() that handles only pure (i.e.,
2527f7917c00SJeff Kirsher  *	non data-carrying) responses.  Such responses are too light-weight to
2528f7917c00SJeff Kirsher  *	justify calling a softirq under NAPI, so we handle them specially in
2529f7917c00SJeff Kirsher  *	the interrupt handler.  The function is called with a pointer to a
2530f7917c00SJeff Kirsher  *	response, which the caller must ensure is a valid pure response.
2531f7917c00SJeff Kirsher  *
2532f7917c00SJeff Kirsher  *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2533f7917c00SJeff Kirsher  */
2534f7917c00SJeff Kirsher static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2535f7917c00SJeff Kirsher 				  struct rsp_desc *r)
2536f7917c00SJeff Kirsher {
2537f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2538f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2539f7917c00SJeff Kirsher 
2540f7917c00SJeff Kirsher 	do {
2541f7917c00SJeff Kirsher 		u32 flags = ntohl(r->flags);
2542f7917c00SJeff Kirsher 
2543f7917c00SJeff Kirsher 		r++;
2544f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2545f7917c00SJeff Kirsher 			q->cidx = 0;
2546f7917c00SJeff Kirsher 			q->gen ^= 1;
2547f7917c00SJeff Kirsher 			r = q->desc;
2548f7917c00SJeff Kirsher 		}
2549f7917c00SJeff Kirsher 		prefetch(r);
2550f7917c00SJeff Kirsher 
2551f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2552f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2553f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2554f7917c00SJeff Kirsher 		}
2555f7917c00SJeff Kirsher 
2556f7917c00SJeff Kirsher 		q->pure_rsps++;
2557f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2558f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2559f7917c00SJeff Kirsher 			q->credits = 0;
2560f7917c00SJeff Kirsher 		}
2561f7917c00SJeff Kirsher 		if (!is_new_response(r, q))
2562f7917c00SJeff Kirsher 			break;
2563019be1cfSAlexander Duyck 		dma_rmb();
2564f7917c00SJeff Kirsher 	} while (is_pure_response(r));
2565f7917c00SJeff Kirsher 
2566f7917c00SJeff Kirsher 	if (sleeping)
2567f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2568f7917c00SJeff Kirsher 
2569f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2570f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2571f7917c00SJeff Kirsher 		restart_tx(qs);
2572f7917c00SJeff Kirsher 
2573f7917c00SJeff Kirsher 	return is_new_response(r, q);
2574f7917c00SJeff Kirsher }
2575f7917c00SJeff Kirsher 
2576f7917c00SJeff Kirsher /**
2577f7917c00SJeff Kirsher  *	handle_responses - decide what to do with new responses in NAPI mode
2578f7917c00SJeff Kirsher  *	@adap: the adapter
2579f7917c00SJeff Kirsher  *	@q: the response queue
2580f7917c00SJeff Kirsher  *
2581f7917c00SJeff Kirsher  *	This is used by the NAPI interrupt handlers to decide what to do with
2582f7917c00SJeff Kirsher  *	new SGE responses.  If there are no new responses it returns -1.  If
2583f7917c00SJeff Kirsher  *	there are new responses and they are pure (i.e., non-data carrying)
2584f7917c00SJeff Kirsher  *	it handles them straight in hard interrupt context as they are very
2585f7917c00SJeff Kirsher  *	cheap and don't deliver any packets.  Finally, if there are any data
2586f7917c00SJeff Kirsher  *	signaling responses it schedules the NAPI handler.  Returns 1 if it
2587f7917c00SJeff Kirsher  *	schedules NAPI, 0 if all new responses were pure.
2588f7917c00SJeff Kirsher  *
2589f7917c00SJeff Kirsher  *	The caller must ascertain NAPI is not already running.
2590f7917c00SJeff Kirsher  */
2591f7917c00SJeff Kirsher static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2592f7917c00SJeff Kirsher {
2593f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(q);
2594f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2595f7917c00SJeff Kirsher 
2596f7917c00SJeff Kirsher 	if (!is_new_response(r, q))
2597f7917c00SJeff Kirsher 		return -1;
2598019be1cfSAlexander Duyck 	dma_rmb();
2599f7917c00SJeff Kirsher 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2600f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2601f7917c00SJeff Kirsher 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2602f7917c00SJeff Kirsher 		return 0;
2603f7917c00SJeff Kirsher 	}
2604f7917c00SJeff Kirsher 	napi_schedule(&qs->napi);
2605f7917c00SJeff Kirsher 	return 1;
2606f7917c00SJeff Kirsher }
2607f7917c00SJeff Kirsher 
2608f7917c00SJeff Kirsher /*
2609f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2610f7917c00SJeff Kirsher  * (i.e., response queue serviced in hard interrupt).
2611f7917c00SJeff Kirsher  */
2612f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2613f7917c00SJeff Kirsher {
2614f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2615f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2616f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2617f7917c00SJeff Kirsher 
2618f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2619f7917c00SJeff Kirsher 	if (process_responses(adap, qs, -1) == 0)
2620f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2621f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2622f7917c00SJeff Kirsher 		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2623f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2624f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2625f7917c00SJeff Kirsher }
2626f7917c00SJeff Kirsher 
2627f7917c00SJeff Kirsher /*
2628f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2629f7917c00SJeff Kirsher  * (i.e., response queue serviced by NAPI polling).
2630f7917c00SJeff Kirsher  */
2631f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2632f7917c00SJeff Kirsher {
2633f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2634f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2635f7917c00SJeff Kirsher 
2636f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2637f7917c00SJeff Kirsher 
2638f7917c00SJeff Kirsher 	if (handle_responses(qs->adap, q) < 0)
2639f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2640f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2641f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2642f7917c00SJeff Kirsher }
2643f7917c00SJeff Kirsher 
2644f7917c00SJeff Kirsher /*
2645f7917c00SJeff Kirsher  * The non-NAPI MSI interrupt handler.  This needs to handle data events from
2646f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2647f7917c00SJeff Kirsher  * the same MSI vector.  We use one SGE response queue per port in this mode
2648f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2649f7917c00SJeff Kirsher  */
2650f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi(int irq, void *cookie)
2651f7917c00SJeff Kirsher {
2652f7917c00SJeff Kirsher 	int new_packets = 0;
2653f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2654f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2655f7917c00SJeff Kirsher 
2656f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2657f7917c00SJeff Kirsher 
2658f7917c00SJeff Kirsher 	if (process_responses(adap, &adap->sge.qs[0], -1)) {
2659f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2660f7917c00SJeff Kirsher 			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2661f7917c00SJeff Kirsher 		new_packets = 1;
2662f7917c00SJeff Kirsher 	}
2663f7917c00SJeff Kirsher 
2664f7917c00SJeff Kirsher 	if (adap->params.nports == 2 &&
2665f7917c00SJeff Kirsher 	    process_responses(adap, &adap->sge.qs[1], -1)) {
2666f7917c00SJeff Kirsher 		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2667f7917c00SJeff Kirsher 
2668f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2669f7917c00SJeff Kirsher 			     V_NEWTIMER(q1->next_holdoff) |
2670f7917c00SJeff Kirsher 			     V_NEWINDEX(q1->cidx));
2671f7917c00SJeff Kirsher 		new_packets = 1;
2672f7917c00SJeff Kirsher 	}
2673f7917c00SJeff Kirsher 
2674f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2675f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2676f7917c00SJeff Kirsher 
2677f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2678f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2679f7917c00SJeff Kirsher }
2680f7917c00SJeff Kirsher 
2681f7917c00SJeff Kirsher static int rspq_check_napi(struct sge_qset *qs)
2682f7917c00SJeff Kirsher {
2683f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2684f7917c00SJeff Kirsher 
2685f7917c00SJeff Kirsher 	if (!napi_is_scheduled(&qs->napi) &&
2686f7917c00SJeff Kirsher 	    is_new_response(&q->desc[q->cidx], q)) {
2687f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
2688f7917c00SJeff Kirsher 		return 1;
2689f7917c00SJeff Kirsher 	}
2690f7917c00SJeff Kirsher 	return 0;
2691f7917c00SJeff Kirsher }
2692f7917c00SJeff Kirsher 
2693f7917c00SJeff Kirsher /*
2694f7917c00SJeff Kirsher  * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2695f7917c00SJeff Kirsher  * by NAPI polling).  Handles data events from SGE response queues as well as
2696f7917c00SJeff Kirsher  * error and other async events as they all use the same MSI vector.  We use
2697f7917c00SJeff Kirsher  * one SGE response queue per port in this mode and protect all response
2698f7917c00SJeff Kirsher  * queues with queue 0's lock.
2699f7917c00SJeff Kirsher  */
2700f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2701f7917c00SJeff Kirsher {
2702f7917c00SJeff Kirsher 	int new_packets;
2703f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2704f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2705f7917c00SJeff Kirsher 
2706f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2707f7917c00SJeff Kirsher 
2708f7917c00SJeff Kirsher 	new_packets = rspq_check_napi(&adap->sge.qs[0]);
2709f7917c00SJeff Kirsher 	if (adap->params.nports == 2)
2710f7917c00SJeff Kirsher 		new_packets += rspq_check_napi(&adap->sge.qs[1]);
2711f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2712f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2713f7917c00SJeff Kirsher 
2714f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2715f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2716f7917c00SJeff Kirsher }
2717f7917c00SJeff Kirsher 
2718f7917c00SJeff Kirsher /*
2719f7917c00SJeff Kirsher  * A helper function that processes responses and issues GTS.
2720f7917c00SJeff Kirsher  */
2721f7917c00SJeff Kirsher static inline int process_responses_gts(struct adapter *adap,
2722f7917c00SJeff Kirsher 					struct sge_rspq *rq)
2723f7917c00SJeff Kirsher {
2724f7917c00SJeff Kirsher 	int work;
2725f7917c00SJeff Kirsher 
2726f7917c00SJeff Kirsher 	work = process_responses(adap, rspq_to_qset(rq), -1);
2727f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2728f7917c00SJeff Kirsher 		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2729f7917c00SJeff Kirsher 	return work;
2730f7917c00SJeff Kirsher }
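
/*
 * Helper for the legacy INTx handlers below: it drains the response
 * queue completely (budget of -1) and immediately pushes the new
 * consumer index and holdoff timer to the SGE in a single GTS write.
 */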
2731f7917c00SJeff Kirsher 
2732f7917c00SJeff Kirsher /*
2733f7917c00SJeff Kirsher  * The legacy INTx interrupt handler.  This needs to handle data events from
2734f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2735f7917c00SJeff Kirsher  * the same interrupt pin.  We use one SGE response queue per port in this mode
2736f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2737f7917c00SJeff Kirsher  */
2738f7917c00SJeff Kirsher static irqreturn_t t3_intr(int irq, void *cookie)
2739f7917c00SJeff Kirsher {
2740f7917c00SJeff Kirsher 	int work_done, w0, w1;
2741f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2742f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2743f7917c00SJeff Kirsher 	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2744f7917c00SJeff Kirsher 
2745f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2746f7917c00SJeff Kirsher 
2747f7917c00SJeff Kirsher 	w0 = is_new_response(&q0->desc[q0->cidx], q0);
2748f7917c00SJeff Kirsher 	w1 = adap->params.nports == 2 &&
2749f7917c00SJeff Kirsher 	    is_new_response(&q1->desc[q1->cidx], q1);
2750f7917c00SJeff Kirsher 
2751f7917c00SJeff Kirsher 	if (likely(w0 | w1)) {
2752f7917c00SJeff Kirsher 		t3_write_reg(adap, A_PL_CLI, 0);
2753f7917c00SJeff Kirsher 		t3_read_reg(adap, A_PL_CLI);	/* flush */
2754f7917c00SJeff Kirsher 
2755f7917c00SJeff Kirsher 		if (likely(w0))
2756f7917c00SJeff Kirsher 			process_responses_gts(adap, q0);
2757f7917c00SJeff Kirsher 
2758f7917c00SJeff Kirsher 		if (w1)
2759f7917c00SJeff Kirsher 			process_responses_gts(adap, q1);
2760f7917c00SJeff Kirsher 
2761f7917c00SJeff Kirsher 		work_done = w0 | w1;
2762f7917c00SJeff Kirsher 	} else
2763f7917c00SJeff Kirsher 		work_done = t3_slow_intr_handler(adap);
2764f7917c00SJeff Kirsher 
2765f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2766f7917c00SJeff Kirsher 	return IRQ_RETVAL(work_done != 0);
2767f7917c00SJeff Kirsher }
2768f7917c00SJeff Kirsher 
2769f7917c00SJeff Kirsher /*
2770f7917c00SJeff Kirsher  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2771f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2772f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2773f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2774f7917c00SJeff Kirsher  * queue 0's lock.
2775f7917c00SJeff Kirsher  */
2776f7917c00SJeff Kirsher static irqreturn_t t3b_intr(int irq, void *cookie)
2777f7917c00SJeff Kirsher {
2778f7917c00SJeff Kirsher 	u32 map;
2779f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2780f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2781f7917c00SJeff Kirsher 
2782f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2783f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2784f7917c00SJeff Kirsher 
2785f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2786f7917c00SJeff Kirsher 		return IRQ_NONE;
2787f7917c00SJeff Kirsher 
2788f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2789f7917c00SJeff Kirsher 
2790f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2791f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2792f7917c00SJeff Kirsher 
2793f7917c00SJeff Kirsher 	if (likely(map & 1))
2794f7917c00SJeff Kirsher 		process_responses_gts(adap, q0);
2795f7917c00SJeff Kirsher 
2796f7917c00SJeff Kirsher 	if (map & 2)
2797f7917c00SJeff Kirsher 		process_responses_gts(adap, &adap->sge.qs[1].rspq);
2798f7917c00SJeff Kirsher 
2799f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2800f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2801f7917c00SJeff Kirsher }
2802f7917c00SJeff Kirsher 
2803f7917c00SJeff Kirsher /*
2804f7917c00SJeff Kirsher  * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2805f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2806f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2807f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2808f7917c00SJeff Kirsher  * queue 0's lock.
2809f7917c00SJeff Kirsher  */
2810f7917c00SJeff Kirsher static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2811f7917c00SJeff Kirsher {
2812f7917c00SJeff Kirsher 	u32 map;
2813f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2814f7917c00SJeff Kirsher 	struct sge_qset *qs0 = &adap->sge.qs[0];
2815f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &qs0->rspq;
2816f7917c00SJeff Kirsher 
2817f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2818f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2819f7917c00SJeff Kirsher 
2820f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2821f7917c00SJeff Kirsher 		return IRQ_NONE;
2822f7917c00SJeff Kirsher 
2823f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2824f7917c00SJeff Kirsher 
2825f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2826f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2827f7917c00SJeff Kirsher 
2828f7917c00SJeff Kirsher 	if (likely(map & 1))
2829f7917c00SJeff Kirsher 		napi_schedule(&qs0->napi);
2830f7917c00SJeff Kirsher 
2831f7917c00SJeff Kirsher 	if (map & 2)
2832f7917c00SJeff Kirsher 		napi_schedule(&adap->sge.qs[1].napi);
2833f7917c00SJeff Kirsher 
2834f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2835f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2836f7917c00SJeff Kirsher }
2837f7917c00SJeff Kirsher 
2838f7917c00SJeff Kirsher /**
2839f7917c00SJeff Kirsher  *	t3_intr_handler - select the top-level interrupt handler
2840f7917c00SJeff Kirsher  *	@adap: the adapter
2841f7917c00SJeff Kirsher  *	@polling: whether using NAPI to service response queues
2842f7917c00SJeff Kirsher  *
2843f7917c00SJeff Kirsher  *	Selects the top-level interrupt handler based on the type of interrupts
2844f7917c00SJeff Kirsher  *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2845f7917c00SJeff Kirsher  *	response queues.
2846f7917c00SJeff Kirsher  */
2847f7917c00SJeff Kirsher irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2848f7917c00SJeff Kirsher {
2849f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX)
2850f7917c00SJeff Kirsher 		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2851f7917c00SJeff Kirsher 	if (adap->flags & USING_MSI)
2852f7917c00SJeff Kirsher 		return polling ? t3_intr_msi_napi : t3_intr_msi;
2853f7917c00SJeff Kirsher 	if (adap->params.rev > 0)
2854f7917c00SJeff Kirsher 		return polling ? t3b_intr_napi : t3b_intr;
2855f7917c00SJeff Kirsher 	return t3_intr;
2856f7917c00SJeff Kirsher }
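
/*
 * Illustrative use of the selector above (a sketch only; the actual
 * registration lives in the driver's interrupt-setup path, and the
 * exact flags/name arguments shown here are assumptions):
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  adap->name, adap);
 *
 * MSI-X is the exception: each queue set's vector is registered with
 * the per-queue handler returned here and gets the queue set as its
 * cookie.
 */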
2857f7917c00SJeff Kirsher 
2858f7917c00SJeff Kirsher #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2859f7917c00SJeff Kirsher 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2860f7917c00SJeff Kirsher 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2861f7917c00SJeff Kirsher 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2862f7917c00SJeff Kirsher 		    F_HIRCQPARITYERROR)
2863f7917c00SJeff Kirsher #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2864f7917c00SJeff Kirsher #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2865f7917c00SJeff Kirsher 		      F_RSPQDISABLED)
2866f7917c00SJeff Kirsher 
2867f7917c00SJeff Kirsher /**
2868f7917c00SJeff Kirsher  *	t3_sge_err_intr_handler - SGE async event interrupt handler
2869f7917c00SJeff Kirsher  *	@adapter: the adapter
2870f7917c00SJeff Kirsher  *
2871f7917c00SJeff Kirsher  *	Interrupt handler for SGE asynchronous (non-data) events.
2872f7917c00SJeff Kirsher  */
2873f7917c00SJeff Kirsher void t3_sge_err_intr_handler(struct adapter *adapter)
2874f7917c00SJeff Kirsher {
2875f7917c00SJeff Kirsher 	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2876f7917c00SJeff Kirsher 				 ~F_FLEMPTY;
2877f7917c00SJeff Kirsher 
2878f7917c00SJeff Kirsher 	if (status & SGE_PARERR)
2879f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2880f7917c00SJeff Kirsher 			 status & SGE_PARERR);
2881f7917c00SJeff Kirsher 	if (status & SGE_FRAMINGERR)
2882f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2883f7917c00SJeff Kirsher 			 status & SGE_FRAMINGERR);
2884f7917c00SJeff Kirsher 
2885f7917c00SJeff Kirsher 	if (status & F_RSPQCREDITOVERFOW)
2886f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
2887f7917c00SJeff Kirsher 
2888f7917c00SJeff Kirsher 	if (status & F_RSPQDISABLED) {
2889f7917c00SJeff Kirsher 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2890f7917c00SJeff Kirsher 
2891f7917c00SJeff Kirsher 		CH_ALERT(adapter,
2892f7917c00SJeff Kirsher 			 "packet delivered to disabled response queue "
2893f7917c00SJeff Kirsher 			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2894f7917c00SJeff Kirsher 	}
2895f7917c00SJeff Kirsher 
2896f7917c00SJeff Kirsher 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2897f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_drop_task);
2898f7917c00SJeff Kirsher 
2899f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2900f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_full_task);
2901f7917c00SJeff Kirsher 
2902f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2903f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_empty_task);
2904f7917c00SJeff Kirsher 
2905f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2906f7917c00SJeff Kirsher 	if (status &  SGE_FATALERR)
2907f7917c00SJeff Kirsher 		t3_fatal_err(adapter);
2908f7917c00SJeff Kirsher }
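
/*
 * The collected status bits are written back to A_SG_INT_CAUSE to
 * acknowledge the reported conditions; anything matching SGE_FATALERR
 * is additionally escalated through t3_fatal_err(), while the doorbell
 * drop/full/empty conditions are deferred to workqueue tasks.
 */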
2909f7917c00SJeff Kirsher 
2910f7917c00SJeff Kirsher /**
2911f7917c00SJeff Kirsher  *	sge_timer_tx - perform periodic maintenance of an SGE qset
2912d0ea5cbdSJesse Brandeburg  *	@t: a timer list containing the SGE queue set to maintain
2913f7917c00SJeff Kirsher  *
2914f7917c00SJeff Kirsher  *	Runs periodically from a timer to perform maintenance of an SGE queue
2915f7917c00SJeff Kirsher  *	set.  It performs a single task:
2916f7917c00SJeff Kirsher  *
2917f7917c00SJeff Kirsher  *	Cleans up any completed Tx descriptors that may still be pending.
2918f7917c00SJeff Kirsher  *	Normal descriptor cleanup happens when new packets are added to a Tx
2919f7917c00SJeff Kirsher  *	queue so this timer is relatively infrequent and does any cleanup only
2920f7917c00SJeff Kirsher  *	if the Tx queue has not seen any new packets in a while.  We make a
2921f7917c00SJeff Kirsher  *	best effort attempt to reclaim descriptors, in that we don't wait
2922f7917c00SJeff Kirsher  *	around if we cannot get a queue's lock (which most likely is because
2923f7917c00SJeff Kirsher  *	someone else is queueing new packets and so will also handle the clean
2924f7917c00SJeff Kirsher  *	up).  Since control queues use immediate data exclusively we don't
2925f7917c00SJeff Kirsher  *	bother cleaning them up here.
2926f7917c00SJeff Kirsher  *
2927f7917c00SJeff Kirsher  */
29280e23daebSKees Cook static void sge_timer_tx(struct timer_list *t)
2929f7917c00SJeff Kirsher {
29300e23daebSKees Cook 	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2931f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2932f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2933f7917c00SJeff Kirsher 	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2934f7917c00SJeff Kirsher 	unsigned long next_period;
2935f7917c00SJeff Kirsher 
2936f7917c00SJeff Kirsher 	if (__netif_tx_trylock(qs->tx_q)) {
2937f7917c00SJeff Kirsher 		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2938f7917c00SJeff Kirsher 						    TX_RECLAIM_TIMER_CHUNK);
2939f7917c00SJeff Kirsher 		__netif_tx_unlock(qs->tx_q);
2940f7917c00SJeff Kirsher 	}
2941f7917c00SJeff Kirsher 
2942f7917c00SJeff Kirsher 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2943f7917c00SJeff Kirsher 		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2944f7917c00SJeff Kirsher 						     TX_RECLAIM_TIMER_CHUNK);
2945f7917c00SJeff Kirsher 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
2946f7917c00SJeff Kirsher 	}
2947f7917c00SJeff Kirsher 
2948f7917c00SJeff Kirsher 	next_period = TX_RECLAIM_PERIOD >>
2949f7917c00SJeff Kirsher 		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2950f7917c00SJeff Kirsher 		       TX_RECLAIM_TIMER_CHUNK);
2951f7917c00SJeff Kirsher 	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2952f7917c00SJeff Kirsher }
2953f7917c00SJeff Kirsher 
295449ce9c2cSBen Hutchings /**
2955f7917c00SJeff Kirsher  *	sge_timer_rx - perform periodic maintenance of an SGE qset
2956d0ea5cbdSJesse Brandeburg  *	@t: the timer list containing the SGE queue set to maintain
2957f7917c00SJeff Kirsher  *
2958f7917c00SJeff Kirsher  *	a) Replenishes Rx queues that have run out due to memory shortage.
2959f7917c00SJeff Kirsher  *	Normally new Rx buffers are added when existing ones are consumed but
2960f7917c00SJeff Kirsher  *	when out of memory a queue can become empty.  We try to add only a few
2961f7917c00SJeff Kirsher  *	buffers here, the queue will be replenished fully as these new buffers
2962f7917c00SJeff Kirsher  *	are used up if memory shortage has subsided.
2963f7917c00SJeff Kirsher  *
2964f7917c00SJeff Kirsher  *	b) Return coalesced response queue credits in case a response queue is
2965f7917c00SJeff Kirsher  *	starved.
2966f7917c00SJeff Kirsher  *
2967f7917c00SJeff Kirsher  */
29680e23daebSKees Cook static void sge_timer_rx(struct timer_list *t)
2969f7917c00SJeff Kirsher {
2970f7917c00SJeff Kirsher 	spinlock_t *lock;
29710e23daebSKees Cook 	struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2972f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2973f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2974f7917c00SJeff Kirsher 	u32 status;
2975f7917c00SJeff Kirsher 
2976f7917c00SJeff Kirsher 	lock = adap->params.rev > 0 ?
2977f7917c00SJeff Kirsher 	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2978f7917c00SJeff Kirsher 
2979f7917c00SJeff Kirsher 	if (!spin_trylock_irq(lock))
2980f7917c00SJeff Kirsher 		goto out;
2981f7917c00SJeff Kirsher 
2982f7917c00SJeff Kirsher 	if (napi_is_scheduled(&qs->napi))
2983f7917c00SJeff Kirsher 		goto unlock;
2984f7917c00SJeff Kirsher 
2985f7917c00SJeff Kirsher 	if (adap->params.rev < 4) {
2986f7917c00SJeff Kirsher 		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2987f7917c00SJeff Kirsher 
2988f7917c00SJeff Kirsher 		if (status & (1 << qs->rspq.cntxt_id)) {
2989f7917c00SJeff Kirsher 			qs->rspq.starved++;
2990f7917c00SJeff Kirsher 			if (qs->rspq.credits) {
2991f7917c00SJeff Kirsher 				qs->rspq.credits--;
2992f7917c00SJeff Kirsher 				refill_rspq(adap, &qs->rspq, 1);
2993f7917c00SJeff Kirsher 				qs->rspq.restarted++;
2994f7917c00SJeff Kirsher 				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2995f7917c00SJeff Kirsher 					     1 << qs->rspq.cntxt_id);
2996f7917c00SJeff Kirsher 			}
2997f7917c00SJeff Kirsher 		}
2998f7917c00SJeff Kirsher 	}
2999f7917c00SJeff Kirsher 
3000f7917c00SJeff Kirsher 	if (qs->fl[0].credits < qs->fl[0].size)
3001f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[0]);
3002f7917c00SJeff Kirsher 	if (qs->fl[1].credits < qs->fl[1].size)
3003f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[1]);
3004f7917c00SJeff Kirsher 
3005f7917c00SJeff Kirsher unlock:
3006f7917c00SJeff Kirsher 	spin_unlock_irq(lock);
3007f7917c00SJeff Kirsher out:
3008f7917c00SJeff Kirsher 	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3009f7917c00SJeff Kirsher }
3010f7917c00SJeff Kirsher 
3011f7917c00SJeff Kirsher /**
3012f7917c00SJeff Kirsher  *	t3_update_qset_coalesce - update coalescing settings for a queue set
3013f7917c00SJeff Kirsher  *	@qs: the SGE queue set
3014f7917c00SJeff Kirsher  *	@p: new queue set parameters
3015f7917c00SJeff Kirsher  *
3016f7917c00SJeff Kirsher  *	Update the coalescing settings for an SGE queue set.  Nothing is done
3017f7917c00SJeff Kirsher  *	if the queue set is not initialized yet.
3018f7917c00SJeff Kirsher  */
3019f7917c00SJeff Kirsher void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3020f7917c00SJeff Kirsher {
3021f7917c00SJeff Kirsher 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
3022f7917c00SJeff Kirsher 	qs->rspq.polling = p->polling;
3023f7917c00SJeff Kirsher 	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
3024f7917c00SJeff Kirsher }
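
/*
 * The multiplication by 10 above converts the user-visible coalescing
 * value from microseconds to the SGE's 0.1 us timer unit (the same unit
 * as NOMEM_INTR_DELAY), so e.g. a 50 us setting is programmed as a
 * holdoff timer of 500; a value of 0 is rounded up to 1 since a zero
 * holdoff is not allowed.
 */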
3025f7917c00SJeff Kirsher 
3026f7917c00SJeff Kirsher /**
3027f7917c00SJeff Kirsher  *	t3_sge_alloc_qset - initialize an SGE queue set
3028f7917c00SJeff Kirsher  *	@adapter: the adapter
3029f7917c00SJeff Kirsher  *	@id: the queue set id
3030f7917c00SJeff Kirsher  *	@nports: how many Ethernet ports will be using this queue set
3031f7917c00SJeff Kirsher  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
3032f7917c00SJeff Kirsher  *	@p: configuration parameters for this queue set
3033f7917c00SJeff Kirsher  *	@ntxq: number of Tx queues for the queue set
3034d0ea5cbdSJesse Brandeburg  *	@dev: net device associated with this queue set
3035f7917c00SJeff Kirsher  *	@netdevq: net device TX queue associated with this queue set
3036f7917c00SJeff Kirsher  *
3037f7917c00SJeff Kirsher  *	Allocate resources and initialize an SGE queue set.  A queue set
3038f7917c00SJeff Kirsher  *	comprises a response queue, two Rx free-buffer queues, and up to 3
3039f7917c00SJeff Kirsher  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
3040f7917c00SJeff Kirsher  *	queue, offload queue, and control queue.
3041f7917c00SJeff Kirsher  */
3042f7917c00SJeff Kirsher int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3043f7917c00SJeff Kirsher 		      int irq_vec_idx, const struct qset_params *p,
3044f7917c00SJeff Kirsher 		      int ntxq, struct net_device *dev,
3045f7917c00SJeff Kirsher 		      struct netdev_queue *netdevq)
3046f7917c00SJeff Kirsher {
3047f7917c00SJeff Kirsher 	int i, avail, ret = -ENOMEM;
3048f7917c00SJeff Kirsher 	struct sge_qset *q = &adapter->sge.qs[id];
3049f7917c00SJeff Kirsher 
3050f7917c00SJeff Kirsher 	init_qset_cntxt(q, id);
30510e23daebSKees Cook 	timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
30520e23daebSKees Cook 	timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
3053f7917c00SJeff Kirsher 
3054f7917c00SJeff Kirsher 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3055f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3056f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3057f7917c00SJeff Kirsher 				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
3058f7917c00SJeff Kirsher 	if (!q->fl[0].desc)
3059f7917c00SJeff Kirsher 		goto err;
3060f7917c00SJeff Kirsher 
3061f7917c00SJeff Kirsher 	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3062f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3063f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3064f7917c00SJeff Kirsher 				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
3065f7917c00SJeff Kirsher 	if (!q->fl[1].desc)
3066f7917c00SJeff Kirsher 		goto err;
3067f7917c00SJeff Kirsher 
3068f7917c00SJeff Kirsher 	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3069f7917c00SJeff Kirsher 				  sizeof(struct rsp_desc), 0,
3070f7917c00SJeff Kirsher 				  &q->rspq.phys_addr, NULL);
3071f7917c00SJeff Kirsher 	if (!q->rspq.desc)
3072f7917c00SJeff Kirsher 		goto err;
3073f7917c00SJeff Kirsher 
3074f7917c00SJeff Kirsher 	for (i = 0; i < ntxq; ++i) {
3075f7917c00SJeff Kirsher 		/*
3076f7917c00SJeff Kirsher 		 * The control queue always uses immediate data so does not
3077f7917c00SJeff Kirsher 		 * need to keep track of any sk_buffs.
3078f7917c00SJeff Kirsher 		 */
3079f7917c00SJeff Kirsher 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3080f7917c00SJeff Kirsher 
3081f7917c00SJeff Kirsher 		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3082f7917c00SJeff Kirsher 					    sizeof(struct tx_desc), sz,
3083f7917c00SJeff Kirsher 					    &q->txq[i].phys_addr,
3084f7917c00SJeff Kirsher 					    &q->txq[i].sdesc);
3085f7917c00SJeff Kirsher 		if (!q->txq[i].desc)
3086f7917c00SJeff Kirsher 			goto err;
3087f7917c00SJeff Kirsher 
3088f7917c00SJeff Kirsher 		q->txq[i].gen = 1;
3089f7917c00SJeff Kirsher 		q->txq[i].size = p->txq_size[i];
3090f7917c00SJeff Kirsher 		spin_lock_init(&q->txq[i].lock);
3091f7917c00SJeff Kirsher 		skb_queue_head_init(&q->txq[i].sendq);
3092f7917c00SJeff Kirsher 	}
3093f7917c00SJeff Kirsher 
30945e0b8928SÍñigo Huguet 	INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
30955e0b8928SÍñigo Huguet 	INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
3096f7917c00SJeff Kirsher 
3097f7917c00SJeff Kirsher 	q->fl[0].gen = q->fl[1].gen = 1;
3098f7917c00SJeff Kirsher 	q->fl[0].size = p->fl_size;
3099f7917c00SJeff Kirsher 	q->fl[1].size = p->jumbo_size;
3100f7917c00SJeff Kirsher 
3101f7917c00SJeff Kirsher 	q->rspq.gen = 1;
3102f7917c00SJeff Kirsher 	q->rspq.size = p->rspq_size;
3103f7917c00SJeff Kirsher 	spin_lock_init(&q->rspq.lock);
3104f7917c00SJeff Kirsher 	skb_queue_head_init(&q->rspq.rx_queue);
3105f7917c00SJeff Kirsher 
3106f7917c00SJeff Kirsher 	q->txq[TXQ_ETH].stop_thres = nports *
3107f7917c00SJeff Kirsher 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3108f7917c00SJeff Kirsher 
3109f7917c00SJeff Kirsher #if FL0_PG_CHUNK_SIZE > 0
3110f7917c00SJeff Kirsher 	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3111f7917c00SJeff Kirsher #else
3112f7917c00SJeff Kirsher 	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3113f7917c00SJeff Kirsher #endif
3114f7917c00SJeff Kirsher #if FL1_PG_CHUNK_SIZE > 0
3115f7917c00SJeff Kirsher 	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3116f7917c00SJeff Kirsher #else
3117f7917c00SJeff Kirsher 	q->fl[1].buf_size = is_offload(adapter) ?
3118f7917c00SJeff Kirsher 		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3119f7917c00SJeff Kirsher 		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3120f7917c00SJeff Kirsher #endif
3121f7917c00SJeff Kirsher 
3122f7917c00SJeff Kirsher 	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3123f7917c00SJeff Kirsher 	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3124f7917c00SJeff Kirsher 	q->fl[0].order = FL0_PG_ORDER;
3125f7917c00SJeff Kirsher 	q->fl[1].order = FL1_PG_ORDER;
3126f7917c00SJeff Kirsher 	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3127f7917c00SJeff Kirsher 	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3128f7917c00SJeff Kirsher 
3129f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->sge.reg_lock);
3130f7917c00SJeff Kirsher 
3131f7917c00SJeff Kirsher 	/* FL threshold comparison uses < */
3132f7917c00SJeff Kirsher 	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3133f7917c00SJeff Kirsher 				   q->rspq.phys_addr, q->rspq.size,
3134f7917c00SJeff Kirsher 				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3135f7917c00SJeff Kirsher 	if (ret)
3136f7917c00SJeff Kirsher 		goto err_unlock;
3137f7917c00SJeff Kirsher 
3138f7917c00SJeff Kirsher 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3139f7917c00SJeff Kirsher 		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3140f7917c00SJeff Kirsher 					  q->fl[i].phys_addr, q->fl[i].size,
3141f7917c00SJeff Kirsher 					  q->fl[i].buf_size - SGE_PG_RSVD,
3142f7917c00SJeff Kirsher 					  p->cong_thres, 1, 0);
3143f7917c00SJeff Kirsher 		if (ret)
3144f7917c00SJeff Kirsher 			goto err_unlock;
3145f7917c00SJeff Kirsher 	}
3146f7917c00SJeff Kirsher 
3147f7917c00SJeff Kirsher 	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3148f7917c00SJeff Kirsher 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3149f7917c00SJeff Kirsher 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3150f7917c00SJeff Kirsher 				 1, 0);
3151f7917c00SJeff Kirsher 	if (ret)
3152f7917c00SJeff Kirsher 		goto err_unlock;
3153f7917c00SJeff Kirsher 
3154f7917c00SJeff Kirsher 	if (ntxq > 1) {
3155f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3156f7917c00SJeff Kirsher 					 USE_GTS, SGE_CNTXT_OFLD, id,
3157f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].phys_addr,
3158f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
3159f7917c00SJeff Kirsher 		if (ret)
3160f7917c00SJeff Kirsher 			goto err_unlock;
3161f7917c00SJeff Kirsher 	}
3162f7917c00SJeff Kirsher 
3163f7917c00SJeff Kirsher 	if (ntxq > 2) {
3164f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3165f7917c00SJeff Kirsher 					 SGE_CNTXT_CTRL, id,
3166f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].phys_addr,
3167f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].size,
3168f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].token, 1, 0);
3169f7917c00SJeff Kirsher 		if (ret)
3170f7917c00SJeff Kirsher 			goto err_unlock;
3171f7917c00SJeff Kirsher 	}
3172f7917c00SJeff Kirsher 
3173f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3174f7917c00SJeff Kirsher 
3175f7917c00SJeff Kirsher 	q->adap = adapter;
3176f7917c00SJeff Kirsher 	q->netdev = dev;
3177f7917c00SJeff Kirsher 	q->tx_q = netdevq;
3178f7917c00SJeff Kirsher 	t3_update_qset_coalesce(q, p);
3179f7917c00SJeff Kirsher 
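	/*
	 * Prime the free lists and the response queue.  FL0 must receive at
	 * least some buffers or the queue set is unusable; a partially
	 * filled free list only warrants a warning.
	 */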
3180f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3181f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3182f7917c00SJeff Kirsher 	if (!avail) {
3183f7917c00SJeff Kirsher 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3184ff992489SZhang Changzhong 		ret = -ENOMEM;
3185f7917c00SJeff Kirsher 		goto err;
3186f7917c00SJeff Kirsher 	}
3187f7917c00SJeff Kirsher 	if (avail < q->fl[0].size)
3188f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3189f7917c00SJeff Kirsher 			avail);
3190f7917c00SJeff Kirsher 
3191f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3192f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3193f7917c00SJeff Kirsher 	if (avail < q->fl[1].size)
3194f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3195f7917c00SJeff Kirsher 			avail);
3196f7917c00SJeff Kirsher 	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3197f7917c00SJeff Kirsher 
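	/*
	 * Arm the response queue's interrupt holdoff timer by writing the
	 * GTS register with the queue id and the configured holdoff value.
	 */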
3198f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3199f7917c00SJeff Kirsher 		     V_NEWTIMER(q->rspq.holdoff_tmr));
3200f7917c00SJeff Kirsher 
3201f7917c00SJeff Kirsher 	return 0;
3202f7917c00SJeff Kirsher 
3203f7917c00SJeff Kirsher err_unlock:
3204f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3205f7917c00SJeff Kirsher err:
3206f7917c00SJeff Kirsher 	t3_free_qset(adapter, q);
3207f7917c00SJeff Kirsher 	return ret;
3208f7917c00SJeff Kirsher }
3209f7917c00SJeff Kirsher 
3210f7917c00SJeff Kirsher /**
3211f7917c00SJeff Kirsher  *      t3_start_sge_timers - start SGE timer callbacks
3212f7917c00SJeff Kirsher  *      @adap: the adapter
3213f7917c00SJeff Kirsher  *
3214f7917c00SJeff Kirsher  *      Starts each SGE queue set's timer callback
3215f7917c00SJeff Kirsher  */
3216f7917c00SJeff Kirsher void t3_start_sge_timers(struct adapter *adap)
3217f7917c00SJeff Kirsher {
3218f7917c00SJeff Kirsher 	int i;
3219f7917c00SJeff Kirsher 
3220f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3221f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3222f7917c00SJeff Kirsher 
3223f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
32242acc0abcSColin Ian King 			mod_timer(&q->tx_reclaim_timer,
32252acc0abcSColin Ian King 				  jiffies + TX_RECLAIM_PERIOD);
3226f7917c00SJeff Kirsher 
3227f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
32282acc0abcSColin Ian King 			mod_timer(&q->rx_reclaim_timer,
32292acc0abcSColin Ian King 				  jiffies + RX_RECLAIM_PERIOD);
3230f7917c00SJeff Kirsher 	}
3231f7917c00SJeff Kirsher }
3232f7917c00SJeff Kirsher 
3233f7917c00SJeff Kirsher /**
3234f7917c00SJeff Kirsher  *	t3_stop_sge_timers - stop SGE timer callbacks
3235f7917c00SJeff Kirsher  *	@adap: the adapter
3236f7917c00SJeff Kirsher  *
3237f7917c00SJeff Kirsher  *	Stops each SGE queue set's timer callback
3238f7917c00SJeff Kirsher  */
3239f7917c00SJeff Kirsher void t3_stop_sge_timers(struct adapter *adap)
3240f7917c00SJeff Kirsher {
3241f7917c00SJeff Kirsher 	int i;
3242f7917c00SJeff Kirsher 
3243f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3244f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3245f7917c00SJeff Kirsher 
3246f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
3247f7917c00SJeff Kirsher 			del_timer_sync(&q->tx_reclaim_timer);
3248f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
3249f7917c00SJeff Kirsher 			del_timer_sync(&q->rx_reclaim_timer);
3250f7917c00SJeff Kirsher 	}
3251f7917c00SJeff Kirsher }
3252f7917c00SJeff Kirsher 
3253f7917c00SJeff Kirsher /**
3254f7917c00SJeff Kirsher  *	t3_free_sge_resources - free SGE resources
3255f7917c00SJeff Kirsher  *	@adap: the adapter
3256f7917c00SJeff Kirsher  *
3257f7917c00SJeff Kirsher  *	Frees resources used by the SGE queue sets.
3258f7917c00SJeff Kirsher  */
3259f7917c00SJeff Kirsher void t3_free_sge_resources(struct adapter *adap)
3260f7917c00SJeff Kirsher {
3261f7917c00SJeff Kirsher 	int i;
3262f7917c00SJeff Kirsher 
3263f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i)
3264f7917c00SJeff Kirsher 		t3_free_qset(adap, &adap->sge.qs[i]);
3265f7917c00SJeff Kirsher }
3266f7917c00SJeff Kirsher 
3267f7917c00SJeff Kirsher /**
3268f7917c00SJeff Kirsher  *	t3_sge_start - enable SGE
3269f7917c00SJeff Kirsher  *	@adap: the adapter
3270f7917c00SJeff Kirsher  *
3271f7917c00SJeff Kirsher  *	Enables the SGE for DMAs.  This is the last step in starting packet
3272f7917c00SJeff Kirsher  *	transfers.
3273f7917c00SJeff Kirsher  */
3274f7917c00SJeff Kirsher void t3_sge_start(struct adapter *adap)
3275f7917c00SJeff Kirsher {
3276f7917c00SJeff Kirsher 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3277f7917c00SJeff Kirsher }
3278f7917c00SJeff Kirsher 
3279f7917c00SJeff Kirsher /**
3280a17409e7SThomas Gleixner  *	t3_sge_stop_dma - Disable SGE DMA engine operation
3281f7917c00SJeff Kirsher  *	@adap: the adapter
3282f7917c00SJeff Kirsher  *
3283a17409e7SThomas Gleixner  *	Can be invoked from interrupt context, e.g. from the error handler.
3284a17409e7SThomas Gleixner  *
32855e0b8928SÍñigo Huguet  *	Note that this function cannot cancel the queue restart work
3286a17409e7SThomas Gleixner  *	items, as it may not sleep when called from interrupt context;
32875e0b8928SÍñigo Huguet  *	the work items are harmless anyway since the doorbells are
3288a17409e7SThomas Gleixner  *	disabled.  The driver will call t3_sge_stop() later from process
32895e0b8928SÍñigo Huguet  *	context, at which point any still-pending work items are cancelled.
3290a17409e7SThomas Gleixner  */
3291a17409e7SThomas Gleixner void t3_sge_stop_dma(struct adapter *adap)
3292a17409e7SThomas Gleixner {
3293a17409e7SThomas Gleixner 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3294a17409e7SThomas Gleixner }
3295a17409e7SThomas Gleixner 
3296a17409e7SThomas Gleixner /**
3297a17409e7SThomas Gleixner  *	t3_sge_stop - disable SGE operation completely
3298a17409e7SThomas Gleixner  *	@adap: the adapter
3299a17409e7SThomas Gleixner  *
3300a17409e7SThomas Gleixner  *	Called from process context.  Disables the DMA engine and
33015e0b8928SÍñigo Huguet  *	cancels any pending queue restart work items.
3302f7917c00SJeff Kirsher  */
3303f7917c00SJeff Kirsher void t3_sge_stop(struct adapter *adap)
3304f7917c00SJeff Kirsher {
3305f7917c00SJeff Kirsher 	int i;
3306f7917c00SJeff Kirsher 
3307a17409e7SThomas Gleixner 	t3_sge_stop_dma(adap);
3308a17409e7SThomas Gleixner 
3309f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3310f7917c00SJeff Kirsher 		struct sge_qset *qs = &adap->sge.qs[i];
3311f7917c00SJeff Kirsher 
33125e0b8928SÍñigo Huguet 		cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
3313*d5a73dcfSÍñigo Huguet 		cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
3314f7917c00SJeff Kirsher 	}
3315f7917c00SJeff Kirsher }
3316f7917c00SJeff Kirsher 
3317f7917c00SJeff Kirsher /**
3318f7917c00SJeff Kirsher  *	t3_sge_init - initialize SGE
3319f7917c00SJeff Kirsher  *	@adap: the adapter
3320f7917c00SJeff Kirsher  *	@p: the SGE parameters
3321f7917c00SJeff Kirsher  *
3322f7917c00SJeff Kirsher  *	Performs SGE initialization needed every time after a chip reset.
3323f7917c00SJeff Kirsher  *	We do not initialize any of the queue sets here; instead the driver
3324f7917c00SJeff Kirsher  *	top-level must request those individually.  We also do not enable DMA
3325f7917c00SJeff Kirsher  *	here; that should be done after the queues have been set up.
3326f7917c00SJeff Kirsher  */
3327f7917c00SJeff Kirsher void t3_sge_init(struct adapter *adap, struct sge_params *p)
3328f7917c00SJeff Kirsher {
3329f7917c00SJeff Kirsher 	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3330f7917c00SJeff Kirsher 
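	/*
	 * ups encodes the size of the user-space region implied by the
	 * length of PCI resource 2 (in 4KB units) and feeds V_USERSPACESIZE;
	 * the remaining bits of ctrl select the global SGE operating modes.
	 */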
3331f7917c00SJeff Kirsher 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3332f7917c00SJeff Kirsher 	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3333f7917c00SJeff Kirsher 	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3334f7917c00SJeff Kirsher 	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3335f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 1
3336f7917c00SJeff Kirsher 	ctrl |= F_EGRGENCTRL;
3337f7917c00SJeff Kirsher #endif
3338f7917c00SJeff Kirsher 	if (adap->params.rev > 0) {
3339f7917c00SJeff Kirsher 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
3340f7917c00SJeff Kirsher 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3341f7917c00SJeff Kirsher 	}
3342f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
3343f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3344f7917c00SJeff Kirsher 		     V_LORCQDRBTHRSH(512));
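	/* Program the SGE timer tick as core clock cycles per 1/10 usec. */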
3345f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3346f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3347f7917c00SJeff Kirsher 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3348f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3349f7917c00SJeff Kirsher 		     adap->params.rev < T3_REV_C ? 1000 : 500);
3350f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3351f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3352f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3353f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3354f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3355f7917c00SJeff Kirsher }
3356f7917c00SJeff Kirsher 
3357f7917c00SJeff Kirsher /**
3358f7917c00SJeff Kirsher  *	t3_sge_prep - one-time SGE initialization
3359f7917c00SJeff Kirsher  *	@adap: the associated adapter
3360f7917c00SJeff Kirsher  *	@p: SGE parameters
3361f7917c00SJeff Kirsher  *
3362f7917c00SJeff Kirsher  *	Performs one-time initialization of SGE SW state.  Includes determining
3363f7917c00SJeff Kirsher  *	defaults for the assorted SGE parameters, which admins can change until
3364f7917c00SJeff Kirsher  *	they are used to initialize the SGE.
3365f7917c00SJeff Kirsher  */
3366f7917c00SJeff Kirsher void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3367f7917c00SJeff Kirsher {
3368f7917c00SJeff Kirsher 	int i;
3369f7917c00SJeff Kirsher 
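	/*
	 * Largest payload that, together with the CPL header and the
	 * skb_shared_info tail, still fits in a 16KB receive buffer.
	 */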
3370f7917c00SJeff Kirsher 	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3371f7917c00SJeff Kirsher 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3372f7917c00SJeff Kirsher 
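	/*
	 * Conservative per-queue-set software defaults: NAPI polling on
	 * rev > 0 parts, 5us interrupt coalescing, and moderate ring sizes.
	 * These are only defaults and may be tuned before the queue sets
	 * are created.
	 */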
3373f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3374f7917c00SJeff Kirsher 		struct qset_params *q = p->qset + i;
3375f7917c00SJeff Kirsher 
3376f7917c00SJeff Kirsher 		q->polling = adap->params.rev > 0;
3377f7917c00SJeff Kirsher 		q->coalesce_usecs = 5;
3378f7917c00SJeff Kirsher 		q->rspq_size = 1024;
3379f7917c00SJeff Kirsher 		q->fl_size = 1024;
3380f7917c00SJeff Kirsher 		q->jumbo_size = 512;
3381f7917c00SJeff Kirsher 		q->txq_size[TXQ_ETH] = 1024;
3382f7917c00SJeff Kirsher 		q->txq_size[TXQ_OFLD] = 1024;
3383f7917c00SJeff Kirsher 		q->txq_size[TXQ_CTRL] = 256;
3384f7917c00SJeff Kirsher 		q->cong_thres = 0;
3385f7917c00SJeff Kirsher 	}
3386f7917c00SJeff Kirsher 
3387f7917c00SJeff Kirsher 	spin_lock_init(&adap->sge.reg_lock);
3388f7917c00SJeff Kirsher }
3389