/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

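/*
 * Rx packets no larger than SGE_RX_COPY_THRES are copied into a freshly
 * allocated skb and the original buffer is recycled.  For page-chunk buffers
 * the first SGE_RX_PULL_LEN bytes are pulled into the skb's linear area.
 */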
#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

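/*
 * Bytes reserved at the end of each page chunk; the per-page SW reference
 * count (fl_pg_chunk.p_cnt) lives in this reserved area.
 */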
#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

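/*
 * If a free list falls below SGE_RX_DROP_THRES credits, incoming packets may
 * be dropped and their buffers recycled instead of being passed up the stack.
 * RX_RECLAIM_PERIOD is the period of the Rx buffer reclaim timer.
 */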
#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
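 *
 * For example, with SGE_NUM_GENBITS == 2 the table below implies
 * WR_FLITS == 15, so 16 flits need 1 + (16 - 2) / 14 = 2 descriptors.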
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};

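/*
 * Helpers that map an embedded free list, response queue, or Tx queue back to
 * the queue set that contains it.
 */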
static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  Because the
 *	result is a compile-time constant, the compiler can optimize away the
 *	unmapping code when it is not needed.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, the information necessary to unmap an
 *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 *	descriptors (the physical addresses of the various data buffers), and
 *	the SW descriptor state (assorted indices).  The send functions
 *	initialize the indices for the first packet descriptor so we can unmap
 *	the buffers held in the first Tx descriptor here, and we have enough
 *	information at this point to set the state for the next Tx descriptor.
 *
 *	Note that it is possible to clean up the first descriptor of a packet
 *	before the send routines have written the next descriptors, but this
 *	race does not cause any problem.  We just end up writing the unmapping
 *	info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop) {
				dev_consume_skb_any(d->skb);
				d->skb = NULL;
			}
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@chunk: maximum number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

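/*
 * Release the buffer held by an Rx SW descriptor: drop a reference on a page
 * chunk (unmapping the page when the last chunk reference goes away) or unmap
 * and free an sk_buff, depending on how the free list is populated.
 */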
static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			pci_unmap_page(pdev,
				       d->pg_chunk.mapping,
				       q->alloc_size, PCI_DMA_FROMDEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va:  buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	dma_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

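/*
 * Like add_one_rx_buf() but for page-chunk buffers, whose DMA mapping was
 * already established when the backing page was allocated.
 */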
static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

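/*
 * Carve the next buf_size chunk out of the free list's current page,
 * allocating and mapping a fresh page of the requested order if needed.  The
 * SW reference count kept in the page's reserved tail (SGE_PG_RSVD) tracks
 * how many chunks still reference the page's DMA mapping.
 */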
static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
			__free_pages(q->pg_chunk.page, order);
			q->pg_chunk.page = NULL;
			return -EIO;
		}
		q->pg_chunk.mapping = mapping;
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}

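/*
 * Notify the HW of newly added free-list buffers, but only once the pending
 * credits reach a quarter of the credits currently on the list, to limit
 * doorbell writes.
 */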
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must ensure that
 *	@n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			dma_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			pci_dma_sync_single_for_device(adap->pdev, mapping,
						q->buf_size - SGE_PG_RSVD,
						PCI_DMA_FROMDEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

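/*
 * Top up a free list toward its capacity, at most MAX_RX_REFILL buffers at a
 * time, using atomic allocations.
 */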
static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adap: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	dma_wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t * phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	return p;
}

/**
 *	t3_reset_qset - reset an SGE queue set
 *	@q: the queue set
 *
 *	Reset the qset structure.  The NAPI structure is preserved in the
 *	event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}


/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@q: the response queue that received the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise we attach the original buffer as a page fragment to a fresh
 *	sk_buff.  If a positive drop threshold is supplied packets are dropped
 *	and their buffers recycled if (a) the number of remaining buffers is
 *	under the threshold and the packet is too big to copy, or (b) there's
 *	no system memory.
 *
 * 	Note: this function is similar to @get_packet but deals with Rx buffers
 * 	that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);

	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
						       len,
						       PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
				    PCI_DMA_FROMDEVICE);
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		pci_unmap_page(adap->pdev,
			       sd->pg_chunk.mapping,
			       fl->alloc_size,
			       PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 * 	Returns the number of Tx descriptors needed for the given Ethernet
 * 	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

/*	map_skb - map a packet main body and its page fragments
 *	@pdev: the PCI device
 *	@skb: the packet
 *	@addr: placeholder to save the mapped addresses
 *
 *	Map the main body of an sk_buff and its page fragments, if any.
 */
static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	if (skb_headlen(skb)) {
		*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
				       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto out_err;
		addr++;
	}

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
					 DMA_TO_DEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto unwind;
		addr++;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
			       DMA_TO_DEVICE);

	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
out_err:
	return -ENOMEM;
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@addr: the list of the mapped addresses
 *
 *	Copies the scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int write_sgl(const struct sk_buff *skb,
				     struct sg_ent *sgp, unsigned char *start,
				     unsigned int len, const dma_addr_t *addr)
{
	unsigned int i, j = 0, k = 0, nfrags;

	if (len) {
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[j++] = cpu_to_be64(addr[k++]);
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(addr[k++]);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

1037f7917c00SJeff Kirsher /**
1038f7917c00SJeff Kirsher  *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1039f7917c00SJeff Kirsher  *	@adap: the adapter
1040f7917c00SJeff Kirsher  *	@q: the Tx queue
1041f7917c00SJeff Kirsher  *
1042f7917c00SJeff Kirsher  *	Ring the doorbel if a Tx queue is asleep.  There is a natural race,
1043f7917c00SJeff Kirsher  *	where the HW is going to sleep just after we checked, however,
1044f7917c00SJeff Kirsher  *	then the interrupt handler will detect the outstanding TX packet
1045f7917c00SJeff Kirsher  *	and ring the doorbell for us.
1046f7917c00SJeff Kirsher  *
1047f7917c00SJeff Kirsher  *	When GTS is disabled we unconditionally ring the doorbell.
1048f7917c00SJeff Kirsher  */
1049f7917c00SJeff Kirsher static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1050f7917c00SJeff Kirsher {
1051f7917c00SJeff Kirsher #if USE_GTS
1052f7917c00SJeff Kirsher 	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1053f7917c00SJeff Kirsher 	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1054f7917c00SJeff Kirsher 		set_bit(TXQ_LAST_PKT_DB, &q->flags);
1055f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_KDOORBELL,
1056f7917c00SJeff Kirsher 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1057f7917c00SJeff Kirsher 	}
1058f7917c00SJeff Kirsher #else
1059f7917c00SJeff Kirsher 	wmb();			/* write descriptors before telling HW */
1060f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_KDOORBELL,
1061f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1062f7917c00SJeff Kirsher #endif
1063f7917c00SJeff Kirsher }
1064f7917c00SJeff Kirsher 
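/*
 * When the SGE uses two generation bits per Tx descriptor the second copy
 * lives in the descriptor's final flit; writing it last lets the hardware
 * recognize a completely written descriptor.
 */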
1065f7917c00SJeff Kirsher static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1066f7917c00SJeff Kirsher {
1067f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 2
1068f7917c00SJeff Kirsher 	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1069f7917c00SJeff Kirsher #endif
1070f7917c00SJeff Kirsher }
1071f7917c00SJeff Kirsher 
1072f7917c00SJeff Kirsher /**
1073f7917c00SJeff Kirsher  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1074f7917c00SJeff Kirsher  *	@ndesc: number of Tx descriptors spanned by the SGL
1075f7917c00SJeff Kirsher  *	@skb: the packet corresponding to the WR
1076f7917c00SJeff Kirsher  *	@d: first Tx descriptor to be written
1077f7917c00SJeff Kirsher  *	@pidx: index of the first of the above descriptors
1078f7917c00SJeff Kirsher  *	@q: the SGE Tx queue
1079f7917c00SJeff Kirsher  *	@sgl: the SGL
1080f7917c00SJeff Kirsher  *	@flits: number of flits to the start of the SGL in the first descriptor
1081f7917c00SJeff Kirsher  *	@sgl_flits: the SGL size in flits
1082f7917c00SJeff Kirsher  *	@gen: the Tx descriptor generation
1083f7917c00SJeff Kirsher  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1084f7917c00SJeff Kirsher  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1085f7917c00SJeff Kirsher  *
1086f7917c00SJeff Kirsher  *	Write a work request header and an associated SGL.  If the SGL is
1087f7917c00SJeff Kirsher  *	small enough to fit into one Tx descriptor it has already been written
1088f7917c00SJeff Kirsher  *	and we just need to write the WR header.  Otherwise we distribute the
1089f7917c00SJeff Kirsher  *	SGL across the number of descriptors it spans.
1090f7917c00SJeff Kirsher  */
1091f7917c00SJeff Kirsher static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1092f7917c00SJeff Kirsher 			     struct tx_desc *d, unsigned int pidx,
1093f7917c00SJeff Kirsher 			     const struct sge_txq *q,
1094f7917c00SJeff Kirsher 			     const struct sg_ent *sgl,
1095f7917c00SJeff Kirsher 			     unsigned int flits, unsigned int sgl_flits,
1096f7917c00SJeff Kirsher 			     unsigned int gen, __be32 wr_hi,
1097f7917c00SJeff Kirsher 			     __be32 wr_lo)
1098f7917c00SJeff Kirsher {
1099f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1100f7917c00SJeff Kirsher 	struct tx_sw_desc *sd = &q->sdesc[pidx];
1101f7917c00SJeff Kirsher 
1102f7917c00SJeff Kirsher 	sd->skb = skb;
1103f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1104f7917c00SJeff Kirsher 		sd->fragidx = 0;
1105f7917c00SJeff Kirsher 		sd->addr_idx = 0;
1106f7917c00SJeff Kirsher 		sd->sflit = flits;
1107f7917c00SJeff Kirsher 	}
1108f7917c00SJeff Kirsher 
1109f7917c00SJeff Kirsher 	if (likely(ndesc == 1)) {
1110f7917c00SJeff Kirsher 		sd->eop = 1;
1111f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1112f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1113019be1cfSAlexander Duyck 		dma_wmb();
1114f7917c00SJeff Kirsher 		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1115f7917c00SJeff Kirsher 				   V_WR_GEN(gen)) | wr_lo;
1116f7917c00SJeff Kirsher 		wr_gen2(d, gen);
1117f7917c00SJeff Kirsher 	} else {
1118f7917c00SJeff Kirsher 		unsigned int ogen = gen;
1119f7917c00SJeff Kirsher 		const u64 *fp = (const u64 *)sgl;
1120f7917c00SJeff Kirsher 		struct work_request_hdr *wp = wrp;
1121f7917c00SJeff Kirsher 
1122f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1123f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1124f7917c00SJeff Kirsher 
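		/*
		 * The WR spans several descriptors: copy the SGL piecewise,
		 * giving each continuation descriptor its own header with the
		 * current generation, and complete the first descriptor (by
		 * writing wp->wr_lo with the original generation) only after
		 * everything else is in place.
		 */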
1125f7917c00SJeff Kirsher 		while (sgl_flits) {
1126f7917c00SJeff Kirsher 			unsigned int avail = WR_FLITS - flits;
1127f7917c00SJeff Kirsher 
1128f7917c00SJeff Kirsher 			if (avail > sgl_flits)
1129f7917c00SJeff Kirsher 				avail = sgl_flits;
1130f7917c00SJeff Kirsher 			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1131f7917c00SJeff Kirsher 			sgl_flits -= avail;
1132f7917c00SJeff Kirsher 			ndesc--;
1133f7917c00SJeff Kirsher 			if (!sgl_flits)
1134f7917c00SJeff Kirsher 				break;
1135f7917c00SJeff Kirsher 
1136f7917c00SJeff Kirsher 			fp += avail;
1137f7917c00SJeff Kirsher 			d++;
1138f7917c00SJeff Kirsher 			sd->eop = 0;
1139f7917c00SJeff Kirsher 			sd++;
1140f7917c00SJeff Kirsher 			if (++pidx == q->size) {
1141f7917c00SJeff Kirsher 				pidx = 0;
1142f7917c00SJeff Kirsher 				gen ^= 1;
1143f7917c00SJeff Kirsher 				d = q->desc;
1144f7917c00SJeff Kirsher 				sd = q->sdesc;
1145f7917c00SJeff Kirsher 			}
1146f7917c00SJeff Kirsher 
1147f7917c00SJeff Kirsher 			sd->skb = skb;
1148f7917c00SJeff Kirsher 			wrp = (struct work_request_hdr *)d;
1149f7917c00SJeff Kirsher 			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1150f7917c00SJeff Kirsher 					   V_WR_SGLSFLT(1)) | wr_hi;
1151f7917c00SJeff Kirsher 			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1152f7917c00SJeff Kirsher 							sgl_flits + 1)) |
1153f7917c00SJeff Kirsher 					   V_WR_GEN(gen)) | wr_lo;
1154f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1155f7917c00SJeff Kirsher 			flits = 1;
1156f7917c00SJeff Kirsher 		}
1157f7917c00SJeff Kirsher 		sd->eop = 1;
1158f7917c00SJeff Kirsher 		wrp->wr_hi |= htonl(F_WR_EOP);
1159019be1cfSAlexander Duyck 		dma_wmb();
1160f7917c00SJeff Kirsher 		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1161f7917c00SJeff Kirsher 		wr_gen2((struct tx_desc *)wp, ogen);
1162f7917c00SJeff Kirsher 		WARN_ON(ndesc != 0);
1163f7917c00SJeff Kirsher 	}
1164f7917c00SJeff Kirsher }
1165f7917c00SJeff Kirsher 
1166f7917c00SJeff Kirsher /**
1167f7917c00SJeff Kirsher  *	write_tx_pkt_wr - write a TX_PKT work request
1168f7917c00SJeff Kirsher  *	@adap: the adapter
1169f7917c00SJeff Kirsher  *	@skb: the packet to send
1170f7917c00SJeff Kirsher  *	@pi: the egress interface
1171f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1172f7917c00SJeff Kirsher  *	@gen: the generation value to use
1173f7917c00SJeff Kirsher  *	@q: the Tx queue
1174f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1175f7917c00SJeff Kirsher  *	@compl: the value of the COMPL bit to use
 *	@addr: the list of DMA-mapped addresses for the packet's buffers
1176f7917c00SJeff Kirsher  *
1177f7917c00SJeff Kirsher  *	Generate a TX_PKT work request to send the supplied packet.
1178f7917c00SJeff Kirsher  */
1179f7917c00SJeff Kirsher static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1180f7917c00SJeff Kirsher 			    const struct port_info *pi,
1181f7917c00SJeff Kirsher 			    unsigned int pidx, unsigned int gen,
1182f7917c00SJeff Kirsher 			    struct sge_txq *q, unsigned int ndesc,
1183c69fe407SArjun Vynipadath 			    unsigned int compl, const dma_addr_t *addr)
1184f7917c00SJeff Kirsher {
1185f7917c00SJeff Kirsher 	unsigned int flits, sgl_flits, cntrl, tso_info;
1186f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1187f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1188f7917c00SJeff Kirsher 	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1189f7917c00SJeff Kirsher 
1190f7917c00SJeff Kirsher 	cpl->len = htonl(skb->len);
1191f7917c00SJeff Kirsher 	cntrl = V_TXPKT_INTF(pi->port_id);
1192f7917c00SJeff Kirsher 
1193df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1194df8a39deSJiri Pirko 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1195f7917c00SJeff Kirsher 
1196f7917c00SJeff Kirsher 	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1197f7917c00SJeff Kirsher 	if (tso_info) {
1198f7917c00SJeff Kirsher 		int eth_type;
1199f7917c00SJeff Kirsher 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1200f7917c00SJeff Kirsher 
1201f7917c00SJeff Kirsher 		d->flit[2] = 0;
1202f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1203f7917c00SJeff Kirsher 		hdr->cntrl = htonl(cntrl);
1204f7917c00SJeff Kirsher 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
1205f7917c00SJeff Kirsher 		    CPL_ETH_II : CPL_ETH_II_VLAN;
1206f7917c00SJeff Kirsher 		tso_info |= V_LSO_ETH_TYPE(eth_type) |
1207f7917c00SJeff Kirsher 		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1208f7917c00SJeff Kirsher 		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1209f7917c00SJeff Kirsher 		hdr->lso_info = htonl(tso_info);
1210f7917c00SJeff Kirsher 		flits = 3;
1211f7917c00SJeff Kirsher 	} else {
1212f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1213f7917c00SJeff Kirsher 		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
1214f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1215f7917c00SJeff Kirsher 		cpl->cntrl = htonl(cntrl);
1216f7917c00SJeff Kirsher 
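		/*
		 * Short packets are copied into the descriptor as immediate
		 * data; no SGL or DMA mapping is needed and the skb can be
		 * consumed right away.
		 */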
1217f7917c00SJeff Kirsher 		if (skb->len <= WR_LEN - sizeof(*cpl)) {
1218f7917c00SJeff Kirsher 			q->sdesc[pidx].skb = NULL;
1219f7917c00SJeff Kirsher 			if (!skb->data_len)
1220f7917c00SJeff Kirsher 				skb_copy_from_linear_data(skb, &d->flit[2],
1221f7917c00SJeff Kirsher 							  skb->len);
1222f7917c00SJeff Kirsher 			else
1223f7917c00SJeff Kirsher 				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1224f7917c00SJeff Kirsher 
1225f7917c00SJeff Kirsher 			flits = (skb->len + 7) / 8 + 2;
1226f7917c00SJeff Kirsher 			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1227f7917c00SJeff Kirsher 					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1228f7917c00SJeff Kirsher 					      | F_WR_SOP | F_WR_EOP | compl);
1229019be1cfSAlexander Duyck 			dma_wmb();
1230f7917c00SJeff Kirsher 			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1231f7917c00SJeff Kirsher 					      V_WR_TID(q->token));
1232f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1233f9ec8131SEric W. Biederman 			dev_consume_skb_any(skb);
1234f7917c00SJeff Kirsher 			return;
1235f7917c00SJeff Kirsher 		}
1236f7917c00SJeff Kirsher 
1237f7917c00SJeff Kirsher 		flits = 2;
1238f7917c00SJeff Kirsher 	}
1239f7917c00SJeff Kirsher 
1240f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1241c69fe407SArjun Vynipadath 	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1242f7917c00SJeff Kirsher 
1243f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1244f7917c00SJeff Kirsher 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1245f7917c00SJeff Kirsher 			 htonl(V_WR_TID(q->token)));
1246f7917c00SJeff Kirsher }
1247f7917c00SJeff Kirsher 
1248f7917c00SJeff Kirsher static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1249f7917c00SJeff Kirsher 				    struct sge_qset *qs, struct sge_txq *q)
1250f7917c00SJeff Kirsher {
1251f7917c00SJeff Kirsher 	netif_tx_stop_queue(txq);
1252f7917c00SJeff Kirsher 	set_bit(TXQ_ETH, &qs->txq_stopped);
1253f7917c00SJeff Kirsher 	q->stops++;
1254f7917c00SJeff Kirsher }
1255f7917c00SJeff Kirsher 
1256f7917c00SJeff Kirsher /**
1257f7917c00SJeff Kirsher  *	t3_eth_xmit - add a packet to the Ethernet Tx queue
1258f7917c00SJeff Kirsher  *	@skb: the packet
1259f7917c00SJeff Kirsher  *	@dev: the egress net device
1260f7917c00SJeff Kirsher  *
1261f7917c00SJeff Kirsher  *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
1262f7917c00SJeff Kirsher  */
1263f7917c00SJeff Kirsher netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1264f7917c00SJeff Kirsher {
1265f7917c00SJeff Kirsher 	int qidx;
1266f7917c00SJeff Kirsher 	unsigned int ndesc, pidx, credits, gen, compl;
1267f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(dev);
1268f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1269f7917c00SJeff Kirsher 	struct netdev_queue *txq;
1270f7917c00SJeff Kirsher 	struct sge_qset *qs;
1271f7917c00SJeff Kirsher 	struct sge_txq *q;
1272c69fe407SArjun Vynipadath 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1273f7917c00SJeff Kirsher 
1274f7917c00SJeff Kirsher 	/*
1275f7917c00SJeff Kirsher 	 * The chip's minimum packet length is 9 octets, but play it safe and
1276f7917c00SJeff Kirsher 	 * reject anything shorter than an Ethernet header.
1277f7917c00SJeff Kirsher 	 */
1278f7917c00SJeff Kirsher 	if (unlikely(skb->len < ETH_HLEN)) {
1279f9ec8131SEric W. Biederman 		dev_kfree_skb_any(skb);
1280f7917c00SJeff Kirsher 		return NETDEV_TX_OK;
1281f7917c00SJeff Kirsher 	}
1282f7917c00SJeff Kirsher 
1283f7917c00SJeff Kirsher 	qidx = skb_get_queue_mapping(skb);
1284f7917c00SJeff Kirsher 	qs = &pi->qs[qidx];
1285f7917c00SJeff Kirsher 	q = &qs->txq[TXQ_ETH];
1286f7917c00SJeff Kirsher 	txq = netdev_get_tx_queue(dev, qidx);
1287f7917c00SJeff Kirsher 
1288f7917c00SJeff Kirsher 	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1289f7917c00SJeff Kirsher 
1290f7917c00SJeff Kirsher 	credits = q->size - q->in_use;
1291f7917c00SJeff Kirsher 	ndesc = calc_tx_descs(skb);
1292f7917c00SJeff Kirsher 
1293f7917c00SJeff Kirsher 	if (unlikely(credits < ndesc)) {
1294f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1295f7917c00SJeff Kirsher 		dev_err(&adap->pdev->dev,
1296f7917c00SJeff Kirsher 			"%s: Tx ring %u full while queue awake!\n",
1297f7917c00SJeff Kirsher 			dev->name, q->cntxt_id & 7);
1298f7917c00SJeff Kirsher 		return NETDEV_TX_BUSY;
1299f7917c00SJeff Kirsher 	}
1300f7917c00SJeff Kirsher 
1301c69fe407SArjun Vynipadath 	/* DMA-map the packet if it is too big to be sent as immediate data */
1302c69fe407SArjun Vynipadath 	if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
1303c69fe407SArjun Vynipadath 		if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1304c69fe407SArjun Vynipadath 			dev_kfree_skb(skb);
1305c69fe407SArjun Vynipadath 			return NETDEV_TX_OK;
1306c69fe407SArjun Vynipadath 		}
1307c69fe407SArjun Vynipadath 	}
1308c69fe407SArjun Vynipadath 
1309f7917c00SJeff Kirsher 	q->in_use += ndesc;
1310f7917c00SJeff Kirsher 	if (unlikely(credits - ndesc < q->stop_thres)) {
1311f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1312f7917c00SJeff Kirsher 
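		/*
		 * Descriptors may have been reclaimed between the credit
		 * check and stopping the queue; if so, restart it right away.
		 */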
1313f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1314f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1315f7917c00SJeff Kirsher 			q->restarts++;
1316f7917c00SJeff Kirsher 			netif_tx_start_queue(txq);
1317f7917c00SJeff Kirsher 		}
1318f7917c00SJeff Kirsher 	}
1319f7917c00SJeff Kirsher 
1320f7917c00SJeff Kirsher 	gen = q->gen;
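	/*
	 * Request a WR completion roughly once every eight descriptors;
	 * q->unacked counts descriptors since the last completion, modulo 8.
	 */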
1321f7917c00SJeff Kirsher 	q->unacked += ndesc;
1322f7917c00SJeff Kirsher 	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1323f7917c00SJeff Kirsher 	q->unacked &= 7;
1324f7917c00SJeff Kirsher 	pidx = q->pidx;
1325f7917c00SJeff Kirsher 	q->pidx += ndesc;
1326f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1327f7917c00SJeff Kirsher 		q->pidx -= q->size;
1328f7917c00SJeff Kirsher 		q->gen ^= 1;
1329f7917c00SJeff Kirsher 	}
1330f7917c00SJeff Kirsher 
1331f7917c00SJeff Kirsher 	/* update port statistics */
1332bc6c47b5SVipul Pandya 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1333f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1334f7917c00SJeff Kirsher 	if (skb_shinfo(skb)->gso_size)
1335f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TSO]++;
1336df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1337f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANINS]++;
1338f7917c00SJeff Kirsher 
1339f7917c00SJeff Kirsher 	/*
1340f7917c00SJeff Kirsher 	 * We do not use Tx completion interrupts to free DMAd Tx packets.
1341f7917c00SJeff Kirsher 	 * This is good for performance but means that we rely on new Tx
1342f7917c00SJeff Kirsher 	 * packets arriving to run the destructors of completed packets,
1343f7917c00SJeff Kirsher 	 * which open up space in their sockets' send queues.  Sometimes
1344f7917c00SJeff Kirsher 	 * we do not get such new packets, causing Tx to stall.  A single
1345f7917c00SJeff Kirsher 	 * UDP transmitter is a good example of this situation.  We have
1346f7917c00SJeff Kirsher 	 * a clean up timer that periodically reclaims completed packets
1347f7917c00SJeff Kirsher 	 * but it doesn't run often enough (nor do we want it to) to prevent
1348f7917c00SJeff Kirsher 	 * lengthy stalls.  A solution to this problem is to run the
1349f7917c00SJeff Kirsher 	 * destructor early, after the packet is queued but before it's DMAd.
1350f7917c00SJeff Kirsher 	 * A downside is that we lie to socket memory accounting, but the amount
1351f7917c00SJeff Kirsher 	 * of extra memory is reasonable (limited by the number of Tx
1352f7917c00SJeff Kirsher 	 * descriptors), the packets almost always do get freed quickly by new
1353f7917c00SJeff Kirsher 	 * packets, and for protocols like TCP that wait for ACKs before truly
1354f7917c00SJeff Kirsher 	 * freeing the data the extra memory matters even less.
1355f7917c00SJeff Kirsher 	 * On the positive side we run the destructors on the sending CPU
1356f7917c00SJeff Kirsher 	 * rather than on a potentially different completing CPU, usually a
1357f7917c00SJeff Kirsher 	 * good thing.  We also run them without holding our Tx queue lock,
1358f7917c00SJeff Kirsher 	 * unlike what reclaim_completed_tx() would otherwise do.
1359f7917c00SJeff Kirsher 	 *
1360f7917c00SJeff Kirsher 	 * Run the destructor before telling the DMA engine about the packet
1361f7917c00SJeff Kirsher 	 * to make sure it doesn't complete and get freed prematurely.
1362f7917c00SJeff Kirsher 	 */
1363f7917c00SJeff Kirsher 	if (likely(!skb_shared(skb)))
1364f7917c00SJeff Kirsher 		skb_orphan(skb);
1365f7917c00SJeff Kirsher 
1366c69fe407SArjun Vynipadath 	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1367f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1368f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1369f7917c00SJeff Kirsher }
1370f7917c00SJeff Kirsher 
1371f7917c00SJeff Kirsher /**
1372f7917c00SJeff Kirsher  *	write_imm - write a packet into a Tx descriptor as immediate data
1373f7917c00SJeff Kirsher  *	@d: the Tx descriptor to write
1374f7917c00SJeff Kirsher  *	@skb: the packet
1375f7917c00SJeff Kirsher  *	@len: the length of packet data to write as immediate data
1376f7917c00SJeff Kirsher  *	@gen: the generation bit value to write
1377f7917c00SJeff Kirsher  *
1378f7917c00SJeff Kirsher  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1379f7917c00SJeff Kirsher  *	contains a work request at its beginning.  We must write the packet
1380f7917c00SJeff Kirsher  *	carefully so the SGE doesn't read it accidentally before it's written
1381f7917c00SJeff Kirsher  *	in its entirety.
1382f7917c00SJeff Kirsher  */
1383f7917c00SJeff Kirsher static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1384f7917c00SJeff Kirsher 			     unsigned int len, unsigned int gen)
1385f7917c00SJeff Kirsher {
1386f7917c00SJeff Kirsher 	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1387f7917c00SJeff Kirsher 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1388f7917c00SJeff Kirsher 
1389f7917c00SJeff Kirsher 	if (likely(!skb->data_len))
1390f7917c00SJeff Kirsher 		memcpy(&to[1], &from[1], len - sizeof(*from));
1391f7917c00SJeff Kirsher 	else
1392f7917c00SJeff Kirsher 		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1393f7917c00SJeff Kirsher 
1394f7917c00SJeff Kirsher 	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1395f7917c00SJeff Kirsher 					V_WR_BCNTLFLT(len & 7));
1396019be1cfSAlexander Duyck 	dma_wmb();
1397f7917c00SJeff Kirsher 	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1398f7917c00SJeff Kirsher 					V_WR_LEN((len + 7) / 8));
1399f7917c00SJeff Kirsher 	wr_gen2(d, gen);
1400f7917c00SJeff Kirsher 	kfree_skb(skb);
1401f7917c00SJeff Kirsher }
1402f7917c00SJeff Kirsher 
1403f7917c00SJeff Kirsher /**
1404f7917c00SJeff Kirsher  *	check_desc_avail - check descriptor availability on a send queue
1405f7917c00SJeff Kirsher  *	@adap: the adapter
1406f7917c00SJeff Kirsher  *	@q: the send queue
1407f7917c00SJeff Kirsher  *	@skb: the packet needing the descriptors
1408f7917c00SJeff Kirsher  *	@ndesc: the number of Tx descriptors needed
1409f7917c00SJeff Kirsher  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1410f7917c00SJeff Kirsher  *
1411f7917c00SJeff Kirsher  *	Checks if the requested number of Tx descriptors is available on an
1412f7917c00SJeff Kirsher  *	SGE send queue.  If the queue is already suspended or not enough
1413f7917c00SJeff Kirsher  *	descriptors are available the packet is queued for later transmission.
1414f7917c00SJeff Kirsher  *	Must be called with the Tx queue locked.
1415f7917c00SJeff Kirsher  *
1416f7917c00SJeff Kirsher  *	Returns 0 if enough descriptors are available, 1 if there aren't
1417f7917c00SJeff Kirsher  *	enough descriptors and the packet has been queued, and 2 if the caller
1418f7917c00SJeff Kirsher  *	needs to retry because there weren't enough descriptors at the
1419f7917c00SJeff Kirsher  *	beginning of the call but some freed up in the meantime.
1420f7917c00SJeff Kirsher  */
1421f7917c00SJeff Kirsher static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1422f7917c00SJeff Kirsher 				   struct sk_buff *skb, unsigned int ndesc,
1423f7917c00SJeff Kirsher 				   unsigned int qid)
1424f7917c00SJeff Kirsher {
1425f7917c00SJeff Kirsher 	if (unlikely(!skb_queue_empty(&q->sendq))) {
1426f7917c00SJeff Kirsher 	      addq_exit:__skb_queue_tail(&q->sendq, skb);
1427f7917c00SJeff Kirsher 		return 1;
1428f7917c00SJeff Kirsher 	}
1429f7917c00SJeff Kirsher 	if (unlikely(q->size - q->in_use < ndesc)) {
1430f7917c00SJeff Kirsher 		struct sge_qset *qs = txq_to_qset(q, qid);
1431f7917c00SJeff Kirsher 
1432f7917c00SJeff Kirsher 		set_bit(qid, &qs->txq_stopped);
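		/*
		 * Make the stopped bit visible before re-checking how many
		 * descriptors are free, pairing with the reclaim/restart path.
		 */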
14334e857c58SPeter Zijlstra 		smp_mb__after_atomic();
1434f7917c00SJeff Kirsher 
1435f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1436f7917c00SJeff Kirsher 		    test_and_clear_bit(qid, &qs->txq_stopped))
1437f7917c00SJeff Kirsher 			return 2;
1438f7917c00SJeff Kirsher 
1439f7917c00SJeff Kirsher 		q->stops++;
1440f7917c00SJeff Kirsher 		goto addq_exit;
1441f7917c00SJeff Kirsher 	}
1442f7917c00SJeff Kirsher 	return 0;
1443f7917c00SJeff Kirsher }
1444f7917c00SJeff Kirsher 
1445f7917c00SJeff Kirsher /**
1446f7917c00SJeff Kirsher  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1447f7917c00SJeff Kirsher  *	@q: the SGE control Tx queue
1448f7917c00SJeff Kirsher  *
1449f7917c00SJeff Kirsher  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1450f7917c00SJeff Kirsher  *	that send only immediate data (presently just the control queues) and
1451f7917c00SJeff Kirsher  *	thus do not have any sk_buffs to release.
1452f7917c00SJeff Kirsher  */
1453f7917c00SJeff Kirsher static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1454f7917c00SJeff Kirsher {
1455f7917c00SJeff Kirsher 	unsigned int reclaim = q->processed - q->cleaned;
1456f7917c00SJeff Kirsher 
1457f7917c00SJeff Kirsher 	q->in_use -= reclaim;
1458f7917c00SJeff Kirsher 	q->cleaned += reclaim;
1459f7917c00SJeff Kirsher }
1460f7917c00SJeff Kirsher 
1461f7917c00SJeff Kirsher static inline int immediate(const struct sk_buff *skb)
1462f7917c00SJeff Kirsher {
1463f7917c00SJeff Kirsher 	return skb->len <= WR_LEN;
1464f7917c00SJeff Kirsher }
1465f7917c00SJeff Kirsher 
1466f7917c00SJeff Kirsher /**
1467f7917c00SJeff Kirsher  *	ctrl_xmit - send a packet through an SGE control Tx queue
1468f7917c00SJeff Kirsher  *	@adap: the adapter
1469f7917c00SJeff Kirsher  *	@q: the control queue
1470f7917c00SJeff Kirsher  *	@skb: the packet
1471f7917c00SJeff Kirsher  *
1472f7917c00SJeff Kirsher  *	Send a packet through an SGE control Tx queue.  Packets sent through
1473f7917c00SJeff Kirsher  *	a control queue must fit entirely as immediate data in a single Tx
1474f7917c00SJeff Kirsher  *	descriptor and have no page fragments.
1475f7917c00SJeff Kirsher  */
1476f7917c00SJeff Kirsher static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1477f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1478f7917c00SJeff Kirsher {
1479f7917c00SJeff Kirsher 	int ret;
1480f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1481f7917c00SJeff Kirsher 
1482f7917c00SJeff Kirsher 	if (unlikely(!immediate(skb))) {
1483f7917c00SJeff Kirsher 		WARN_ON(1);
1484f7917c00SJeff Kirsher 		dev_kfree_skb(skb);
1485f7917c00SJeff Kirsher 		return NET_XMIT_SUCCESS;
1486f7917c00SJeff Kirsher 	}
1487f7917c00SJeff Kirsher 
1488f7917c00SJeff Kirsher 	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1489f7917c00SJeff Kirsher 	wrp->wr_lo = htonl(V_WR_TID(q->token));
1490f7917c00SJeff Kirsher 
1491f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1492f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1493f7917c00SJeff Kirsher 
1494f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1495f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1496f7917c00SJeff Kirsher 		if (ret == 1) {
1497f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1498f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1499f7917c00SJeff Kirsher 		}
1500f7917c00SJeff Kirsher 		goto again;
1501f7917c00SJeff Kirsher 	}
1502f7917c00SJeff Kirsher 
1503f7917c00SJeff Kirsher 	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1504f7917c00SJeff Kirsher 
1505f7917c00SJeff Kirsher 	q->in_use++;
1506f7917c00SJeff Kirsher 	if (++q->pidx >= q->size) {
1507f7917c00SJeff Kirsher 		q->pidx = 0;
1508f7917c00SJeff Kirsher 		q->gen ^= 1;
1509f7917c00SJeff Kirsher 	}
1510f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1511f7917c00SJeff Kirsher 	wmb();
1512f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_KDOORBELL,
1513f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1514f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1515f7917c00SJeff Kirsher }
1516f7917c00SJeff Kirsher 
1517f7917c00SJeff Kirsher /**
1518f7917c00SJeff Kirsher  *	restart_ctrlq - restart a suspended control queue
1519f7917c00SJeff Kirsher  *	@qs: the queue set containing the control queue
1520f7917c00SJeff Kirsher  *
1521f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx control queue.
1522f7917c00SJeff Kirsher  */
1523f7917c00SJeff Kirsher static void restart_ctrlq(unsigned long data)
1524f7917c00SJeff Kirsher {
1525f7917c00SJeff Kirsher 	struct sk_buff *skb;
1526f7917c00SJeff Kirsher 	struct sge_qset *qs = (struct sge_qset *)data;
1527f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1528f7917c00SJeff Kirsher 
1529f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1530f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1531f7917c00SJeff Kirsher 
1532f7917c00SJeff Kirsher 	while (q->in_use < q->size &&
1533f7917c00SJeff Kirsher 	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
1534f7917c00SJeff Kirsher 
1535f7917c00SJeff Kirsher 		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1536f7917c00SJeff Kirsher 
1537f7917c00SJeff Kirsher 		if (++q->pidx >= q->size) {
1538f7917c00SJeff Kirsher 			q->pidx = 0;
1539f7917c00SJeff Kirsher 			q->gen ^= 1;
1540f7917c00SJeff Kirsher 		}
1541f7917c00SJeff Kirsher 		q->in_use++;
1542f7917c00SJeff Kirsher 	}
1543f7917c00SJeff Kirsher 
1544f7917c00SJeff Kirsher 	if (!skb_queue_empty(&q->sendq)) {
1545f7917c00SJeff Kirsher 		set_bit(TXQ_CTRL, &qs->txq_stopped);
15464e857c58SPeter Zijlstra 		smp_mb__after_atomic();
1547f7917c00SJeff Kirsher 
1548f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1549f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1550f7917c00SJeff Kirsher 			goto again;
1551f7917c00SJeff Kirsher 		q->stops++;
1552f7917c00SJeff Kirsher 	}
1553f7917c00SJeff Kirsher 
1554f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1555f7917c00SJeff Kirsher 	wmb();
1556f7917c00SJeff Kirsher 	t3_write_reg(qs->adap, A_SG_KDOORBELL,
1557f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1558f7917c00SJeff Kirsher }
1559f7917c00SJeff Kirsher 
1560f7917c00SJeff Kirsher /*
1561f7917c00SJeff Kirsher  * Send a management message through control queue 0
1562f7917c00SJeff Kirsher  */
1563f7917c00SJeff Kirsher int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1564f7917c00SJeff Kirsher {
1565f7917c00SJeff Kirsher 	int ret;
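	/*
	 * ctrl_xmit() takes the queue lock without disabling bottom halves
	 * itself, so do it here since we may be called from process context.
	 */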
1566f7917c00SJeff Kirsher 	local_bh_disable();
1567f7917c00SJeff Kirsher 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1568f7917c00SJeff Kirsher 	local_bh_enable();
1569f7917c00SJeff Kirsher 
1570f7917c00SJeff Kirsher 	return ret;
1571f7917c00SJeff Kirsher }
1572f7917c00SJeff Kirsher 
1573f7917c00SJeff Kirsher /**
1574f7917c00SJeff Kirsher  *	deferred_unmap_destructor - unmap a packet when it is freed
1575f7917c00SJeff Kirsher  *	@skb: the packet
1576f7917c00SJeff Kirsher  *
1577f7917c00SJeff Kirsher  *	This is the packet destructor used for Tx packets that need to remain
1578f7917c00SJeff Kirsher  *	mapped until they are freed rather than until their Tx descriptors are
1579f7917c00SJeff Kirsher  *	freed.
1580f7917c00SJeff Kirsher  */
1581f7917c00SJeff Kirsher static void deferred_unmap_destructor(struct sk_buff *skb)
1582f7917c00SJeff Kirsher {
1583f7917c00SJeff Kirsher 	int i;
1584f7917c00SJeff Kirsher 	const dma_addr_t *p;
1585f7917c00SJeff Kirsher 	const struct skb_shared_info *si;
1586f7917c00SJeff Kirsher 	const struct deferred_unmap_info *dui;
1587f7917c00SJeff Kirsher 
1588f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1589f7917c00SJeff Kirsher 	p = dui->addr;
1590f7917c00SJeff Kirsher 
159115dd16c2SLi RongQing 	if (skb_tail_pointer(skb) - skb_transport_header(skb))
1592be8b678cSSimon Horman 		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
1593be8b678cSSimon Horman 				 skb_transport_header(skb), PCI_DMA_TODEVICE);
1594f7917c00SJeff Kirsher 
1595f7917c00SJeff Kirsher 	si = skb_shinfo(skb);
1596f7917c00SJeff Kirsher 	for (i = 0; i < si->nr_frags; i++)
15979e903e08SEric Dumazet 		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
1598f7917c00SJeff Kirsher 			       PCI_DMA_TODEVICE);
1599f7917c00SJeff Kirsher }
1600f7917c00SJeff Kirsher 
1601f7917c00SJeff Kirsher static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1602f7917c00SJeff Kirsher 				     const struct sg_ent *sgl, int sgl_flits)
1603f7917c00SJeff Kirsher {
1604f7917c00SJeff Kirsher 	dma_addr_t *p;
1605f7917c00SJeff Kirsher 	struct deferred_unmap_info *dui;
1606f7917c00SJeff Kirsher 
1607f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1608f7917c00SJeff Kirsher 	dui->pdev = pdev;
1609f7917c00SJeff Kirsher 	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1610f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[0]);
1611f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[1]);
1612f7917c00SJeff Kirsher 	}
1613f7917c00SJeff Kirsher 	if (sgl_flits)
1614f7917c00SJeff Kirsher 		*p = be64_to_cpu(sgl->addr[0]);
1615f7917c00SJeff Kirsher }
1616f7917c00SJeff Kirsher 
1617f7917c00SJeff Kirsher /**
1618f7917c00SJeff Kirsher  *	write_ofld_wr - write an offload work request
1619f7917c00SJeff Kirsher  *	@adap: the adapter
1620f7917c00SJeff Kirsher  *	@skb: the packet to send
1621f7917c00SJeff Kirsher  *	@q: the Tx queue
1622f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1623f7917c00SJeff Kirsher  *	@gen: the generation value to use
1624f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
 *	@addr: the list of DMA-mapped addresses for the packet's buffers
1625f7917c00SJeff Kirsher  *
1626f7917c00SJeff Kirsher  *	Write an offload work request to send the supplied packet.  The packet
1627f7917c00SJeff Kirsher  *	data already carry the work request with most fields populated.
1628f7917c00SJeff Kirsher  */
1629f7917c00SJeff Kirsher static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1630f7917c00SJeff Kirsher 			  struct sge_txq *q, unsigned int pidx,
1631c69fe407SArjun Vynipadath 			  unsigned int gen, unsigned int ndesc,
1632c69fe407SArjun Vynipadath 			  const dma_addr_t *addr)
1633f7917c00SJeff Kirsher {
1634f7917c00SJeff Kirsher 	unsigned int sgl_flits, flits;
1635f7917c00SJeff Kirsher 	struct work_request_hdr *from;
1636f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1637f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1638f7917c00SJeff Kirsher 
1639f7917c00SJeff Kirsher 	if (immediate(skb)) {
1640f7917c00SJeff Kirsher 		q->sdesc[pidx].skb = NULL;
1641f7917c00SJeff Kirsher 		write_imm(d, skb, skb->len, gen);
1642f7917c00SJeff Kirsher 		return;
1643f7917c00SJeff Kirsher 	}
1644f7917c00SJeff Kirsher 
1645f7917c00SJeff Kirsher 	/* Only TX_DATA builds SGLs */
1646f7917c00SJeff Kirsher 
1647f7917c00SJeff Kirsher 	from = (struct work_request_hdr *)skb->data;
1648f7917c00SJeff Kirsher 	memcpy(&d->flit[1], &from[1],
1649f7917c00SJeff Kirsher 	       skb_transport_offset(skb) - sizeof(*from));
1650f7917c00SJeff Kirsher 
1651f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;
1652f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1653c69fe407SArjun Vynipadath 	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1654c69fe407SArjun Vynipadath 			      skb_tail_pointer(skb) - skb_transport_header(skb),
1655c69fe407SArjun Vynipadath 			      addr);
1656f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1657f7917c00SJeff Kirsher 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1658f7917c00SJeff Kirsher 		skb->destructor = deferred_unmap_destructor;
1659f7917c00SJeff Kirsher 	}
1660f7917c00SJeff Kirsher 
1661f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1662f7917c00SJeff Kirsher 			 gen, from->wr_hi, from->wr_lo);
1663f7917c00SJeff Kirsher }
1664f7917c00SJeff Kirsher 
1665f7917c00SJeff Kirsher /**
1666f7917c00SJeff Kirsher  *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1667f7917c00SJeff Kirsher  *	@skb: the packet
1668f7917c00SJeff Kirsher  *
1669f7917c00SJeff Kirsher  * 	Returns the number of Tx descriptors needed for the given offload
1670f7917c00SJeff Kirsher  * 	packet.  These packets are already fully constructed.
1671f7917c00SJeff Kirsher  */
1672f7917c00SJeff Kirsher static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1673f7917c00SJeff Kirsher {
1674f7917c00SJeff Kirsher 	unsigned int flits, cnt;
1675f7917c00SJeff Kirsher 
1676f7917c00SJeff Kirsher 	if (skb->len <= WR_LEN)
1677f7917c00SJeff Kirsher 		return 1;	/* packet fits as immediate data */
1678f7917c00SJeff Kirsher 
1679f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;	/* headers */
1680f7917c00SJeff Kirsher 	cnt = skb_shinfo(skb)->nr_frags;
1681be8b678cSSimon Horman 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1682f7917c00SJeff Kirsher 		cnt++;
1683f7917c00SJeff Kirsher 	return flits_to_desc(flits + sgl_len(cnt));
1684f7917c00SJeff Kirsher }
1685f7917c00SJeff Kirsher 
1686f7917c00SJeff Kirsher /**
1687f7917c00SJeff Kirsher  *	ofld_xmit - send a packet through an offload queue
1688f7917c00SJeff Kirsher  *	@adap: the adapter
1689f7917c00SJeff Kirsher  *	@q: the Tx offload queue
1690f7917c00SJeff Kirsher  *	@skb: the packet
1691f7917c00SJeff Kirsher  *
1692f7917c00SJeff Kirsher  *	Send an offload packet through an SGE offload queue.
1693f7917c00SJeff Kirsher  */
1694f7917c00SJeff Kirsher static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1695f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1696f7917c00SJeff Kirsher {
1697f7917c00SJeff Kirsher 	int ret;
1698f7917c00SJeff Kirsher 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1699f7917c00SJeff Kirsher 
1700f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1701f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1702f7917c00SJeff Kirsher 
1703f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1704f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1705f7917c00SJeff Kirsher 		if (ret == 1) {
1706f7917c00SJeff Kirsher 			skb->priority = ndesc;	/* save for restart */
1707f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1708f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1709f7917c00SJeff Kirsher 		}
1710f7917c00SJeff Kirsher 		goto again;
1711f7917c00SJeff Kirsher 	}
1712f7917c00SJeff Kirsher 
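	/*
	 * Non-immediate packets are DMA-mapped here; the addresses are
	 * stashed in the skb headroom and picked up by write_ofld_wr().
	 */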
1713c69fe407SArjun Vynipadath 	if (!immediate(skb) &&
1714c69fe407SArjun Vynipadath 	    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1715c69fe407SArjun Vynipadath 		spin_unlock(&q->lock);
1716c69fe407SArjun Vynipadath 		return NET_XMIT_SUCCESS;
1717c69fe407SArjun Vynipadath 	}
1718c69fe407SArjun Vynipadath 
1719f7917c00SJeff Kirsher 	gen = q->gen;
1720f7917c00SJeff Kirsher 	q->in_use += ndesc;
1721f7917c00SJeff Kirsher 	pidx = q->pidx;
1722f7917c00SJeff Kirsher 	q->pidx += ndesc;
1723f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1724f7917c00SJeff Kirsher 		q->pidx -= q->size;
1725f7917c00SJeff Kirsher 		q->gen ^= 1;
1726f7917c00SJeff Kirsher 	}
1727f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1728f7917c00SJeff Kirsher 
1729c69fe407SArjun Vynipadath 	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1730f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1731f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1732f7917c00SJeff Kirsher }
1733f7917c00SJeff Kirsher 
1734f7917c00SJeff Kirsher /**
1735f7917c00SJeff Kirsher  *	restart_offloadq - restart a suspended offload queue
1736f7917c00SJeff Kirsher  *	@qs: the queue set containing the offload queue
1737f7917c00SJeff Kirsher  *
1738f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx offload queue.
1739f7917c00SJeff Kirsher  */
1740f7917c00SJeff Kirsher static void restart_offloadq(unsigned long data)
1741f7917c00SJeff Kirsher {
1742f7917c00SJeff Kirsher 	struct sk_buff *skb;
1743f7917c00SJeff Kirsher 	struct sge_qset *qs = (struct sge_qset *)data;
1744f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
1745f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(qs->netdev);
1746f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1747c69fe407SArjun Vynipadath 	unsigned int written = 0;
1748f7917c00SJeff Kirsher 
1749f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1750f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1751f7917c00SJeff Kirsher 
1752f7917c00SJeff Kirsher 	while ((skb = skb_peek(&q->sendq)) != NULL) {
1753f7917c00SJeff Kirsher 		unsigned int gen, pidx;
1754f7917c00SJeff Kirsher 		unsigned int ndesc = skb->priority;
1755f7917c00SJeff Kirsher 
1756f7917c00SJeff Kirsher 		if (unlikely(q->size - q->in_use < ndesc)) {
1757f7917c00SJeff Kirsher 			set_bit(TXQ_OFLD, &qs->txq_stopped);
17584e857c58SPeter Zijlstra 			smp_mb__after_atomic();
1759f7917c00SJeff Kirsher 
1760f7917c00SJeff Kirsher 			if (should_restart_tx(q) &&
1761f7917c00SJeff Kirsher 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1762f7917c00SJeff Kirsher 				goto again;
1763f7917c00SJeff Kirsher 			q->stops++;
1764f7917c00SJeff Kirsher 			break;
1765f7917c00SJeff Kirsher 		}
1766f7917c00SJeff Kirsher 
1767c69fe407SArjun Vynipadath 		if (!immediate(skb) &&
1768c69fe407SArjun Vynipadath 		    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1769c69fe407SArjun Vynipadath 			break;
1770c69fe407SArjun Vynipadath 
1771f7917c00SJeff Kirsher 		gen = q->gen;
1772f7917c00SJeff Kirsher 		q->in_use += ndesc;
1773f7917c00SJeff Kirsher 		pidx = q->pidx;
1774f7917c00SJeff Kirsher 		q->pidx += ndesc;
1775c69fe407SArjun Vynipadath 		written += ndesc;
1776f7917c00SJeff Kirsher 		if (q->pidx >= q->size) {
1777f7917c00SJeff Kirsher 			q->pidx -= q->size;
1778f7917c00SJeff Kirsher 			q->gen ^= 1;
1779f7917c00SJeff Kirsher 		}
1780f7917c00SJeff Kirsher 		__skb_unlink(skb, &q->sendq);
1781f7917c00SJeff Kirsher 		spin_unlock(&q->lock);
1782f7917c00SJeff Kirsher 
1783c69fe407SArjun Vynipadath 		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1784c69fe407SArjun Vynipadath 			      (dma_addr_t *)skb->head);
1785f7917c00SJeff Kirsher 		spin_lock(&q->lock);
1786f7917c00SJeff Kirsher 	}
1787f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1788f7917c00SJeff Kirsher 
1789f7917c00SJeff Kirsher #if USE_GTS
1790f7917c00SJeff Kirsher 	set_bit(TXQ_RUNNING, &q->flags);
1791f7917c00SJeff Kirsher 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
1792f7917c00SJeff Kirsher #endif
1793f7917c00SJeff Kirsher 	wmb();
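	/* Only ring the doorbell if new descriptors were actually posted. */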
1794c69fe407SArjun Vynipadath 	if (likely(written))
1795f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_KDOORBELL,
1796f7917c00SJeff Kirsher 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1797f7917c00SJeff Kirsher }
1798f7917c00SJeff Kirsher 
1799f7917c00SJeff Kirsher /**
1800f7917c00SJeff Kirsher  *	queue_set - return the queue set a packet should use
1801f7917c00SJeff Kirsher  *	@skb: the packet
1802f7917c00SJeff Kirsher  *
1803f7917c00SJeff Kirsher  *	Maps a packet to the SGE queue set it should use.  The desired queue
1804f7917c00SJeff Kirsher  *	set is carried in bits 1-3 in the packet's priority.
1805f7917c00SJeff Kirsher  */
1806f7917c00SJeff Kirsher static inline int queue_set(const struct sk_buff *skb)
1807f7917c00SJeff Kirsher {
1808f7917c00SJeff Kirsher 	return skb->priority >> 1;
1809f7917c00SJeff Kirsher }
1810f7917c00SJeff Kirsher 
1811f7917c00SJeff Kirsher /**
1812f7917c00SJeff Kirsher  *	is_ctrl_pkt - return whether an offload packet is a control packet
1813f7917c00SJeff Kirsher  *	@skb: the packet
1814f7917c00SJeff Kirsher  *
1815f7917c00SJeff Kirsher  *	Determines whether an offload packet should use an OFLD or a CTRL
1816f7917c00SJeff Kirsher  *	Tx queue.  This is indicated by bit 0 in the packet's priority.
1817f7917c00SJeff Kirsher  */
1818f7917c00SJeff Kirsher static inline int is_ctrl_pkt(const struct sk_buff *skb)
1819f7917c00SJeff Kirsher {
1820f7917c00SJeff Kirsher 	return skb->priority & 1;
1821f7917c00SJeff Kirsher }
1822f7917c00SJeff Kirsher 
1823f7917c00SJeff Kirsher /**
1824f7917c00SJeff Kirsher  *	t3_offload_tx - send an offload packet
1825f7917c00SJeff Kirsher  *	@tdev: the offload device to send to
1826f7917c00SJeff Kirsher  *	@skb: the packet
1827f7917c00SJeff Kirsher  *
1828f7917c00SJeff Kirsher  *	Sends an offload packet.  We use the packet priority to select the
1829f7917c00SJeff Kirsher  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1830f7917c00SJeff Kirsher  *	should be sent as regular or control, bits 1-3 select the queue set.
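 *	For example, a priority of ((2 << 1) | 1) selects the control queue
 *	of queue set 2.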
1831f7917c00SJeff Kirsher  */
1832f7917c00SJeff Kirsher int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1833f7917c00SJeff Kirsher {
1834f7917c00SJeff Kirsher 	struct adapter *adap = tdev2adap(tdev);
1835f7917c00SJeff Kirsher 	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1836f7917c00SJeff Kirsher 
1837f7917c00SJeff Kirsher 	if (unlikely(is_ctrl_pkt(skb)))
1838f7917c00SJeff Kirsher 		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1839f7917c00SJeff Kirsher 
1840f7917c00SJeff Kirsher 	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1841f7917c00SJeff Kirsher }
1842f7917c00SJeff Kirsher 
1843f7917c00SJeff Kirsher /**
1844f7917c00SJeff Kirsher  *	offload_enqueue - add an offload packet to an SGE offload receive queue
1845f7917c00SJeff Kirsher  *	@q: the SGE response queue
1846f7917c00SJeff Kirsher  *	@skb: the packet
1847f7917c00SJeff Kirsher  *
1848f7917c00SJeff Kirsher  *	Add a new offload packet to an SGE response queue's offload packet
1849f7917c00SJeff Kirsher  *	queue.  If the packet is the first on the queue it schedules the RX
1850f7917c00SJeff Kirsher  *	softirq to process the queue.
1851f7917c00SJeff Kirsher  */
1852f7917c00SJeff Kirsher static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1853f7917c00SJeff Kirsher {
1854f7917c00SJeff Kirsher 	int was_empty = skb_queue_empty(&q->rx_queue);
1855f7917c00SJeff Kirsher 
1856f7917c00SJeff Kirsher 	__skb_queue_tail(&q->rx_queue, skb);
1857f7917c00SJeff Kirsher 
1858f7917c00SJeff Kirsher 	if (was_empty) {
1859f7917c00SJeff Kirsher 		struct sge_qset *qs = rspq_to_qset(q);
1860f7917c00SJeff Kirsher 
1861f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
1862f7917c00SJeff Kirsher 	}
1863f7917c00SJeff Kirsher }
1864f7917c00SJeff Kirsher 
1865f7917c00SJeff Kirsher /**
1866f7917c00SJeff Kirsher  *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1867f7917c00SJeff Kirsher  *	@tdev: the offload device that will be receiving the packets
1868f7917c00SJeff Kirsher  *	@q: the SGE response queue that assembled the bundle
1869f7917c00SJeff Kirsher  *	@skbs: the partial bundle
1870f7917c00SJeff Kirsher  *	@n: the number of packets in the bundle
1871f7917c00SJeff Kirsher  *
1872f7917c00SJeff Kirsher  *	Delivers a (partial) bundle of Rx offload packets to an offload device.
1873f7917c00SJeff Kirsher  */
1874f7917c00SJeff Kirsher static inline void deliver_partial_bundle(struct t3cdev *tdev,
1875f7917c00SJeff Kirsher 					  struct sge_rspq *q,
1876f7917c00SJeff Kirsher 					  struct sk_buff *skbs[], int n)
1877f7917c00SJeff Kirsher {
1878f7917c00SJeff Kirsher 	if (n) {
1879f7917c00SJeff Kirsher 		q->offload_bundles++;
1880f7917c00SJeff Kirsher 		tdev->recv(tdev, skbs, n);
1881f7917c00SJeff Kirsher 	}
1882f7917c00SJeff Kirsher }
1883f7917c00SJeff Kirsher 
1884f7917c00SJeff Kirsher /**
1885f7917c00SJeff Kirsher  *	ofld_poll - NAPI handler for offload packets in interrupt mode
1886f7917c00SJeff Kirsher  *	@napi: the NAPI instance doing the polling
1887f7917c00SJeff Kirsher  *	@budget: polling budget
1888f7917c00SJeff Kirsher  *
1889f7917c00SJeff Kirsher  *	The NAPI handler for offload packets when a response queue is serviced
1890f7917c00SJeff Kirsher  *	by the hard interrupt handler, i.e., when it's operating in non-polling
1891f7917c00SJeff Kirsher  *	mode.  Creates small packet batches and sends them through the offload
1892f7917c00SJeff Kirsher  *	receive handler.  Batches need to be of modest size as we do prefetches
1893f7917c00SJeff Kirsher  *	on the packets in each.
1894f7917c00SJeff Kirsher  */
1895f7917c00SJeff Kirsher static int ofld_poll(struct napi_struct *napi, int budget)
1896f7917c00SJeff Kirsher {
1897f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1898f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
1899f7917c00SJeff Kirsher 	struct adapter *adapter = qs->adap;
1900f7917c00SJeff Kirsher 	int work_done = 0;
1901f7917c00SJeff Kirsher 
1902f7917c00SJeff Kirsher 	while (work_done < budget) {
1903f7917c00SJeff Kirsher 		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1904f7917c00SJeff Kirsher 		struct sk_buff_head queue;
1905f7917c00SJeff Kirsher 		int ngathered;
1906f7917c00SJeff Kirsher 
1907f7917c00SJeff Kirsher 		spin_lock_irq(&q->lock);
1908f7917c00SJeff Kirsher 		__skb_queue_head_init(&queue);
1909f7917c00SJeff Kirsher 		skb_queue_splice_init(&q->rx_queue, &queue);
1910f7917c00SJeff Kirsher 		if (skb_queue_empty(&queue)) {
19116ad20165SEric Dumazet 			napi_complete_done(napi, work_done);
1912f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1913f7917c00SJeff Kirsher 			return work_done;
1914f7917c00SJeff Kirsher 		}
1915f7917c00SJeff Kirsher 		spin_unlock_irq(&q->lock);
1916f7917c00SJeff Kirsher 
1917f7917c00SJeff Kirsher 		ngathered = 0;
1918f7917c00SJeff Kirsher 		skb_queue_walk_safe(&queue, skb, tmp) {
1919f7917c00SJeff Kirsher 			if (work_done >= budget)
1920f7917c00SJeff Kirsher 				break;
1921f7917c00SJeff Kirsher 			work_done++;
1922f7917c00SJeff Kirsher 
1923f7917c00SJeff Kirsher 			__skb_unlink(skb, &queue);
1924f7917c00SJeff Kirsher 			prefetch(skb->data);
1925f7917c00SJeff Kirsher 			skbs[ngathered] = skb;
1926f7917c00SJeff Kirsher 			if (++ngathered == RX_BUNDLE_SIZE) {
1927f7917c00SJeff Kirsher 				q->offload_bundles++;
1928f7917c00SJeff Kirsher 				adapter->tdev.recv(&adapter->tdev, skbs,
1929f7917c00SJeff Kirsher 						   ngathered);
1930f7917c00SJeff Kirsher 				ngathered = 0;
1931f7917c00SJeff Kirsher 			}
1932f7917c00SJeff Kirsher 		}
1933f7917c00SJeff Kirsher 		if (!skb_queue_empty(&queue)) {
1934f7917c00SJeff Kirsher 			/* splice remaining packets back onto Rx queue */
1935f7917c00SJeff Kirsher 			spin_lock_irq(&q->lock);
1936f7917c00SJeff Kirsher 			skb_queue_splice(&queue, &q->rx_queue);
1937f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1938f7917c00SJeff Kirsher 		}
1939f7917c00SJeff Kirsher 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1940f7917c00SJeff Kirsher 	}
1941f7917c00SJeff Kirsher 
1942f7917c00SJeff Kirsher 	return work_done;
1943f7917c00SJeff Kirsher }
1944f7917c00SJeff Kirsher 
1945f7917c00SJeff Kirsher /**
1946f7917c00SJeff Kirsher  *	rx_offload - process a received offload packet
1947f7917c00SJeff Kirsher  *	@tdev: the offload device receiving the packet
1948f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
1949f7917c00SJeff Kirsher  *	@skb: the packet
1950f7917c00SJeff Kirsher  *	@rx_gather: a gather list of packets if we are building a bundle
1951f7917c00SJeff Kirsher  *	@gather_idx: index of the next available slot in the bundle
1952f7917c00SJeff Kirsher  *
1953f7917c00SJeff Kirsher  *	Process an ingress offload packet and add it to the offload ingress
1954f7917c00SJeff Kirsher  *	queue.  Returns the index of the next available slot in the bundle.
1955f7917c00SJeff Kirsher  */
1956f7917c00SJeff Kirsher static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1957f7917c00SJeff Kirsher 			     struct sk_buff *skb, struct sk_buff *rx_gather[],
1958f7917c00SJeff Kirsher 			     unsigned int gather_idx)
1959f7917c00SJeff Kirsher {
1960f7917c00SJeff Kirsher 	skb_reset_mac_header(skb);
1961f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
1962f7917c00SJeff Kirsher 	skb_reset_transport_header(skb);
1963f7917c00SJeff Kirsher 
1964f7917c00SJeff Kirsher 	if (rq->polling) {
1965f7917c00SJeff Kirsher 		rx_gather[gather_idx++] = skb;
1966f7917c00SJeff Kirsher 		if (gather_idx == RX_BUNDLE_SIZE) {
1967f7917c00SJeff Kirsher 			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1968f7917c00SJeff Kirsher 			gather_idx = 0;
1969f7917c00SJeff Kirsher 			rq->offload_bundles++;
1970f7917c00SJeff Kirsher 		}
1971f7917c00SJeff Kirsher 	} else
1972f7917c00SJeff Kirsher 		offload_enqueue(rq, skb);
1973f7917c00SJeff Kirsher 
1974f7917c00SJeff Kirsher 	return gather_idx;
1975f7917c00SJeff Kirsher }
1976f7917c00SJeff Kirsher 
1977f7917c00SJeff Kirsher /**
1978f7917c00SJeff Kirsher  *	restart_tx - check whether to restart suspended Tx queues
1979f7917c00SJeff Kirsher  *	@qs: the queue set to resume
1980f7917c00SJeff Kirsher  *
1981f7917c00SJeff Kirsher  *	Restarts suspended Tx queues of an SGE queue set if they have enough
1982f7917c00SJeff Kirsher  *	free resources to resume operation.
1983f7917c00SJeff Kirsher  */
1984f7917c00SJeff Kirsher static void restart_tx(struct sge_qset *qs)
1985f7917c00SJeff Kirsher {
1986f7917c00SJeff Kirsher 	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1987f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
1988f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1989f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].restarts++;
1990f7917c00SJeff Kirsher 		if (netif_running(qs->netdev))
1991f7917c00SJeff Kirsher 			netif_tx_wake_queue(qs->tx_q);
1992f7917c00SJeff Kirsher 	}
1993f7917c00SJeff Kirsher 
1994f7917c00SJeff Kirsher 	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1995f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1996f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1997f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].restarts++;
1998f7917c00SJeff Kirsher 		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1999f7917c00SJeff Kirsher 	}
2000f7917c00SJeff Kirsher 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2001f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2002f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2003f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].restarts++;
2004f7917c00SJeff Kirsher 		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
2005f7917c00SJeff Kirsher 	}
2006f7917c00SJeff Kirsher }
2007f7917c00SJeff Kirsher 
2008f7917c00SJeff Kirsher /**
2009f7917c00SJeff Kirsher  *	cxgb3_arp_process - process an ARP request probing a private IP address
2010f7917c00SJeff Kirsher  *	@pi: the port the ARP request arrived on
2011f7917c00SJeff Kirsher  *	@skb: the skbuff containing the ARP request
2012f7917c00SJeff Kirsher  *
2013f7917c00SJeff Kirsher  *	Check if the ARP request is probing the private IP address
2014f7917c00SJeff Kirsher  *	dedicated to iSCSI, and generate an ARP reply if so.
2015f7917c00SJeff Kirsher  */
2016f7917c00SJeff Kirsher static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2017f7917c00SJeff Kirsher {
2018f7917c00SJeff Kirsher 	struct net_device *dev = skb->dev;
2019f7917c00SJeff Kirsher 	struct arphdr *arp;
2020f7917c00SJeff Kirsher 	unsigned char *arp_ptr;
2021f7917c00SJeff Kirsher 	unsigned char *sha;
2022f7917c00SJeff Kirsher 	__be32 sip, tip;
2023f7917c00SJeff Kirsher 
2024f7917c00SJeff Kirsher 	if (!dev)
2025f7917c00SJeff Kirsher 		return;
2026f7917c00SJeff Kirsher 
2027f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
2028f7917c00SJeff Kirsher 	arp = arp_hdr(skb);
2029f7917c00SJeff Kirsher 
2030f7917c00SJeff Kirsher 	if (arp->ar_op != htons(ARPOP_REQUEST))
2031f7917c00SJeff Kirsher 		return;
2032f7917c00SJeff Kirsher 
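	/*
	 * The ARP payload follows the fixed header: sender MAC, sender IP,
	 * target MAC, target IP.  Walk it by hand since arp_hdr() only
	 * covers the fixed portion.
	 */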
2033f7917c00SJeff Kirsher 	arp_ptr = (unsigned char *)(arp + 1);
2034f7917c00SJeff Kirsher 	sha = arp_ptr;
2035f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2036f7917c00SJeff Kirsher 	memcpy(&sip, arp_ptr, sizeof(sip));
2037f7917c00SJeff Kirsher 	arp_ptr += sizeof(sip);
2038f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2039f7917c00SJeff Kirsher 	memcpy(&tip, arp_ptr, sizeof(tip));
2040f7917c00SJeff Kirsher 
2041f7917c00SJeff Kirsher 	if (tip != pi->iscsi_ipv4addr)
2042f7917c00SJeff Kirsher 		return;
2043f7917c00SJeff Kirsher 
2044f7917c00SJeff Kirsher 	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2045f7917c00SJeff Kirsher 		 pi->iscsic.mac_addr, sha);
2046f7917c00SJeff Kirsher 
2047f7917c00SJeff Kirsher }
2048f7917c00SJeff Kirsher 
2049f7917c00SJeff Kirsher static inline int is_arp(struct sk_buff *skb)
2050f7917c00SJeff Kirsher {
2051f7917c00SJeff Kirsher 	return skb->protocol == htons(ETH_P_ARP);
2052f7917c00SJeff Kirsher }
2053f7917c00SJeff Kirsher 
2054f7917c00SJeff Kirsher static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2055f7917c00SJeff Kirsher 					struct sk_buff *skb)
2056f7917c00SJeff Kirsher {
2057f7917c00SJeff Kirsher 	if (is_arp(skb)) {
2058f7917c00SJeff Kirsher 		cxgb3_arp_process(pi, skb);
2059f7917c00SJeff Kirsher 		return;
2060f7917c00SJeff Kirsher 	}
2061f7917c00SJeff Kirsher 
2062f7917c00SJeff Kirsher 	if (pi->iscsic.recv)
2063f7917c00SJeff Kirsher 		pi->iscsic.recv(pi, skb);
2064f7917c00SJeff Kirsher 
2065f7917c00SJeff Kirsher }
2066f7917c00SJeff Kirsher 
2067f7917c00SJeff Kirsher /**
2068f7917c00SJeff Kirsher  *	rx_eth - process an ingress ethernet packet
2069f7917c00SJeff Kirsher  *	@adap: the adapter
2070f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
2071f7917c00SJeff Kirsher  *	@skb: the packet
2072f7917c00SJeff Kirsher  *	@pad: amount of padding at the start of the buffer
 *	@lro: whether to deliver the packet via GRO
2073f7917c00SJeff Kirsher  *
2074f7917c00SJeff Kirsher  *	Process an ingress Ethernet packet and deliver it to the stack.
2075f7917c00SJeff Kirsher  *	The padding is 2 if the packet was delivered in an Rx buffer and 0
2076f7917c00SJeff Kirsher  *	if it was immediate data in a response.
2077f7917c00SJeff Kirsher  */
2078f7917c00SJeff Kirsher static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2079f7917c00SJeff Kirsher 		   struct sk_buff *skb, int pad, int lro)
2080f7917c00SJeff Kirsher {
2081f7917c00SJeff Kirsher 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2082f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(rq);
2083f7917c00SJeff Kirsher 	struct port_info *pi;
2084f7917c00SJeff Kirsher 
2085f7917c00SJeff Kirsher 	skb_pull(skb, sizeof(*p) + pad);
2086f7917c00SJeff Kirsher 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2087f7917c00SJeff Kirsher 	pi = netdev_priv(skb->dev);
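	/* The adapter reports a verified TCP/UDP checksum as 0xffff. */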
2088f7917c00SJeff Kirsher 	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2089f7917c00SJeff Kirsher 	    p->csum == htons(0xffff) && !p->fragment) {
2090f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2091f7917c00SJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2092f7917c00SJeff Kirsher 	} else
2093f7917c00SJeff Kirsher 		skb_checksum_none_assert(skb);
2094f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2095f7917c00SJeff Kirsher 
2096f7917c00SJeff Kirsher 	if (p->vlan_valid) {
2097f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANEX]++;
209886a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2099f7917c00SJeff Kirsher 	}
2100f7917c00SJeff Kirsher 	if (rq->polling) {
2101f7917c00SJeff Kirsher 		if (lro)
2102f7917c00SJeff Kirsher 			napi_gro_receive(&qs->napi, skb);
2103f7917c00SJeff Kirsher 		else {
2104f7917c00SJeff Kirsher 			if (unlikely(pi->iscsic.flags))
2105f7917c00SJeff Kirsher 				cxgb3_process_iscsi_prov_pack(pi, skb);
2106f7917c00SJeff Kirsher 			netif_receive_skb(skb);
2107f7917c00SJeff Kirsher 		}
2108f7917c00SJeff Kirsher 	} else
2109f7917c00SJeff Kirsher 		netif_rx(skb);
2110f7917c00SJeff Kirsher }
2111f7917c00SJeff Kirsher 
2112f7917c00SJeff Kirsher static inline int is_eth_tcp(u32 rss)
2113f7917c00SJeff Kirsher {
2114f7917c00SJeff Kirsher 	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2115f7917c00SJeff Kirsher }
2116f7917c00SJeff Kirsher 
2117f7917c00SJeff Kirsher /**
2118f7917c00SJeff Kirsher  *	lro_add_page - add a page chunk to an LRO session
2119f7917c00SJeff Kirsher  *	@adap: the adapter
2120f7917c00SJeff Kirsher  *	@qs: the associated queue set
2121f7917c00SJeff Kirsher  *	@fl: the free list containing the page chunk to add
2122f7917c00SJeff Kirsher  *	@len: packet length
2123f7917c00SJeff Kirsher  *	@complete: Indicates the last fragment of a frame
2124f7917c00SJeff Kirsher  *
2125f7917c00SJeff Kirsher  *	Add a received packet contained in a page chunk to an existing LRO
2126f7917c00SJeff Kirsher  *	session.
2127f7917c00SJeff Kirsher  */
2128f7917c00SJeff Kirsher static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2129f7917c00SJeff Kirsher 			 struct sge_fl *fl, int len, int complete)
2130f7917c00SJeff Kirsher {
2131f7917c00SJeff Kirsher 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2132f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2133f7917c00SJeff Kirsher 	struct sk_buff *skb = NULL;
2134f7917c00SJeff Kirsher 	struct cpl_rx_pkt *cpl;
2135f7917c00SJeff Kirsher 	struct skb_frag_struct *rx_frag;
2136f7917c00SJeff Kirsher 	int nr_frags;
2137f7917c00SJeff Kirsher 	int offset = 0;
2138f7917c00SJeff Kirsher 
2139f7917c00SJeff Kirsher 	if (!qs->nomem) {
2140f7917c00SJeff Kirsher 		skb = napi_get_frags(&qs->napi);
2141f7917c00SJeff Kirsher 		qs->nomem = !skb;
2142f7917c00SJeff Kirsher 	}
2143f7917c00SJeff Kirsher 
2144f7917c00SJeff Kirsher 	fl->credits--;
2145f7917c00SJeff Kirsher 
2146f7917c00SJeff Kirsher 	pci_dma_sync_single_for_cpu(adap->pdev,
2147f7917c00SJeff Kirsher 				    dma_unmap_addr(sd, dma_addr),
2148f7917c00SJeff Kirsher 				    fl->buf_size - SGE_PG_RSVD,
2149f7917c00SJeff Kirsher 				    PCI_DMA_FROMDEVICE);
2150f7917c00SJeff Kirsher 
2151f7917c00SJeff Kirsher 	(*sd->pg_chunk.p_cnt)--;
2152f7917c00SJeff Kirsher 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2153f7917c00SJeff Kirsher 		pci_unmap_page(adap->pdev,
2154f7917c00SJeff Kirsher 			       sd->pg_chunk.mapping,
2155f7917c00SJeff Kirsher 			       fl->alloc_size,
2156f7917c00SJeff Kirsher 			       PCI_DMA_FROMDEVICE);
2157f7917c00SJeff Kirsher 
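	/*
	 * No GRO skb is available: drop this fragment's page chunk.  The
	 * qs->nomem flag stays set until the frame completes so that the
	 * remaining fragments of the partially built frame are discarded
	 * consistently.
	 */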
2158f7917c00SJeff Kirsher 	if (!skb) {
2159f7917c00SJeff Kirsher 		put_page(sd->pg_chunk.page);
2160f7917c00SJeff Kirsher 		if (complete)
2161f7917c00SJeff Kirsher 			qs->nomem = 0;
2162f7917c00SJeff Kirsher 		return;
2163f7917c00SJeff Kirsher 	}
2164f7917c00SJeff Kirsher 
2165f7917c00SJeff Kirsher 	rx_frag = skb_shinfo(skb)->frags;
2166f7917c00SJeff Kirsher 	nr_frags = skb_shinfo(skb)->nr_frags;
2167f7917c00SJeff Kirsher 
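	/*
	 * The first fragment of a frame starts with the 2-byte pad and the
	 * CPL_RX_PKT header; skip past them via the offset and cache the CPL
	 * pointer in qs->lro_va so later fragments can still refer to it.
	 */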
2168f7917c00SJeff Kirsher 	if (!nr_frags) {
2169f7917c00SJeff Kirsher 		offset = 2 + sizeof(struct cpl_rx_pkt);
2170f7917c00SJeff Kirsher 		cpl = qs->lro_va = sd->pg_chunk.va + 2;
2171f7917c00SJeff Kirsher 
2172f7917c00SJeff Kirsher 		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2173f7917c00SJeff Kirsher 		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
2174f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2175f7917c00SJeff Kirsher 			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2176f7917c00SJeff Kirsher 		} else
2177f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_NONE;
2178f7917c00SJeff Kirsher 	} else
2179f7917c00SJeff Kirsher 		cpl = qs->lro_va;
2180f7917c00SJeff Kirsher 
2181f7917c00SJeff Kirsher 	len -= offset;
2182f7917c00SJeff Kirsher 
2183f7917c00SJeff Kirsher 	rx_frag += nr_frags;
21846a930b9fSIan Campbell 	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2185f7917c00SJeff Kirsher 	rx_frag->page_offset = sd->pg_chunk.offset + offset;
21869e903e08SEric Dumazet 	skb_frag_size_set(rx_frag, len);
2187f7917c00SJeff Kirsher 
2188f7917c00SJeff Kirsher 	skb->len += len;
2189f7917c00SJeff Kirsher 	skb->data_len += len;
2190f7917c00SJeff Kirsher 	skb->truesize += len;
2191f7917c00SJeff Kirsher 	skb_shinfo(skb)->nr_frags++;
2192f7917c00SJeff Kirsher 
2193f7917c00SJeff Kirsher 	if (!complete)
2194f7917c00SJeff Kirsher 		return;
2195f7917c00SJeff Kirsher 
2196f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2197f7917c00SJeff Kirsher 
219872073ad2SVipul Pandya 	if (cpl->vlan_valid) {
219972073ad2SVipul Pandya 		qs->port_stats[SGE_PSTAT_VLANEX]++;
220086a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
220172073ad2SVipul Pandya 	}
2202f7917c00SJeff Kirsher 	napi_gro_frags(&qs->napi);
2203f7917c00SJeff Kirsher }
2204f7917c00SJeff Kirsher 
2205f7917c00SJeff Kirsher /**
2206f7917c00SJeff Kirsher  *	handle_rsp_cntrl_info - handles control information in a response
2207f7917c00SJeff Kirsher  *	@qs: the queue set corresponding to the response
2208f7917c00SJeff Kirsher  *	@flags: the response control flags
2209f7917c00SJeff Kirsher  *
2210f7917c00SJeff Kirsher  *	Handles the control information of an SGE response, such as GTS
2211f7917c00SJeff Kirsher  *	indications and completion credits for the queue set's Tx queues.
2212f7917c00SJeff Kirsher  *	HW coalesces credits; we don't do any extra SW coalescing.
2213f7917c00SJeff Kirsher  */
2214f7917c00SJeff Kirsher static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2215f7917c00SJeff Kirsher {
2216f7917c00SJeff Kirsher 	unsigned int credits;
2217f7917c00SJeff Kirsher 
2218f7917c00SJeff Kirsher #if USE_GTS
2219f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ0_GTS)
2220f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2221f7917c00SJeff Kirsher #endif
2222f7917c00SJeff Kirsher 
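	/*
	 * Completion credits are reported per hardware Tx queue: TXQ0 backs
	 * the Ethernet queue, TXQ1 the offload queue, and TXQ2 the control
	 * queue.
	 */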
2223f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ0_CR(flags);
2224f7917c00SJeff Kirsher 	if (credits)
2225f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].processed += credits;
2226f7917c00SJeff Kirsher 
2227f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ2_CR(flags);
2228f7917c00SJeff Kirsher 	if (credits)
2229f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].processed += credits;
2230f7917c00SJeff Kirsher 
2231f7917c00SJeff Kirsher # if USE_GTS
2232f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ1_GTS)
2233f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2234f7917c00SJeff Kirsher # endif
2235f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ1_CR(flags);
2236f7917c00SJeff Kirsher 	if (credits)
2237f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].processed += credits;
2238f7917c00SJeff Kirsher }
2239f7917c00SJeff Kirsher 
2240f7917c00SJeff Kirsher /**
2241f7917c00SJeff Kirsher  *	check_ring_db - check if we need to ring any doorbells
2242f7917c00SJeff Kirsher  *	@adap: the adapter
2243f7917c00SJeff Kirsher  *	@qs: the queue set whose Tx queues are to be examined
2244f7917c00SJeff Kirsher  *	@sleeping: indicates which Tx queue sent GTS
2245f7917c00SJeff Kirsher  *
2246f7917c00SJeff Kirsher  *	Checks if some of a queue set's Tx queues need to ring their doorbells
2247f7917c00SJeff Kirsher  *	to resume transmission after idling while they still have unprocessed
2248f7917c00SJeff Kirsher  *	descriptors.
2249f7917c00SJeff Kirsher  */
2250f7917c00SJeff Kirsher static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2251f7917c00SJeff Kirsher 			  unsigned int sleeping)
2252f7917c00SJeff Kirsher {
2253f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ0_GTS) {
2254f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
2255f7917c00SJeff Kirsher 
2256f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2257f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2258f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2259f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2260f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2261f7917c00SJeff Kirsher 		}
2262f7917c00SJeff Kirsher 	}
2263f7917c00SJeff Kirsher 
2264f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ1_GTS) {
2265f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2266f7917c00SJeff Kirsher 
2267f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2268f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2269f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2270f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2271f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2272f7917c00SJeff Kirsher 		}
2273f7917c00SJeff Kirsher 	}
2274f7917c00SJeff Kirsher }
2275f7917c00SJeff Kirsher 
2276f7917c00SJeff Kirsher /**
2277f7917c00SJeff Kirsher  *	is_new_response - check if a response is newly written
2278f7917c00SJeff Kirsher  *	@r: the response descriptor
2279f7917c00SJeff Kirsher  *	@q: the response queue
2280f7917c00SJeff Kirsher  *
2281f7917c00SJeff Kirsher  *	Returns true if a response descriptor contains a yet unprocessed
2282f7917c00SJeff Kirsher  *	response.
2283f7917c00SJeff Kirsher  */
2284f7917c00SJeff Kirsher static inline int is_new_response(const struct rsp_desc *r,
2285f7917c00SJeff Kirsher 				  const struct sge_rspq *q)
2286f7917c00SJeff Kirsher {
2287f7917c00SJeff Kirsher 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2288f7917c00SJeff Kirsher }
2289f7917c00SJeff Kirsher 
2290f7917c00SJeff Kirsher static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2291f7917c00SJeff Kirsher {
2292f7917c00SJeff Kirsher 	q->pg_skb = NULL;
2293f7917c00SJeff Kirsher 	q->rx_recycle_buf = 0;
2294f7917c00SJeff Kirsher }
2295f7917c00SJeff Kirsher 
2296f7917c00SJeff Kirsher #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2297f7917c00SJeff Kirsher #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2298f7917c00SJeff Kirsher 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2299f7917c00SJeff Kirsher 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2300f7917c00SJeff Kirsher 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2301f7917c00SJeff Kirsher 
2302f7917c00SJeff Kirsher /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2303f7917c00SJeff Kirsher #define NOMEM_INTR_DELAY 2500
2304f7917c00SJeff Kirsher 
2305f7917c00SJeff Kirsher /**
2306f7917c00SJeff Kirsher  *	process_responses - process responses from an SGE response queue
2307f7917c00SJeff Kirsher  *	@adap: the adapter
2308f7917c00SJeff Kirsher  *	@qs: the queue set to which the response queue belongs
2309f7917c00SJeff Kirsher  *	@budget: how many responses can be processed in this round
2310f7917c00SJeff Kirsher  *
2311f7917c00SJeff Kirsher  *	Process responses from an SGE response queue up to the supplied budget.
2312f7917c00SJeff Kirsher  *	Responses include received packets as well as credits and other events
2313f7917c00SJeff Kirsher  *	for the queues that belong to the response queue's queue set.
2314f7917c00SJeff Kirsher  *	A negative budget is effectively unlimited.
2315f7917c00SJeff Kirsher  *
2316f7917c00SJeff Kirsher  *	Additionally, choose the interrupt holdoff time for the next interrupt
2317f7917c00SJeff Kirsher  *	on this queue.  If the system is under memory shortage, use a fairly
2318f7917c00SJeff Kirsher  *	long delay to help recovery.
2319f7917c00SJeff Kirsher  */
2320f7917c00SJeff Kirsher static int process_responses(struct adapter *adap, struct sge_qset *qs,
2321f7917c00SJeff Kirsher 			     int budget)
2322f7917c00SJeff Kirsher {
2323f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2324f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2325f7917c00SJeff Kirsher 	int budget_left = budget;
2326f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2327f7917c00SJeff Kirsher 	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2328f7917c00SJeff Kirsher 	int ngathered = 0;
2329f7917c00SJeff Kirsher 
2330f7917c00SJeff Kirsher 	q->next_holdoff = q->holdoff_tmr;
2331f7917c00SJeff Kirsher 
2332f7917c00SJeff Kirsher 	while (likely(budget_left && is_new_response(r, q))) {
2333f7917c00SJeff Kirsher 		int packet_complete, eth, ethpad = 2;
2334f7917c00SJeff Kirsher 		int lro = !!(qs->netdev->features & NETIF_F_GRO);
2335f7917c00SJeff Kirsher 		struct sk_buff *skb = NULL;
2336f7917c00SJeff Kirsher 		u32 len, flags;
2337f7917c00SJeff Kirsher 		__be32 rss_hi, rss_lo;
2338f7917c00SJeff Kirsher 
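		/*
		 * Read the rest of the descriptor only after the generation
		 * bit check above has confirmed it holds a valid response.
		 */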
2339019be1cfSAlexander Duyck 		dma_rmb();
2340f7917c00SJeff Kirsher 		eth = r->rss_hdr.opcode == CPL_RX_PKT;
2341f7917c00SJeff Kirsher 		rss_hi = *(const __be32 *)r;
2342f7917c00SJeff Kirsher 		rss_lo = r->rss_hdr.rss_hash_val;
2343f7917c00SJeff Kirsher 		flags = ntohl(r->flags);
2344f7917c00SJeff Kirsher 
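		/*
		 * Asynchronous notifications are repackaged as a
		 * CPL_ASYNC_NOTIF message and delivered through the offload
		 * path below.
		 */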
2345f7917c00SJeff Kirsher 		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2346f7917c00SJeff Kirsher 			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2347f7917c00SJeff Kirsher 			if (!skb)
2348f7917c00SJeff Kirsher 				goto no_mem;
2349f7917c00SJeff Kirsher 
2350de77b966Syuan linyu 			__skb_put_data(skb, r, AN_PKT_SIZE);
2351f7917c00SJeff Kirsher 			skb->data[0] = CPL_ASYNC_NOTIF;
2352f7917c00SJeff Kirsher 			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2353f7917c00SJeff Kirsher 			q->async_notif++;
2354f7917c00SJeff Kirsher 		} else if (flags & F_RSPD_IMM_DATA_VALID) {
2355f7917c00SJeff Kirsher 			skb = get_imm_packet(r);
2356f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2357f7917c00SJeff Kirsher no_mem:
2358f7917c00SJeff Kirsher 				q->next_holdoff = NOMEM_INTR_DELAY;
2359f7917c00SJeff Kirsher 				q->nomem++;
2360f7917c00SJeff Kirsher 				/* consume one credit since we tried */
2361f7917c00SJeff Kirsher 				budget_left--;
2362f7917c00SJeff Kirsher 				break;
2363f7917c00SJeff Kirsher 			}
2364f7917c00SJeff Kirsher 			q->imm_data++;
2365f7917c00SJeff Kirsher 			ethpad = 0;
2366f7917c00SJeff Kirsher 		} else if ((len = ntohl(r->len_cq)) != 0) {
2367f7917c00SJeff Kirsher 			struct sge_fl *fl;
2368f7917c00SJeff Kirsher 
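			/* only Ethernet TCP packets (4-tuple hash) may go to GRO */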
2369f7917c00SJeff Kirsher 			lro &= eth && is_eth_tcp(rss_hi);
2370f7917c00SJeff Kirsher 
2371f7917c00SJeff Kirsher 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2372f7917c00SJeff Kirsher 			if (fl->use_pages) {
2373f7917c00SJeff Kirsher 				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2374f7917c00SJeff Kirsher 
2375f7917c00SJeff Kirsher 				prefetch(addr);
2376f7917c00SJeff Kirsher #if L1_CACHE_BYTES < 128
2377f7917c00SJeff Kirsher 				prefetch(addr + L1_CACHE_BYTES);
2378f7917c00SJeff Kirsher #endif
2379f7917c00SJeff Kirsher 				__refill_fl(adap, fl);
2380f7917c00SJeff Kirsher 				if (lro > 0) {
2381f7917c00SJeff Kirsher 					lro_add_page(adap, qs, fl,
2382f7917c00SJeff Kirsher 						     G_RSPD_LEN(len),
2383f7917c00SJeff Kirsher 						     flags & F_RSPD_EOP);
2384f7917c00SJeff Kirsher 					goto next_fl;
2385f7917c00SJeff Kirsher 				}
2386f7917c00SJeff Kirsher 
2387f7917c00SJeff Kirsher 				skb = get_packet_pg(adap, fl, q,
2388f7917c00SJeff Kirsher 						    G_RSPD_LEN(len),
2389f7917c00SJeff Kirsher 						    eth ?
2390f7917c00SJeff Kirsher 						    SGE_RX_DROP_THRES : 0);
2391f7917c00SJeff Kirsher 				q->pg_skb = skb;
2392f7917c00SJeff Kirsher 			} else
2393f7917c00SJeff Kirsher 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
2394f7917c00SJeff Kirsher 						 eth ? SGE_RX_DROP_THRES : 0);
2395f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2396f7917c00SJeff Kirsher 				if (!eth)
2397f7917c00SJeff Kirsher 					goto no_mem;
2398f7917c00SJeff Kirsher 				q->rx_drops++;
2399f7917c00SJeff Kirsher 			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2400f7917c00SJeff Kirsher 				__skb_pull(skb, 2);
2401f7917c00SJeff Kirsher next_fl:
2402f7917c00SJeff Kirsher 			if (++fl->cidx == fl->size)
2403f7917c00SJeff Kirsher 				fl->cidx = 0;
2404f7917c00SJeff Kirsher 		} else
2405f7917c00SJeff Kirsher 			q->pure_rsps++;
2406f7917c00SJeff Kirsher 
2407f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2408f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2409f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2410f7917c00SJeff Kirsher 		}
2411f7917c00SJeff Kirsher 
2412f7917c00SJeff Kirsher 		r++;
2413f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2414f7917c00SJeff Kirsher 			q->cidx = 0;
2415f7917c00SJeff Kirsher 			q->gen ^= 1;
2416f7917c00SJeff Kirsher 			r = q->desc;
2417f7917c00SJeff Kirsher 		}
2418f7917c00SJeff Kirsher 		prefetch(r);
2419f7917c00SJeff Kirsher 
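		/*
		 * Return response queue credits to the hardware in chunks of
		 * a quarter of the queue size rather than one at a time.
		 */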
2420f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2421f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2422f7917c00SJeff Kirsher 			q->credits = 0;
2423f7917c00SJeff Kirsher 		}
2424f7917c00SJeff Kirsher 
2425f7917c00SJeff Kirsher 		packet_complete = flags &
2426f7917c00SJeff Kirsher 				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2427f7917c00SJeff Kirsher 				   F_RSPD_ASYNC_NOTIF);
2428f7917c00SJeff Kirsher 
2429f7917c00SJeff Kirsher 		if (skb != NULL && packet_complete) {
2430f7917c00SJeff Kirsher 			if (eth)
2431f7917c00SJeff Kirsher 				rx_eth(adap, q, skb, ethpad, lro);
2432f7917c00SJeff Kirsher 			else {
2433f7917c00SJeff Kirsher 				q->offload_pkts++;
2434f7917c00SJeff Kirsher 				/* Preserve the RSS info in csum & priority */
2435f7917c00SJeff Kirsher 				skb->csum = rss_hi;
2436f7917c00SJeff Kirsher 				skb->priority = rss_lo;
2437f7917c00SJeff Kirsher 				ngathered = rx_offload(&adap->tdev, q, skb,
2438f7917c00SJeff Kirsher 						       offload_skbs,
2439f7917c00SJeff Kirsher 						       ngathered);
2440f7917c00SJeff Kirsher 			}
2441f7917c00SJeff Kirsher 
2442f7917c00SJeff Kirsher 			if (flags & F_RSPD_EOP)
2443f7917c00SJeff Kirsher 				clear_rspq_bufstate(q);
2444f7917c00SJeff Kirsher 		}
2445f7917c00SJeff Kirsher 		--budget_left;
2446f7917c00SJeff Kirsher 	}
2447f7917c00SJeff Kirsher 
2448f7917c00SJeff Kirsher 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2449f7917c00SJeff Kirsher 
2450f7917c00SJeff Kirsher 	if (sleeping)
2451f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2452f7917c00SJeff Kirsher 
2453f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2454f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2455f7917c00SJeff Kirsher 		restart_tx(qs);
2456f7917c00SJeff Kirsher 
2457f7917c00SJeff Kirsher 	budget -= budget_left;
2458f7917c00SJeff Kirsher 	return budget;
2459f7917c00SJeff Kirsher }
2460f7917c00SJeff Kirsher 
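/*
 * A pure response carries no data at all: no free-list buffer, no immediate
 * data, and no async notification.
 */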
2461f7917c00SJeff Kirsher static inline int is_pure_response(const struct rsp_desc *r)
2462f7917c00SJeff Kirsher {
2463f7917c00SJeff Kirsher 	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2464f7917c00SJeff Kirsher 
2465f7917c00SJeff Kirsher 	return (n | r->len_cq) == 0;
2466f7917c00SJeff Kirsher }
2467f7917c00SJeff Kirsher 
2468f7917c00SJeff Kirsher /**
2469f7917c00SJeff Kirsher  *	napi_rx_handler - the NAPI handler for Rx processing
2470f7917c00SJeff Kirsher  *	@napi: the napi instance
2471f7917c00SJeff Kirsher  *	@budget: how many packets we can process in this round
2472f7917c00SJeff Kirsher  *
2473f7917c00SJeff Kirsher  *	Handler for new data events when using NAPI.
2474f7917c00SJeff Kirsher  */
2475f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget)
2476f7917c00SJeff Kirsher {
2477f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2478f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2479f7917c00SJeff Kirsher 	int work_done = process_responses(adap, qs, budget);
2480f7917c00SJeff Kirsher 
2481f7917c00SJeff Kirsher 	if (likely(work_done < budget)) {
24826ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
2483f7917c00SJeff Kirsher 
2484f7917c00SJeff Kirsher 		/*
2485f7917c00SJeff Kirsher 		 * Because we don't atomically flush the following
2486f7917c00SJeff Kirsher 		 * write it is possible that in very rare cases it can
2487f7917c00SJeff Kirsher 		 * reach the device in a way that races with a new
2488f7917c00SJeff Kirsher 		 * response being written plus an error interrupt
2489f7917c00SJeff Kirsher 		 * causing the NAPI interrupt handler below to return
2490f7917c00SJeff Kirsher 		 * unhandled status to the OS.  To protect against
2491f7917c00SJeff Kirsher 		 * this would require flushing the write and doing
2492f7917c00SJeff Kirsher 		 * both the write and the flush with interrupts off.
2493f7917c00SJeff Kirsher 		 * Way too expensive and unjustifiable given the
2494f7917c00SJeff Kirsher 		 * rarity of the race.
2495f7917c00SJeff Kirsher 		 *
2496f7917c00SJeff Kirsher 		 * The race cannot happen at all with MSI-X.
2497f7917c00SJeff Kirsher 		 */
2498f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2499f7917c00SJeff Kirsher 			     V_NEWTIMER(qs->rspq.next_holdoff) |
2500f7917c00SJeff Kirsher 			     V_NEWINDEX(qs->rspq.cidx));
2501f7917c00SJeff Kirsher 	}
2502f7917c00SJeff Kirsher 	return work_done;
2503f7917c00SJeff Kirsher }
2504f7917c00SJeff Kirsher 
2505f7917c00SJeff Kirsher /*
2506f7917c00SJeff Kirsher  * Returns true if the device is already scheduled for polling.
2507f7917c00SJeff Kirsher  */
2508f7917c00SJeff Kirsher static inline int napi_is_scheduled(struct napi_struct *napi)
2509f7917c00SJeff Kirsher {
2510f7917c00SJeff Kirsher 	return test_bit(NAPI_STATE_SCHED, &napi->state);
2511f7917c00SJeff Kirsher }
2512f7917c00SJeff Kirsher 
2513f7917c00SJeff Kirsher /**
2514f7917c00SJeff Kirsher  *	process_pure_responses - process pure responses from a response queue
2515f7917c00SJeff Kirsher  *	@adap: the adapter
2516f7917c00SJeff Kirsher  *	@qs: the queue set owning the response queue
2517f7917c00SJeff Kirsher  *	@r: the first pure response to process
2518f7917c00SJeff Kirsher  *
2519f7917c00SJeff Kirsher  *	A simpler version of process_responses() that handles only pure (i.e.,
2520f7917c00SJeff Kirsher  *	non data-carrying) responses.  Such responses are too lightweight to
2521f7917c00SJeff Kirsher  *	justify calling a softirq under NAPI, so we handle them specially in
2522f7917c00SJeff Kirsher  *	the interrupt handler.  The function is called with a pointer to a
2523f7917c00SJeff Kirsher  *	response, which the caller must ensure is a valid pure response.
2524f7917c00SJeff Kirsher  *
2525f7917c00SJeff Kirsher  *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2526f7917c00SJeff Kirsher  */
2527f7917c00SJeff Kirsher static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2528f7917c00SJeff Kirsher 				  struct rsp_desc *r)
2529f7917c00SJeff Kirsher {
2530f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2531f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2532f7917c00SJeff Kirsher 
2533f7917c00SJeff Kirsher 	do {
2534f7917c00SJeff Kirsher 		u32 flags = ntohl(r->flags);
2535f7917c00SJeff Kirsher 
2536f7917c00SJeff Kirsher 		r++;
2537f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2538f7917c00SJeff Kirsher 			q->cidx = 0;
2539f7917c00SJeff Kirsher 			q->gen ^= 1;
2540f7917c00SJeff Kirsher 			r = q->desc;
2541f7917c00SJeff Kirsher 		}
2542f7917c00SJeff Kirsher 		prefetch(r);
2543f7917c00SJeff Kirsher 
2544f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2545f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2546f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2547f7917c00SJeff Kirsher 		}
2548f7917c00SJeff Kirsher 
2549f7917c00SJeff Kirsher 		q->pure_rsps++;
2550f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2551f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2552f7917c00SJeff Kirsher 			q->credits = 0;
2553f7917c00SJeff Kirsher 		}
2554f7917c00SJeff Kirsher 		if (!is_new_response(r, q))
2555f7917c00SJeff Kirsher 			break;
2556019be1cfSAlexander Duyck 		dma_rmb();
2557f7917c00SJeff Kirsher 	} while (is_pure_response(r));
2558f7917c00SJeff Kirsher 
2559f7917c00SJeff Kirsher 	if (sleeping)
2560f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2561f7917c00SJeff Kirsher 
2562f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2563f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2564f7917c00SJeff Kirsher 		restart_tx(qs);
2565f7917c00SJeff Kirsher 
2566f7917c00SJeff Kirsher 	return is_new_response(r, q);
2567f7917c00SJeff Kirsher }
2568f7917c00SJeff Kirsher 
2569f7917c00SJeff Kirsher /**
2570f7917c00SJeff Kirsher  *	handle_responses - decide what to do with new responses in NAPI mode
2571f7917c00SJeff Kirsher  *	@adap: the adapter
2572f7917c00SJeff Kirsher  *	@q: the response queue
2573f7917c00SJeff Kirsher  *
2574f7917c00SJeff Kirsher  *	This is used by the NAPI interrupt handlers to decide what to do with
2575f7917c00SJeff Kirsher  *	new SGE responses.  If there are no new responses it returns -1.  If
2576f7917c00SJeff Kirsher  *	there are new responses and they are pure (i.e., non-data carrying)
2577f7917c00SJeff Kirsher  *	it handles them straight in hard interrupt context as they are very
2578f7917c00SJeff Kirsher  *	cheap and don't deliver any packets.  Finally, if there are any data
2579f7917c00SJeff Kirsher  *	signaling responses it schedules the NAPI handler.  Returns 1 if it
2580f7917c00SJeff Kirsher  *	schedules NAPI, 0 if all new responses were pure.
2581f7917c00SJeff Kirsher  *
2582f7917c00SJeff Kirsher  *	The caller must ascertain NAPI is not already running.
2583f7917c00SJeff Kirsher  */
2584f7917c00SJeff Kirsher static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2585f7917c00SJeff Kirsher {
2586f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(q);
2587f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2588f7917c00SJeff Kirsher 
2589f7917c00SJeff Kirsher 	if (!is_new_response(r, q))
2590f7917c00SJeff Kirsher 		return -1;
2591019be1cfSAlexander Duyck 	dma_rmb();
2592f7917c00SJeff Kirsher 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2593f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2594f7917c00SJeff Kirsher 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2595f7917c00SJeff Kirsher 		return 0;
2596f7917c00SJeff Kirsher 	}
2597f7917c00SJeff Kirsher 	napi_schedule(&qs->napi);
2598f7917c00SJeff Kirsher 	return 1;
2599f7917c00SJeff Kirsher }
2600f7917c00SJeff Kirsher 
2601f7917c00SJeff Kirsher /*
2602f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2603f7917c00SJeff Kirsher  * (i.e., response queue serviced in hard interrupt).
2604f7917c00SJeff Kirsher  */
2605f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2606f7917c00SJeff Kirsher {
2607f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2608f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2609f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2610f7917c00SJeff Kirsher 
2611f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2612f7917c00SJeff Kirsher 	if (process_responses(adap, qs, -1) == 0)
2613f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2614f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2615f7917c00SJeff Kirsher 		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2616f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2617f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2618f7917c00SJeff Kirsher }
2619f7917c00SJeff Kirsher 
2620f7917c00SJeff Kirsher /*
2621f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2622f7917c00SJeff Kirsher  * (i.e., response queue serviced by NAPI polling).
2623f7917c00SJeff Kirsher  */
2624f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2625f7917c00SJeff Kirsher {
2626f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2627f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2628f7917c00SJeff Kirsher 
2629f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2630f7917c00SJeff Kirsher 
2631f7917c00SJeff Kirsher 	if (handle_responses(qs->adap, q) < 0)
2632f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2633f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2634f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2635f7917c00SJeff Kirsher }
2636f7917c00SJeff Kirsher 
2637f7917c00SJeff Kirsher /*
2638f7917c00SJeff Kirsher  * The non-NAPI MSI interrupt handler.  This needs to handle data events from
2639f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2640f7917c00SJeff Kirsher  * the same MSI vector.  We use one SGE response queue per port in this mode
2641f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2642f7917c00SJeff Kirsher  */
2643f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi(int irq, void *cookie)
2644f7917c00SJeff Kirsher {
2645f7917c00SJeff Kirsher 	int new_packets = 0;
2646f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2647f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2648f7917c00SJeff Kirsher 
2649f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2650f7917c00SJeff Kirsher 
2651f7917c00SJeff Kirsher 	if (process_responses(adap, &adap->sge.qs[0], -1)) {
2652f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2653f7917c00SJeff Kirsher 			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2654f7917c00SJeff Kirsher 		new_packets = 1;
2655f7917c00SJeff Kirsher 	}
2656f7917c00SJeff Kirsher 
2657f7917c00SJeff Kirsher 	if (adap->params.nports == 2 &&
2658f7917c00SJeff Kirsher 	    process_responses(adap, &adap->sge.qs[1], -1)) {
2659f7917c00SJeff Kirsher 		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2660f7917c00SJeff Kirsher 
2661f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2662f7917c00SJeff Kirsher 			     V_NEWTIMER(q1->next_holdoff) |
2663f7917c00SJeff Kirsher 			     V_NEWINDEX(q1->cidx));
2664f7917c00SJeff Kirsher 		new_packets = 1;
2665f7917c00SJeff Kirsher 	}
2666f7917c00SJeff Kirsher 
2667f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2668f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2669f7917c00SJeff Kirsher 
2670f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2671f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2672f7917c00SJeff Kirsher }
2673f7917c00SJeff Kirsher 
2674f7917c00SJeff Kirsher static int rspq_check_napi(struct sge_qset *qs)
2675f7917c00SJeff Kirsher {
2676f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2677f7917c00SJeff Kirsher 
2678f7917c00SJeff Kirsher 	if (!napi_is_scheduled(&qs->napi) &&
2679f7917c00SJeff Kirsher 	    is_new_response(&q->desc[q->cidx], q)) {
2680f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
2681f7917c00SJeff Kirsher 		return 1;
2682f7917c00SJeff Kirsher 	}
2683f7917c00SJeff Kirsher 	return 0;
2684f7917c00SJeff Kirsher }
2685f7917c00SJeff Kirsher 
2686f7917c00SJeff Kirsher /*
2687f7917c00SJeff Kirsher  * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2688f7917c00SJeff Kirsher  * by NAPI polling).  Handles data events from SGE response queues as well as
2689f7917c00SJeff Kirsher  * error and other async events as they all use the same MSI vector.  We use
2690f7917c00SJeff Kirsher  * one SGE response queue per port in this mode and protect all response
2691f7917c00SJeff Kirsher  * queues with queue 0's lock.
2692f7917c00SJeff Kirsher  */
2693f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2694f7917c00SJeff Kirsher {
2695f7917c00SJeff Kirsher 	int new_packets;
2696f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2697f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2698f7917c00SJeff Kirsher 
2699f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2700f7917c00SJeff Kirsher 
2701f7917c00SJeff Kirsher 	new_packets = rspq_check_napi(&adap->sge.qs[0]);
2702f7917c00SJeff Kirsher 	if (adap->params.nports == 2)
2703f7917c00SJeff Kirsher 		new_packets += rspq_check_napi(&adap->sge.qs[1]);
2704f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2705f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2706f7917c00SJeff Kirsher 
2707f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2708f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2709f7917c00SJeff Kirsher }
2710f7917c00SJeff Kirsher 
2711f7917c00SJeff Kirsher /*
2712f7917c00SJeff Kirsher  * A helper function that processes responses and issues GTS.
2713f7917c00SJeff Kirsher  */
2714f7917c00SJeff Kirsher static inline int process_responses_gts(struct adapter *adap,
2715f7917c00SJeff Kirsher 					struct sge_rspq *rq)
2716f7917c00SJeff Kirsher {
2717f7917c00SJeff Kirsher 	int work;
2718f7917c00SJeff Kirsher 
2719f7917c00SJeff Kirsher 	work = process_responses(adap, rspq_to_qset(rq), -1);
2720f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2721f7917c00SJeff Kirsher 		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2722f7917c00SJeff Kirsher 	return work;
2723f7917c00SJeff Kirsher }
2724f7917c00SJeff Kirsher 
2725f7917c00SJeff Kirsher /*
2726f7917c00SJeff Kirsher  * The legacy INTx interrupt handler.  This needs to handle data events from
2727f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2728f7917c00SJeff Kirsher  * the same interrupt pin.  We use one SGE response queue per port in this mode
2729f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2730f7917c00SJeff Kirsher  */
2731f7917c00SJeff Kirsher static irqreturn_t t3_intr(int irq, void *cookie)
2732f7917c00SJeff Kirsher {
2733f7917c00SJeff Kirsher 	int work_done, w0, w1;
2734f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2735f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2736f7917c00SJeff Kirsher 	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2737f7917c00SJeff Kirsher 
2738f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2739f7917c00SJeff Kirsher 
2740f7917c00SJeff Kirsher 	w0 = is_new_response(&q0->desc[q0->cidx], q0);
2741f7917c00SJeff Kirsher 	w1 = adap->params.nports == 2 &&
2742f7917c00SJeff Kirsher 	    is_new_response(&q1->desc[q1->cidx], q1);
2743f7917c00SJeff Kirsher 
2744f7917c00SJeff Kirsher 	if (likely(w0 | w1)) {
2745f7917c00SJeff Kirsher 		t3_write_reg(adap, A_PL_CLI, 0);
2746f7917c00SJeff Kirsher 		t3_read_reg(adap, A_PL_CLI);	/* flush */
2747f7917c00SJeff Kirsher 
2748f7917c00SJeff Kirsher 		if (likely(w0))
2749f7917c00SJeff Kirsher 			process_responses_gts(adap, q0);
2750f7917c00SJeff Kirsher 
2751f7917c00SJeff Kirsher 		if (w1)
2752f7917c00SJeff Kirsher 			process_responses_gts(adap, q1);
2753f7917c00SJeff Kirsher 
2754f7917c00SJeff Kirsher 		work_done = w0 | w1;
2755f7917c00SJeff Kirsher 	} else
2756f7917c00SJeff Kirsher 		work_done = t3_slow_intr_handler(adap);
2757f7917c00SJeff Kirsher 
2758f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2759f7917c00SJeff Kirsher 	return IRQ_RETVAL(work_done != 0);
2760f7917c00SJeff Kirsher }
2761f7917c00SJeff Kirsher 
2762f7917c00SJeff Kirsher /*
2763f7917c00SJeff Kirsher  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2764f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2765f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2766f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2767f7917c00SJeff Kirsher  * queue 0's lock.
2768f7917c00SJeff Kirsher  */
2769f7917c00SJeff Kirsher static irqreturn_t t3b_intr(int irq, void *cookie)
2770f7917c00SJeff Kirsher {
2771f7917c00SJeff Kirsher 	u32 map;
2772f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2773f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2774f7917c00SJeff Kirsher 
2775f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2776f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2777f7917c00SJeff Kirsher 
2778f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2779f7917c00SJeff Kirsher 		return IRQ_NONE;
2780f7917c00SJeff Kirsher 
2781f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2782f7917c00SJeff Kirsher 
2783f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2784f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2785f7917c00SJeff Kirsher 
2786f7917c00SJeff Kirsher 	if (likely(map & 1))
2787f7917c00SJeff Kirsher 		process_responses_gts(adap, q0);
2788f7917c00SJeff Kirsher 
2789f7917c00SJeff Kirsher 	if (map & 2)
2790f7917c00SJeff Kirsher 		process_responses_gts(adap, &adap->sge.qs[1].rspq);
2791f7917c00SJeff Kirsher 
2792f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2793f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2794f7917c00SJeff Kirsher }
2795f7917c00SJeff Kirsher 
2796f7917c00SJeff Kirsher /*
2797f7917c00SJeff Kirsher  * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2798f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2799f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2800f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2801f7917c00SJeff Kirsher  * queue 0's lock.
2802f7917c00SJeff Kirsher  */
2803f7917c00SJeff Kirsher static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2804f7917c00SJeff Kirsher {
2805f7917c00SJeff Kirsher 	u32 map;
2806f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2807f7917c00SJeff Kirsher 	struct sge_qset *qs0 = &adap->sge.qs[0];
2808f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &qs0->rspq;
2809f7917c00SJeff Kirsher 
2810f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2811f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2812f7917c00SJeff Kirsher 
2813f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2814f7917c00SJeff Kirsher 		return IRQ_NONE;
2815f7917c00SJeff Kirsher 
2816f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2817f7917c00SJeff Kirsher 
2818f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2819f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2820f7917c00SJeff Kirsher 
2821f7917c00SJeff Kirsher 	if (likely(map & 1))
2822f7917c00SJeff Kirsher 		napi_schedule(&qs0->napi);
2823f7917c00SJeff Kirsher 
2824f7917c00SJeff Kirsher 	if (map & 2)
2825f7917c00SJeff Kirsher 		napi_schedule(&adap->sge.qs[1].napi);
2826f7917c00SJeff Kirsher 
2827f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2828f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2829f7917c00SJeff Kirsher }
2830f7917c00SJeff Kirsher 
2831f7917c00SJeff Kirsher /**
2832f7917c00SJeff Kirsher  *	t3_intr_handler - select the top-level interrupt handler
2833f7917c00SJeff Kirsher  *	@adap: the adapter
2834f7917c00SJeff Kirsher  *	@polling: whether using NAPI to service response queues
2835f7917c00SJeff Kirsher  *
2836f7917c00SJeff Kirsher  *	Selects the top-level interrupt handler based on the type of interrupts
2837f7917c00SJeff Kirsher  *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2838f7917c00SJeff Kirsher  *	response queues.
2839f7917c00SJeff Kirsher  */
2840f7917c00SJeff Kirsher irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2841f7917c00SJeff Kirsher {
2842f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX)
2843f7917c00SJeff Kirsher 		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2844f7917c00SJeff Kirsher 	if (adap->flags & USING_MSI)
2845f7917c00SJeff Kirsher 		return polling ? t3_intr_msi_napi : t3_intr_msi;
2846f7917c00SJeff Kirsher 	if (adap->params.rev > 0)
2847f7917c00SJeff Kirsher 		return polling ? t3b_intr_napi : t3b_intr;
2848f7917c00SJeff Kirsher 	return t3_intr;
2849f7917c00SJeff Kirsher }
2850f7917c00SJeff Kirsher 
2851f7917c00SJeff Kirsher #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2852f7917c00SJeff Kirsher 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2853f7917c00SJeff Kirsher 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2854f7917c00SJeff Kirsher 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2855f7917c00SJeff Kirsher 		    F_HIRCQPARITYERROR)
2856f7917c00SJeff Kirsher #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2857f7917c00SJeff Kirsher #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2858f7917c00SJeff Kirsher 		      F_RSPQDISABLED)
2859f7917c00SJeff Kirsher 
2860f7917c00SJeff Kirsher /**
2861f7917c00SJeff Kirsher  *	t3_sge_err_intr_handler - SGE async event interrupt handler
2862f7917c00SJeff Kirsher  *	@adapter: the adapter
2863f7917c00SJeff Kirsher  *
2864f7917c00SJeff Kirsher  *	Interrupt handler for SGE asynchronous (non-data) events.
2865f7917c00SJeff Kirsher  */
2866f7917c00SJeff Kirsher void t3_sge_err_intr_handler(struct adapter *adapter)
2867f7917c00SJeff Kirsher {
2868f7917c00SJeff Kirsher 	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2869f7917c00SJeff Kirsher 				 ~F_FLEMPTY;
2870f7917c00SJeff Kirsher 
2871f7917c00SJeff Kirsher 	if (status & SGE_PARERR)
2872f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2873f7917c00SJeff Kirsher 			 status & SGE_PARERR);
2874f7917c00SJeff Kirsher 	if (status & SGE_FRAMINGERR)
2875f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2876f7917c00SJeff Kirsher 			 status & SGE_FRAMINGERR);
2877f7917c00SJeff Kirsher 
2878f7917c00SJeff Kirsher 	if (status & F_RSPQCREDITOVERFOW)
2879f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
2880f7917c00SJeff Kirsher 
2881f7917c00SJeff Kirsher 	if (status & F_RSPQDISABLED) {
2882f7917c00SJeff Kirsher 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2883f7917c00SJeff Kirsher 
2884f7917c00SJeff Kirsher 		CH_ALERT(adapter,
2885f7917c00SJeff Kirsher 			 "packet delivered to disabled response queue "
2886f7917c00SJeff Kirsher 			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2887f7917c00SJeff Kirsher 	}
2888f7917c00SJeff Kirsher 
2889f7917c00SJeff Kirsher 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2890f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_drop_task);
2891f7917c00SJeff Kirsher 
2892f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2893f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_full_task);
2894f7917c00SJeff Kirsher 
2895f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2896f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_empty_task);
2897f7917c00SJeff Kirsher 
2898f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2899f7917c00SJeff Kirsher 	if (status &  SGE_FATALERR)
2900f7917c00SJeff Kirsher 		t3_fatal_err(adapter);
2901f7917c00SJeff Kirsher }
2902f7917c00SJeff Kirsher 
2903f7917c00SJeff Kirsher /**
2904f7917c00SJeff Kirsher  *	sge_timer_tx - perform periodic maintenance of an SGE qset
2905f7917c00SJeff Kirsher  *	@t: the timer embedded in the SGE queue set to maintain
2906f7917c00SJeff Kirsher  *
2907f7917c00SJeff Kirsher  *	Runs periodically from a timer to perform maintenance of an SGE queue
2908f7917c00SJeff Kirsher  *	set.  It performs the following task:
2909f7917c00SJeff Kirsher  *
2910f7917c00SJeff Kirsher  *	Cleans up any completed Tx descriptors that may still be pending.
2911f7917c00SJeff Kirsher  *	Normal descriptor cleanup happens when new packets are added to a Tx
2912f7917c00SJeff Kirsher  *	queue so this timer is relatively infrequent and does any cleanup only
2913f7917c00SJeff Kirsher  *	if the Tx queue has not seen any new packets in a while.  We make a
2914f7917c00SJeff Kirsher  *	best effort attempt to reclaim descriptors, in that we don't wait
2915f7917c00SJeff Kirsher  *	around if we cannot get a queue's lock (which most likely is because
2916f7917c00SJeff Kirsher  *	someone else is queueing new packets and so will also handle the clean
2917f7917c00SJeff Kirsher  *	up).  Since control queues use immediate data exclusively we don't
2918f7917c00SJeff Kirsher  *	bother cleaning them up here.
2919f7917c00SJeff Kirsher  *
2920f7917c00SJeff Kirsher  */
29210e23daebSKees Cook static void sge_timer_tx(struct timer_list *t)
2922f7917c00SJeff Kirsher {
29230e23daebSKees Cook 	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2924f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2925f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2926f7917c00SJeff Kirsher 	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2927f7917c00SJeff Kirsher 	unsigned long next_period;
2928f7917c00SJeff Kirsher 
2929f7917c00SJeff Kirsher 	if (__netif_tx_trylock(qs->tx_q)) {
2930f7917c00SJeff Kirsher 		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2931f7917c00SJeff Kirsher 						    TX_RECLAIM_TIMER_CHUNK);
2932f7917c00SJeff Kirsher 		__netif_tx_unlock(qs->tx_q);
2933f7917c00SJeff Kirsher 	}
2934f7917c00SJeff Kirsher 
2935f7917c00SJeff Kirsher 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2936f7917c00SJeff Kirsher 		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2937f7917c00SJeff Kirsher 						     TX_RECLAIM_TIMER_CHUNK);
2938f7917c00SJeff Kirsher 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
2939f7917c00SJeff Kirsher 	}
2940f7917c00SJeff Kirsher 
2941f7917c00SJeff Kirsher 	next_period = TX_RECLAIM_PERIOD >>
2942f7917c00SJeff Kirsher 		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2943f7917c00SJeff Kirsher 		      TX_RECLAIM_TIMER_CHUNK);
2944f7917c00SJeff Kirsher 	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2945f7917c00SJeff Kirsher }
2946f7917c00SJeff Kirsher 
294749ce9c2cSBen Hutchings /**
2948f7917c00SJeff Kirsher  *	sge_timer_rx - perform periodic maintenance of an SGE qset
2949f7917c00SJeff Kirsher  *	@t: the timer embedded in the SGE queue set to maintain
2950f7917c00SJeff Kirsher  *
2951f7917c00SJeff Kirsher  *	a) Replenishes Rx queues that have run out due to memory shortage.
2952f7917c00SJeff Kirsher  *	Normally new Rx buffers are added when existing ones are consumed, but
2953f7917c00SJeff Kirsher  *	when out of memory, a queue can become empty.  We try to add only a few
2954f7917c00SJeff Kirsher  *	buffers here, the queue will be replenished fully as these new buffers
2955f7917c00SJeff Kirsher  *	are used up if memory shortage has subsided.
2956f7917c00SJeff Kirsher  *
2957f7917c00SJeff Kirsher  *	b) Return coalesced response queue credits in case a response queue is
2958f7917c00SJeff Kirsher  *	starved.
2959f7917c00SJeff Kirsher  *
2960f7917c00SJeff Kirsher  */
29610e23daebSKees Cook static void sge_timer_rx(struct timer_list *t)
2962f7917c00SJeff Kirsher {
2963f7917c00SJeff Kirsher 	spinlock_t *lock;
29640e23daebSKees Cook 	struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2965f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2966f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2967f7917c00SJeff Kirsher 	u32 status;
2968f7917c00SJeff Kirsher 
2969f7917c00SJeff Kirsher 	lock = adap->params.rev > 0 ?
2970f7917c00SJeff Kirsher 	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2971f7917c00SJeff Kirsher 
2972f7917c00SJeff Kirsher 	if (!spin_trylock_irq(lock))
2973f7917c00SJeff Kirsher 		goto out;
2974f7917c00SJeff Kirsher 
2975f7917c00SJeff Kirsher 	if (napi_is_scheduled(&qs->napi))
2976f7917c00SJeff Kirsher 		goto unlock;
2977f7917c00SJeff Kirsher 
2978f7917c00SJeff Kirsher 	if (adap->params.rev < 4) {
2979f7917c00SJeff Kirsher 		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2980f7917c00SJeff Kirsher 
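		/*
		 * The FL/RSPQ status register flags response queues that have
		 * stalled for lack of credits; hand one credit back to restart
		 * a starved queue and write its status bit to acknowledge it.
		 */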
2981f7917c00SJeff Kirsher 		if (status & (1 << qs->rspq.cntxt_id)) {
2982f7917c00SJeff Kirsher 			qs->rspq.starved++;
2983f7917c00SJeff Kirsher 			if (qs->rspq.credits) {
2984f7917c00SJeff Kirsher 				qs->rspq.credits--;
2985f7917c00SJeff Kirsher 				refill_rspq(adap, &qs->rspq, 1);
2986f7917c00SJeff Kirsher 				qs->rspq.restarted++;
2987f7917c00SJeff Kirsher 				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2988f7917c00SJeff Kirsher 					     1 << qs->rspq.cntxt_id);
2989f7917c00SJeff Kirsher 			}
2990f7917c00SJeff Kirsher 		}
2991f7917c00SJeff Kirsher 	}
2992f7917c00SJeff Kirsher 
2993f7917c00SJeff Kirsher 	if (qs->fl[0].credits < qs->fl[0].size)
2994f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[0]);
2995f7917c00SJeff Kirsher 	if (qs->fl[1].credits < qs->fl[1].size)
2996f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[1]);
2997f7917c00SJeff Kirsher 
2998f7917c00SJeff Kirsher unlock:
2999f7917c00SJeff Kirsher 	spin_unlock_irq(lock);
3000f7917c00SJeff Kirsher out:
3001f7917c00SJeff Kirsher 	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3002f7917c00SJeff Kirsher }
3003f7917c00SJeff Kirsher 
3004f7917c00SJeff Kirsher /**
3005f7917c00SJeff Kirsher  *	t3_update_qset_coalesce - update coalescing settings for a queue set
3006f7917c00SJeff Kirsher  *	@qs: the SGE queue set
3007f7917c00SJeff Kirsher  *	@p: new queue set parameters
3008f7917c00SJeff Kirsher  *
3009f7917c00SJeff Kirsher  *	Update the coalescing settings for an SGE queue set.  Nothing is done
3010f7917c00SJeff Kirsher  *	if the queue set is not initialized yet.
3011f7917c00SJeff Kirsher  */
3012f7917c00SJeff Kirsher void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3013f7917c00SJeff Kirsher {
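	/*
	 * The holdoff timer is programmed in 0.1 us units (cf.
	 * NOMEM_INTR_DELAY above), hence the conversion from microseconds.
	 */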
3014f7917c00SJeff Kirsher 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
3015f7917c00SJeff Kirsher 	qs->rspq.polling = p->polling;
3016f7917c00SJeff Kirsher 	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
3017f7917c00SJeff Kirsher }
3018f7917c00SJeff Kirsher 
3019f7917c00SJeff Kirsher /**
3020f7917c00SJeff Kirsher  *	t3_sge_alloc_qset - initialize an SGE queue set
3021f7917c00SJeff Kirsher  *	@adapter: the adapter
3022f7917c00SJeff Kirsher  *	@id: the queue set id
3023f7917c00SJeff Kirsher  *	@nports: how many Ethernet ports will be using this queue set
3024f7917c00SJeff Kirsher  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
3025f7917c00SJeff Kirsher  *	@p: configuration parameters for this queue set
3026f7917c00SJeff Kirsher  *	@ntxq: number of Tx queues for the queue set
3027f7917c00SJeff Kirsher  *	@dev: net device associated with this queue set
3028f7917c00SJeff Kirsher  *	@netdevq: net device TX queue associated with this queue set
3029f7917c00SJeff Kirsher  *
3030f7917c00SJeff Kirsher  *	Allocate resources and initialize an SGE queue set.  A queue set
3031f7917c00SJeff Kirsher  *	comprises a response queue, two Rx free-buffer queues, and up to 3
3032f7917c00SJeff Kirsher  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
3033f7917c00SJeff Kirsher  *	queue, offload queue, and control queue.
3034f7917c00SJeff Kirsher  */
3035f7917c00SJeff Kirsher int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3036f7917c00SJeff Kirsher 		      int irq_vec_idx, const struct qset_params *p,
3037f7917c00SJeff Kirsher 		      int ntxq, struct net_device *dev,
3038f7917c00SJeff Kirsher 		      struct netdev_queue *netdevq)
3039f7917c00SJeff Kirsher {
3040f7917c00SJeff Kirsher 	int i, avail, ret = -ENOMEM;
3041f7917c00SJeff Kirsher 	struct sge_qset *q = &adapter->sge.qs[id];
3042f7917c00SJeff Kirsher 
3043f7917c00SJeff Kirsher 	init_qset_cntxt(q, id);
30440e23daebSKees Cook 	timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
30450e23daebSKees Cook 	timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
3046f7917c00SJeff Kirsher 
3047f7917c00SJeff Kirsher 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3048f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3049f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3050f7917c00SJeff Kirsher 				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
3051f7917c00SJeff Kirsher 	if (!q->fl[0].desc)
3052f7917c00SJeff Kirsher 		goto err;
3053f7917c00SJeff Kirsher 
3054f7917c00SJeff Kirsher 	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3055f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3056f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3057f7917c00SJeff Kirsher 				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
3058f7917c00SJeff Kirsher 	if (!q->fl[1].desc)
3059f7917c00SJeff Kirsher 		goto err;
3060f7917c00SJeff Kirsher 
3061f7917c00SJeff Kirsher 	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3062f7917c00SJeff Kirsher 				  sizeof(struct rsp_desc), 0,
3063f7917c00SJeff Kirsher 				  &q->rspq.phys_addr, NULL);
3064f7917c00SJeff Kirsher 	if (!q->rspq.desc)
3065f7917c00SJeff Kirsher 		goto err;
3066f7917c00SJeff Kirsher 
3067f7917c00SJeff Kirsher 	for (i = 0; i < ntxq; ++i) {
3068f7917c00SJeff Kirsher 		/*
3069f7917c00SJeff Kirsher 		 * The control queue always uses immediate data so does not
3070f7917c00SJeff Kirsher 		 * need to keep track of any sk_buffs.
3071f7917c00SJeff Kirsher 		 */
3072f7917c00SJeff Kirsher 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3073f7917c00SJeff Kirsher 
3074f7917c00SJeff Kirsher 		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3075f7917c00SJeff Kirsher 					    sizeof(struct tx_desc), sz,
3076f7917c00SJeff Kirsher 					    &q->txq[i].phys_addr,
3077f7917c00SJeff Kirsher 					    &q->txq[i].sdesc);
3078f7917c00SJeff Kirsher 		if (!q->txq[i].desc)
3079f7917c00SJeff Kirsher 			goto err;
3080f7917c00SJeff Kirsher 
3081f7917c00SJeff Kirsher 		q->txq[i].gen = 1;
3082f7917c00SJeff Kirsher 		q->txq[i].size = p->txq_size[i];
3083f7917c00SJeff Kirsher 		spin_lock_init(&q->txq[i].lock);
3084f7917c00SJeff Kirsher 		skb_queue_head_init(&q->txq[i].sendq);
3085f7917c00SJeff Kirsher 	}
3086f7917c00SJeff Kirsher 
3087f7917c00SJeff Kirsher 	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
3088f7917c00SJeff Kirsher 		     (unsigned long)q);
3089f7917c00SJeff Kirsher 	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
3090f7917c00SJeff Kirsher 		     (unsigned long)q);
3091f7917c00SJeff Kirsher 
3092f7917c00SJeff Kirsher 	q->fl[0].gen = q->fl[1].gen = 1;
3093f7917c00SJeff Kirsher 	q->fl[0].size = p->fl_size;
3094f7917c00SJeff Kirsher 	q->fl[1].size = p->jumbo_size;
3095f7917c00SJeff Kirsher 
3096f7917c00SJeff Kirsher 	q->rspq.gen = 1;
3097f7917c00SJeff Kirsher 	q->rspq.size = p->rspq_size;
3098f7917c00SJeff Kirsher 	spin_lock_init(&q->rspq.lock);
3099f7917c00SJeff Kirsher 	skb_queue_head_init(&q->rspq.rx_queue);
3100f7917c00SJeff Kirsher 
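	/*
	 * Size the Ethernet Tx stop threshold to leave room for roughly one
	 * maximally fragmented packet from each port sharing this queue set.
	 */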
3101f7917c00SJeff Kirsher 	q->txq[TXQ_ETH].stop_thres = nports *
3102f7917c00SJeff Kirsher 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3103f7917c00SJeff Kirsher 
3104f7917c00SJeff Kirsher #if FL0_PG_CHUNK_SIZE > 0
3105f7917c00SJeff Kirsher 	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3106f7917c00SJeff Kirsher #else
3107f7917c00SJeff Kirsher 	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3108f7917c00SJeff Kirsher #endif
3109f7917c00SJeff Kirsher #if FL1_PG_CHUNK_SIZE > 0
3110f7917c00SJeff Kirsher 	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3111f7917c00SJeff Kirsher #else
3112f7917c00SJeff Kirsher 	q->fl[1].buf_size = is_offload(adapter) ?
3113f7917c00SJeff Kirsher 		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3114f7917c00SJeff Kirsher 		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3115f7917c00SJeff Kirsher #endif
3116f7917c00SJeff Kirsher 
3117f7917c00SJeff Kirsher 	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3118f7917c00SJeff Kirsher 	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3119f7917c00SJeff Kirsher 	q->fl[0].order = FL0_PG_ORDER;
3120f7917c00SJeff Kirsher 	q->fl[1].order = FL1_PG_ORDER;
3121f7917c00SJeff Kirsher 	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3122f7917c00SJeff Kirsher 	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3123f7917c00SJeff Kirsher 
3124f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->sge.reg_lock);
3125f7917c00SJeff Kirsher 
3126f7917c00SJeff Kirsher 	/* FL threshold comparison uses < */
3127f7917c00SJeff Kirsher 	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3128f7917c00SJeff Kirsher 				   q->rspq.phys_addr, q->rspq.size,
3129f7917c00SJeff Kirsher 				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3130f7917c00SJeff Kirsher 	if (ret)
3131f7917c00SJeff Kirsher 		goto err_unlock;
3132f7917c00SJeff Kirsher 
3133f7917c00SJeff Kirsher 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3134f7917c00SJeff Kirsher 		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3135f7917c00SJeff Kirsher 					  q->fl[i].phys_addr, q->fl[i].size,
3136f7917c00SJeff Kirsher 					  q->fl[i].buf_size - SGE_PG_RSVD,
3137f7917c00SJeff Kirsher 					  p->cong_thres, 1, 0);
3138f7917c00SJeff Kirsher 		if (ret)
3139f7917c00SJeff Kirsher 			goto err_unlock;
3140f7917c00SJeff Kirsher 	}
3141f7917c00SJeff Kirsher 
3142f7917c00SJeff Kirsher 	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3143f7917c00SJeff Kirsher 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3144f7917c00SJeff Kirsher 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3145f7917c00SJeff Kirsher 				 1, 0);
3146f7917c00SJeff Kirsher 	if (ret)
3147f7917c00SJeff Kirsher 		goto err_unlock;
3148f7917c00SJeff Kirsher 
3149f7917c00SJeff Kirsher 	if (ntxq > 1) {
3150f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3151f7917c00SJeff Kirsher 					 USE_GTS, SGE_CNTXT_OFLD, id,
3152f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].phys_addr,
3153f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
3154f7917c00SJeff Kirsher 		if (ret)
3155f7917c00SJeff Kirsher 			goto err_unlock;
3156f7917c00SJeff Kirsher 	}
3157f7917c00SJeff Kirsher 
3158f7917c00SJeff Kirsher 	if (ntxq > 2) {
3159f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3160f7917c00SJeff Kirsher 					 SGE_CNTXT_CTRL, id,
3161f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].phys_addr,
3162f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].size,
3163f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].token, 1, 0);
3164f7917c00SJeff Kirsher 		if (ret)
3165f7917c00SJeff Kirsher 			goto err_unlock;
3166f7917c00SJeff Kirsher 	}
3167f7917c00SJeff Kirsher 
3168f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3169f7917c00SJeff Kirsher 
3170f7917c00SJeff Kirsher 	q->adap = adapter;
3171f7917c00SJeff Kirsher 	q->netdev = dev;
3172f7917c00SJeff Kirsher 	q->tx_q = netdevq;
3173f7917c00SJeff Kirsher 	t3_update_qset_coalesce(q, p);
3174f7917c00SJeff Kirsher 
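	/*
	 * Populate both free lists and the response queue, then arm the
	 * response queue holdoff timer through the GTS register.  Failing to
	 * obtain any FL0 credits is fatal; a partial fill only warns.
	 */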
3175f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3176f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3177f7917c00SJeff Kirsher 	if (!avail) {
3178f7917c00SJeff Kirsher 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3179f7917c00SJeff Kirsher 		goto err;
3180f7917c00SJeff Kirsher 	}
3181f7917c00SJeff Kirsher 	if (avail < q->fl[0].size)
3182f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3183f7917c00SJeff Kirsher 			avail);
3184f7917c00SJeff Kirsher 
3185f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3186f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3187f7917c00SJeff Kirsher 	if (avail < q->fl[1].size)
3188f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3189f7917c00SJeff Kirsher 			avail);
3190f7917c00SJeff Kirsher 	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3191f7917c00SJeff Kirsher 
3192f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3193f7917c00SJeff Kirsher 		     V_NEWTIMER(q->rspq.holdoff_tmr));
3194f7917c00SJeff Kirsher 
3195f7917c00SJeff Kirsher 	return 0;
3196f7917c00SJeff Kirsher 
3197f7917c00SJeff Kirsher err_unlock:
3198f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3199f7917c00SJeff Kirsher err:
3200f7917c00SJeff Kirsher 	t3_free_qset(adapter, q);
3201f7917c00SJeff Kirsher 	return ret;
3202f7917c00SJeff Kirsher }
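
/*
 * A rough sketch of the bring-up order, approximately as the driver's top
 * level uses it (illustrative only, not a verbatim copy of the call sites):
 *
 *	for each queue set i bound to a port's net_device:
 *		t3_sge_alloc_qset(adap, i, nports, irq_vec_idx, &qset_params,
 *				  ntxq, netdev, netdev_get_tx_queue(netdev, j));
 *	t3_start_sge_timers(adap);
 *	t3_sge_start(adap);
 */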
3203f7917c00SJeff Kirsher 
3204f7917c00SJeff Kirsher /**
3205f7917c00SJeff Kirsher  *	t3_start_sge_timers - start SGE timer callbacks
3206f7917c00SJeff Kirsher  *	@adap: the adapter
3207f7917c00SJeff Kirsher  *
3208f7917c00SJeff Kirsher  *	Starts each SGE queue set's timer callbacks.
3209f7917c00SJeff Kirsher  */
3210f7917c00SJeff Kirsher void t3_start_sge_timers(struct adapter *adap)
3211f7917c00SJeff Kirsher {
3212f7917c00SJeff Kirsher 	int i;
3213f7917c00SJeff Kirsher 
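	/*
	 * Only arm timers whose callback has been installed; queue sets that
	 * were never allocated leave tx/rx_reclaim_timer.function NULL.
	 */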
3214f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3215f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3216f7917c00SJeff Kirsher 
3217f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
32182acc0abcSColin Ian King 			mod_timer(&q->tx_reclaim_timer,
32192acc0abcSColin Ian King 				  jiffies + TX_RECLAIM_PERIOD);
3220f7917c00SJeff Kirsher 
3221f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
32222acc0abcSColin Ian King 			mod_timer(&q->rx_reclaim_timer,
32232acc0abcSColin Ian King 				  jiffies + RX_RECLAIM_PERIOD);
3224f7917c00SJeff Kirsher 	}
3225f7917c00SJeff Kirsher }
3226f7917c00SJeff Kirsher 
3227f7917c00SJeff Kirsher /**
3228f7917c00SJeff Kirsher  *	t3_stop_sge_timers - stop SGE timer callbacks
3229f7917c00SJeff Kirsher  *	@adap: the adapter
3230f7917c00SJeff Kirsher  *
3231f7917c00SJeff Kirsher  *	Stops each SGE queue set's timer callbacks.
3232f7917c00SJeff Kirsher  */
3233f7917c00SJeff Kirsher void t3_stop_sge_timers(struct adapter *adap)
3234f7917c00SJeff Kirsher {
3235f7917c00SJeff Kirsher 	int i;
3236f7917c00SJeff Kirsher 
3237f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3238f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3239f7917c00SJeff Kirsher 
3240f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
3241f7917c00SJeff Kirsher 			del_timer_sync(&q->tx_reclaim_timer);
3242f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
3243f7917c00SJeff Kirsher 			del_timer_sync(&q->rx_reclaim_timer);
3244f7917c00SJeff Kirsher 	}
3245f7917c00SJeff Kirsher }
3246f7917c00SJeff Kirsher 
3247f7917c00SJeff Kirsher /**
3248f7917c00SJeff Kirsher  *	t3_free_sge_resources - free SGE resources
3249f7917c00SJeff Kirsher  *	@adap: the adapter
3250f7917c00SJeff Kirsher  *
3251f7917c00SJeff Kirsher  *	Frees resources used by the SGE queue sets.
3252f7917c00SJeff Kirsher  */
3253f7917c00SJeff Kirsher void t3_free_sge_resources(struct adapter *adap)
3254f7917c00SJeff Kirsher {
3255f7917c00SJeff Kirsher 	int i;
3256f7917c00SJeff Kirsher 
3257f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i)
3258f7917c00SJeff Kirsher 		t3_free_qset(adap, &adap->sge.qs[i]);
3259f7917c00SJeff Kirsher }
3260f7917c00SJeff Kirsher 
3261f7917c00SJeff Kirsher /**
3262f7917c00SJeff Kirsher  *	t3_sge_start - enable SGE
3263f7917c00SJeff Kirsher  *	@adap: the adapter
3264f7917c00SJeff Kirsher  *
3265f7917c00SJeff Kirsher  *	Enables the SGE for DMAs.  This is the last step in starting packet
3266f7917c00SJeff Kirsher  *	transfers.
3267f7917c00SJeff Kirsher  */
3268f7917c00SJeff Kirsher void t3_sge_start(struct adapter *adap)
3269f7917c00SJeff Kirsher {
3270f7917c00SJeff Kirsher 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3271f7917c00SJeff Kirsher }
3272f7917c00SJeff Kirsher 
3273f7917c00SJeff Kirsher /**
3274f7917c00SJeff Kirsher  *	t3_sge_stop - disable SGE operation
3275f7917c00SJeff Kirsher  *	@adap: the adapter
3276f7917c00SJeff Kirsher  *
3277f7917c00SJeff Kirsher  *	Disables the DMA engine.  This can be called in emergencies (e.g.,
3278f7917c00SJeff Kirsher  *	from error interrupts) or from normal process context.  In the latter
3279f7917c00SJeff Kirsher  *	case it also disables any pending queue restart tasklets.  Note that
3280f7917c00SJeff Kirsher  *	if it is called in interrupt context it cannot disable the restart
3281f7917c00SJeff Kirsher  *	tasklets because it cannot wait; however, the tasklets will have no
3282f7917c00SJeff Kirsher  *	effect since the doorbells are disabled, and the driver will call this
3283f7917c00SJeff Kirsher  *	again later from process context, at which time any tasklets still
3284f7917c00SJeff Kirsher  *	running will be stopped.
3285f7917c00SJeff Kirsher  */
3286f7917c00SJeff Kirsher void t3_sge_stop(struct adapter *adap)
3287f7917c00SJeff Kirsher {
3288f7917c00SJeff Kirsher 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3289f7917c00SJeff Kirsher 	if (!in_interrupt()) {
3290f7917c00SJeff Kirsher 		int i;
3291f7917c00SJeff Kirsher 
3292f7917c00SJeff Kirsher 		for (i = 0; i < SGE_QSETS; ++i) {
3293f7917c00SJeff Kirsher 			struct sge_qset *qs = &adap->sge.qs[i];
3294f7917c00SJeff Kirsher 
3295f7917c00SJeff Kirsher 			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3296f7917c00SJeff Kirsher 			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3297f7917c00SJeff Kirsher 		}
3298f7917c00SJeff Kirsher 	}
3299f7917c00SJeff Kirsher }
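
/*
 * The teardown path mirrors the bring-up (a sketch; the actual call sites
 * live in the driver's top level):
 *
 *	t3_sge_stop(adap);		stop DMA and kill restart tasklets
 *	t3_stop_sge_timers(adap);	cancel the reclaim timers
 *	t3_free_sge_resources(adap);	release rings and buffers
 */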
3300f7917c00SJeff Kirsher 
3301f7917c00SJeff Kirsher /**
3302f7917c00SJeff Kirsher  *	t3_sge_init - initialize SGE
3303f7917c00SJeff Kirsher  *	@adap: the adapter
3304f7917c00SJeff Kirsher  *	@p: the SGE parameters
3305f7917c00SJeff Kirsher  *
3306f7917c00SJeff Kirsher  *	Performs SGE initialization needed every time after a chip reset.
3307f7917c00SJeff Kirsher  *	We do not initialize any of the queue sets here; instead, the driver's
3308f7917c00SJeff Kirsher  *	top level must request them individually.  We also do not enable DMA
3309f7917c00SJeff Kirsher  *	here; that should be done after the queues have been set up.
3310f7917c00SJeff Kirsher  */
3311f7917c00SJeff Kirsher void t3_sge_init(struct adapter *adap, struct sge_params *p)
3312f7917c00SJeff Kirsher {
3313f7917c00SJeff Kirsher 	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3314f7917c00SJeff Kirsher 
3315f7917c00SJeff Kirsher 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3316f7917c00SJeff Kirsher 	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3317f7917c00SJeff Kirsher 	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3318f7917c00SJeff Kirsher 	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3319f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 1
3320f7917c00SJeff Kirsher 	ctrl |= F_EGRGENCTRL;
3321f7917c00SJeff Kirsher #endif
3322f7917c00SJeff Kirsher 	if (adap->params.rev > 0) {
3323f7917c00SJeff Kirsher 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
3324f7917c00SJeff Kirsher 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3325f7917c00SJeff Kirsher 	}
3326f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
3327f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3328f7917c00SJeff Kirsher 		     V_LORCQDRBTHRSH(512));
3329f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3330f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3331f7917c00SJeff Kirsher 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3332f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3333f7917c00SJeff Kirsher 		     adap->params.rev < T3_REV_C ? 1000 : 500);
3334f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3335f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3336f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3337f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3338f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3339f7917c00SJeff Kirsher }
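
/*
 * A worked example of two of the encodings above, assuming 4KB pages and a
 * 64KB BAR2 (both assumptions for illustration): V_HOSTPAGESIZE(PAGE_SHIFT -
 * 11) writes 1, i.e. the field holds log2(page size) - 11; and ups =
 * ffs(pci_resource_len(pdev, 2) >> 12) = ffs(16) = 5, so V_USERSPACESIZE(4)
 * is written.
 */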
3340f7917c00SJeff Kirsher 
3341f7917c00SJeff Kirsher /**
3342f7917c00SJeff Kirsher  *	t3_sge_prep - one-time SGE initialization
3343f7917c00SJeff Kirsher  *	@adap: the associated adapter
3344f7917c00SJeff Kirsher  *	@p: SGE parameters
3345f7917c00SJeff Kirsher  *
3346f7917c00SJeff Kirsher  *	Performs one-time initialization of SGE SW state.  This includes
3347f7917c00SJeff Kirsher  *	determining defaults for the assorted SGE parameters, which
3348f7917c00SJeff Kirsher  *	administrators may change until they are used to initialize the SGE.
3349f7917c00SJeff Kirsher  */
3350f7917c00SJeff Kirsher void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3351f7917c00SJeff Kirsher {
3352f7917c00SJeff Kirsher 	int i;
3353f7917c00SJeff Kirsher 
3354f7917c00SJeff Kirsher 	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3355f7917c00SJeff Kirsher 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3356f7917c00SJeff Kirsher 
3357f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3358f7917c00SJeff Kirsher 		struct qset_params *q = p->qset + i;
3359f7917c00SJeff Kirsher 
3360f7917c00SJeff Kirsher 		q->polling = adap->params.rev > 0;
3361f7917c00SJeff Kirsher 		q->coalesce_usecs = 5;
3362f7917c00SJeff Kirsher 		q->rspq_size = 1024;
3363f7917c00SJeff Kirsher 		q->fl_size = 1024;
3364f7917c00SJeff Kirsher 		q->jumbo_size = 512;
3365f7917c00SJeff Kirsher 		q->txq_size[TXQ_ETH] = 1024;
3366f7917c00SJeff Kirsher 		q->txq_size[TXQ_OFLD] = 1024;
3367f7917c00SJeff Kirsher 		q->txq_size[TXQ_CTRL] = 256;
3368f7917c00SJeff Kirsher 		q->cong_thres = 0;
3369f7917c00SJeff Kirsher 	}
3370f7917c00SJeff Kirsher 
3371f7917c00SJeff Kirsher 	spin_lock_init(&adap->sge.reg_lock);
3372f7917c00SJeff Kirsher }
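
/*
 * The per-qset defaults chosen above (1024-entry response queue and free
 * list, 512-entry jumbo free list, 1024/1024/256-entry Tx queues, 5us
 * interrupt holdoff) are starting points only; they may be overridden, e.g.
 * through the driver's ethtool ring and coalescing hooks, at any point
 * before the queue sets are allocated.
 */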
3373