/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

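/*
 * Rx packets no larger than SGE_RX_COPY_THRES bytes are copied into a freshly
 * allocated sk_buff and the original Rx buffer is recycled.  SGE_RX_PULL_LEN
 * is the amount of data pulled into the linear area of an sk_buff built from
 * page-chunk buffers (see get_packet() and get_packet_pg() below).
 */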
#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

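/*
 * SGE_RX_DROP_THRES is the free-list occupancy below which incoming packets
 * may start being dropped (see the drop_thres argument of get_packet()), and
 * RX_RECLAIM_PERIOD is the period of the Rx buffer reclaim timer.
 */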
#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	struct_group(immediate,
		u8 imm_data[47];
		u8 intr_gen;
	);
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
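/*
 * For example, the SGE_NUM_GENBITS == 2 table below corresponds to
 * WR_FLITS == 15, so a 20-flit request maps to 1 + (20 - 2) / 14 = 2
 * descriptors.
 */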
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The result is a
 *	compile-time constant, so the compiler optimizes away the unmapping
 *	code on platforms that do not need it.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, the information necessary to unmap an
 *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 *	descriptors (the physical addresses of the various data buffers), and
 *	the SW descriptor state (assorted indices).  The send functions
 *	initialize the indices for the first packet descriptor so we can unmap
 *	the buffers held in the first Tx descriptor here, and we have enough
 *	information at this point to set the state for the next Tx descriptor.
 *
 *	Note that it is possible to clean up the first descriptor of a packet
 *	before the send routines have written the next descriptors, but this
 *	race does not cause any problem.  We just end up writing the unmapping
 *	info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), DMA_TO_DEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

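	/*
	 * Each sg_ent holds two address/length pairs, so @j toggles between
	 * the two slots of the current entry and @sgp advances to the next
	 * entry after every other fragment.
	 */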
	while (frag_idx < nfrags && curflit < WR_FLITS) {
		dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]),
			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
			       DMA_TO_DEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop) {
				dev_consume_skb_any(d->skb);
				d->skb = NULL;
			}
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@chunk: maximum number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
				       q->alloc_size, DMA_FROM_DEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, DMA_FROM_DEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];


		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va:  buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		return -ENOMEM;

	dma_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

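/*
 * Like add_one_rx_buf(), but for page-chunk buffers that have already been
 * DMA-mapped: only the HW Rx descriptor needs to be written.
 */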
static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

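/*
 * Hand out the next buf_size chunk of the free list's current page.  A fresh
 * page of the requested order is allocated and DMA-mapped when needed; the
 * per-page reference count kept in the page's last SGE_PG_RSVD bytes lets
 * clear_rx_desc() unmap the page once its last chunk is freed.
 */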
static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
				       0, q->alloc_size, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
			__free_pages(q->pg_chunk.page, order);
			q->pg_chunk.page = NULL;
			return -EIO;
		}
		q->pg_chunk.mapping = mapping;
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}

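/*
 * Tell the HW about newly added free-list buffers.  Doorbell writes are
 * batched: we ring only once at least a quarter of the list's credits are
 * pending, which keeps MMIO traffic down.
 */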
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must ensure that
 *	@n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:				q->alloc_failed++;
				break;
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			dma_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			dma_sync_single_for_device(&adap->pdev->dev, mapping,
						   q->buf_size - SGE_PG_RSVD,
						   DMA_FROM_DEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adap: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	dma_wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t * phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	return p;
}

/**
 *	t3_reset_qset - reset a sge qset
 *	@q: the queue set
 *
 *	Reset the qset structure.
 *	The NAPI structure is preserved in the event of
 *	the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}


/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
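	/* e.g. n = 3 -> 5 flits: two sg_ents, with the last address unused */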
	return (3 * n) / 2 + (n & 1);
}

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			dma_sync_single_for_cpu(&adap->pdev->dev,
						dma_unmap_addr(sd, dma_addr),
						len, DMA_FROM_DEVICE);
			memcpy(skb->data, sd->skb->data, len);
			dma_sync_single_for_device(&adap->pdev->dev,
						   dma_unmap_addr(sd, dma_addr),
						   len, DMA_FROM_DEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, DMA_FROM_DEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@q: the queue
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise we attach the original buffer as a page fragment to a fresh
 *	sk_buff.  If a positive drop threshold is supplied packets are dropped
 *	and their buffers recycled if (a) the number of remaining buffers is
 *	under the threshold and the packet is too big to copy, or (b) there's
 *	no system memory.
 *
 * 	Note: this function is similar to @get_packet but deals with Rx buffers
 * 	that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);

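	/*
	 * q->pg_skb is non-NULL while the response queue is assembling a
	 * packet that spans multiple free-list buffers; in that case this
	 * chunk is appended to it as an additional page fragment.
	 */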
	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
						len, DMA_FROM_DEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
						   len, DMA_FROM_DEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
				DMA_FROM_DEVICE);
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
			       fl->alloc_size, DMA_FROM_DEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate));
		skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 * 	Returns the number of Tx descriptors needed for the given Ethernet
 * 	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

/*	map_skb - map a packet main body and its page fragments
 *	@pdev: the PCI device
 *	@skb: the packet
 *	@addr: placeholder to save the mapped addresses
 *
 *	Map the main body of an sk_buff and its page fragments, if any.
 */
static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	if (skb_headlen(skb)) {
		*addr = dma_map_single(&pdev->dev, skb->data,
				       skb_headlen(skb), DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *addr))
			goto out_err;
		addr++;
	}

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
					 DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *addr))
			goto unwind;
		addr++;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
			       DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb),
			 DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@addr: the list of the mapped addresses
 *
 *	Copies the scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int write_sgl(const struct sk_buff *skb,
				     struct sg_ent *sgp, unsigned char *start,
				     unsigned int len, const dma_addr_t *addr)
{
	unsigned int i, j = 0, k = 0, nfrags;

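	/*
	 * As in unmap_skb(), each sg_ent packs two length/address pairs:
	 * @j selects the slot within the current entry and we move on to the
	 * next entry once both slots are filled.
	 */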
1013f7917c00SJeff Kirsher 	if (len) {
1014f7917c00SJeff Kirsher 		sgp->len[0] = cpu_to_be32(len);
1015c69fe407SArjun Vynipadath 		sgp->addr[j++] = cpu_to_be64(addr[k++]);
1016f7917c00SJeff Kirsher 	}
1017f7917c00SJeff Kirsher 
1018f7917c00SJeff Kirsher 	nfrags = skb_shinfo(skb)->nr_frags;
1019f7917c00SJeff Kirsher 	for (i = 0; i < nfrags; i++) {
10209e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1021f7917c00SJeff Kirsher 
10229e903e08SEric Dumazet 		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
1023c69fe407SArjun Vynipadath 		sgp->addr[j] = cpu_to_be64(addr[k++]);
1024f7917c00SJeff Kirsher 		j ^= 1;
1025f7917c00SJeff Kirsher 		if (j == 0)
1026f7917c00SJeff Kirsher 			++sgp;
1027f7917c00SJeff Kirsher 	}
1028f7917c00SJeff Kirsher 	if (j)
1029f7917c00SJeff Kirsher 		sgp->len[j] = 0;
1030f7917c00SJeff Kirsher 	return ((nfrags + (len != 0)) * 3) / 2 + j;
1031f7917c00SJeff Kirsher }
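/*
 * Illustrative note (not from the original sources): each struct sg_ent
 * packs two length/address pairs into 3 flits (two 4-byte lengths share
 * one flit, each 8-byte address takes a flit of its own), which is where
 * the size computation above comes from.  For example, a linear part plus
 * three page fragments gives 4 pairs and (4 * 3) / 2 + 0 = 6 flits, while
 * 3 pairs leave the last entry half used and need (3 * 3) / 2 + 1 = 5.
 */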
1032f7917c00SJeff Kirsher 
1033f7917c00SJeff Kirsher /**
1034f7917c00SJeff Kirsher  *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1035f7917c00SJeff Kirsher  *	@adap: the adapter
1036f7917c00SJeff Kirsher  *	@q: the Tx queue
1037f7917c00SJeff Kirsher  *
1038f7917c00SJeff Kirsher  *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
1039f7917c00SJeff Kirsher  *	where the HW may go to sleep just after we check; in that case the
1040f7917c00SJeff Kirsher  *	interrupt handler will detect the outstanding TX packet and ring
1041f7917c00SJeff Kirsher  *	the doorbell for us.
1042f7917c00SJeff Kirsher  *
1043f7917c00SJeff Kirsher  *	When GTS is disabled we unconditionally ring the doorbell.
1044f7917c00SJeff Kirsher  */
1045f7917c00SJeff Kirsher static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1046f7917c00SJeff Kirsher {
1047f7917c00SJeff Kirsher #if USE_GTS
1048f7917c00SJeff Kirsher 	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1049f7917c00SJeff Kirsher 	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1050f7917c00SJeff Kirsher 		set_bit(TXQ_LAST_PKT_DB, &q->flags);
1051f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_KDOORBELL,
1052f7917c00SJeff Kirsher 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1053f7917c00SJeff Kirsher 	}
1054f7917c00SJeff Kirsher #else
1055f7917c00SJeff Kirsher 	wmb();			/* write descriptors before telling HW */
1056f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_KDOORBELL,
1057f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1058f7917c00SJeff Kirsher #endif
1059f7917c00SJeff Kirsher }
1060f7917c00SJeff Kirsher 
1061f7917c00SJeff Kirsher static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1062f7917c00SJeff Kirsher {
1063f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 2
1064f7917c00SJeff Kirsher 	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1065f7917c00SJeff Kirsher #endif
1066f7917c00SJeff Kirsher }
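/*
 * Editorial note: the generation bit is what lets the SGE tell descriptors
 * written in the current pass over the ring from stale ones left over from
 * the previous pass; q->gen is flipped each time the producer index wraps,
 * and with SGE_NUM_GENBITS == 2 the last flit of every descriptor carries
 * a copy of it as well.
 */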
1067f7917c00SJeff Kirsher 
1068f7917c00SJeff Kirsher /**
1069f7917c00SJeff Kirsher  *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1070f7917c00SJeff Kirsher  *	@ndesc: number of Tx descriptors spanned by the SGL
1071f7917c00SJeff Kirsher  *	@skb: the packet corresponding to the WR
1072f7917c00SJeff Kirsher  *	@d: first Tx descriptor to be written
1073f7917c00SJeff Kirsher  *	@pidx: index of above descriptors
1074f7917c00SJeff Kirsher  *	@q: the SGE Tx queue
1075f7917c00SJeff Kirsher  *	@sgl: the SGL
1076f7917c00SJeff Kirsher  *	@flits: number of flits to the start of the SGL in the first descriptor
1077f7917c00SJeff Kirsher  *	@sgl_flits: the SGL size in flits
1078f7917c00SJeff Kirsher  *	@gen: the Tx descriptor generation
1079f7917c00SJeff Kirsher  *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1080f7917c00SJeff Kirsher  *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1081f7917c00SJeff Kirsher  *
1082f7917c00SJeff Kirsher  *	Write a work request header and an associated SGL.  If the SGL is
1083f7917c00SJeff Kirsher  *	small enough to fit into one Tx descriptor it has already been written
1084f7917c00SJeff Kirsher  *	and we just need to write the WR header.  Otherwise we distribute the
1085f7917c00SJeff Kirsher  *	SGL across the number of descriptors it spans.
1086f7917c00SJeff Kirsher  */
1087f7917c00SJeff Kirsher static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1088f7917c00SJeff Kirsher 			     struct tx_desc *d, unsigned int pidx,
1089f7917c00SJeff Kirsher 			     const struct sge_txq *q,
1090f7917c00SJeff Kirsher 			     const struct sg_ent *sgl,
1091f7917c00SJeff Kirsher 			     unsigned int flits, unsigned int sgl_flits,
1092f7917c00SJeff Kirsher 			     unsigned int gen, __be32 wr_hi,
1093f7917c00SJeff Kirsher 			     __be32 wr_lo)
1094f7917c00SJeff Kirsher {
1095f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1096f7917c00SJeff Kirsher 	struct tx_sw_desc *sd = &q->sdesc[pidx];
1097f7917c00SJeff Kirsher 
1098f7917c00SJeff Kirsher 	sd->skb = skb;
1099f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1100f7917c00SJeff Kirsher 		sd->fragidx = 0;
1101f7917c00SJeff Kirsher 		sd->addr_idx = 0;
1102f7917c00SJeff Kirsher 		sd->sflit = flits;
1103f7917c00SJeff Kirsher 	}
1104f7917c00SJeff Kirsher 
1105f7917c00SJeff Kirsher 	if (likely(ndesc == 1)) {
1106f7917c00SJeff Kirsher 		sd->eop = 1;
1107f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1108f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1109019be1cfSAlexander Duyck 		dma_wmb();
1110f7917c00SJeff Kirsher 		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1111f7917c00SJeff Kirsher 				   V_WR_GEN(gen)) | wr_lo;
1112f7917c00SJeff Kirsher 		wr_gen2(d, gen);
1113f7917c00SJeff Kirsher 	} else {
1114f7917c00SJeff Kirsher 		unsigned int ogen = gen;
1115f7917c00SJeff Kirsher 		const u64 *fp = (const u64 *)sgl;
1116f7917c00SJeff Kirsher 		struct work_request_hdr *wp = wrp;
1117f7917c00SJeff Kirsher 
1118f7917c00SJeff Kirsher 		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1119f7917c00SJeff Kirsher 				   V_WR_SGLSFLT(flits)) | wr_hi;
1120f7917c00SJeff Kirsher 
1121f7917c00SJeff Kirsher 		while (sgl_flits) {
1122f7917c00SJeff Kirsher 			unsigned int avail = WR_FLITS - flits;
1123f7917c00SJeff Kirsher 
1124f7917c00SJeff Kirsher 			if (avail > sgl_flits)
1125f7917c00SJeff Kirsher 				avail = sgl_flits;
1126f7917c00SJeff Kirsher 			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1127f7917c00SJeff Kirsher 			sgl_flits -= avail;
1128f7917c00SJeff Kirsher 			ndesc--;
1129f7917c00SJeff Kirsher 			if (!sgl_flits)
1130f7917c00SJeff Kirsher 				break;
1131f7917c00SJeff Kirsher 
1132f7917c00SJeff Kirsher 			fp += avail;
1133f7917c00SJeff Kirsher 			d++;
1134f7917c00SJeff Kirsher 			sd->eop = 0;
1135f7917c00SJeff Kirsher 			sd++;
1136f7917c00SJeff Kirsher 			if (++pidx == q->size) {
1137f7917c00SJeff Kirsher 				pidx = 0;
1138f7917c00SJeff Kirsher 				gen ^= 1;
1139f7917c00SJeff Kirsher 				d = q->desc;
1140f7917c00SJeff Kirsher 				sd = q->sdesc;
1141f7917c00SJeff Kirsher 			}
1142f7917c00SJeff Kirsher 
1143f7917c00SJeff Kirsher 			sd->skb = skb;
1144f7917c00SJeff Kirsher 			wrp = (struct work_request_hdr *)d;
1145f7917c00SJeff Kirsher 			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1146f7917c00SJeff Kirsher 					   V_WR_SGLSFLT(1)) | wr_hi;
1147f7917c00SJeff Kirsher 			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1148f7917c00SJeff Kirsher 							sgl_flits + 1)) |
1149f7917c00SJeff Kirsher 					   V_WR_GEN(gen)) | wr_lo;
1150f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1151f7917c00SJeff Kirsher 			flits = 1;
1152f7917c00SJeff Kirsher 		}
1153f7917c00SJeff Kirsher 		sd->eop = 1;
1154f7917c00SJeff Kirsher 		wrp->wr_hi |= htonl(F_WR_EOP);
1155019be1cfSAlexander Duyck 		dma_wmb();
1156f7917c00SJeff Kirsher 		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1157f7917c00SJeff Kirsher 		wr_gen2((struct tx_desc *)wp, ogen);
1158f7917c00SJeff Kirsher 		WARN_ON(ndesc != 0);
1159f7917c00SJeff Kirsher 	}
1160f7917c00SJeff Kirsher }
1161f7917c00SJeff Kirsher 
1162f7917c00SJeff Kirsher /**
1163f7917c00SJeff Kirsher  *	write_tx_pkt_wr - write a TX_PKT work request
1164f7917c00SJeff Kirsher  *	@adap: the adapter
1165f7917c00SJeff Kirsher  *	@skb: the packet to send
1166f7917c00SJeff Kirsher  *	@pi: the egress interface
1167f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1168f7917c00SJeff Kirsher  *	@gen: the generation value to use
1169f7917c00SJeff Kirsher  *	@q: the Tx queue
1170f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1171f7917c00SJeff Kirsher  *	@compl: the value of the COMPL bit to use
1172d0ea5cbdSJesse Brandeburg  *	@addr: the list of DMA-mapped addresses for the packet buffers
1173f7917c00SJeff Kirsher  *
1174f7917c00SJeff Kirsher  *	Generate a TX_PKT work request to send the supplied packet.
1175f7917c00SJeff Kirsher  */
1176f7917c00SJeff Kirsher static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1177f7917c00SJeff Kirsher 			    const struct port_info *pi,
1178f7917c00SJeff Kirsher 			    unsigned int pidx, unsigned int gen,
1179f7917c00SJeff Kirsher 			    struct sge_txq *q, unsigned int ndesc,
1180c69fe407SArjun Vynipadath 			    unsigned int compl, const dma_addr_t *addr)
1181f7917c00SJeff Kirsher {
1182f7917c00SJeff Kirsher 	unsigned int flits, sgl_flits, cntrl, tso_info;
1183f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1184f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1185f7917c00SJeff Kirsher 	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1186f7917c00SJeff Kirsher 
1187f7917c00SJeff Kirsher 	cpl->len = htonl(skb->len);
1188f7917c00SJeff Kirsher 	cntrl = V_TXPKT_INTF(pi->port_id);
1189f7917c00SJeff Kirsher 
1190df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1191df8a39deSJiri Pirko 		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1192f7917c00SJeff Kirsher 
1193f7917c00SJeff Kirsher 	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1194f7917c00SJeff Kirsher 	if (tso_info) {
1195f7917c00SJeff Kirsher 		int eth_type;
1196f7917c00SJeff Kirsher 		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1197f7917c00SJeff Kirsher 
1198f7917c00SJeff Kirsher 		d->flit[2] = 0;
1199f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1200f7917c00SJeff Kirsher 		hdr->cntrl = htonl(cntrl);
1201f7917c00SJeff Kirsher 		eth_type = skb_network_offset(skb) == ETH_HLEN ?
1202f7917c00SJeff Kirsher 		    CPL_ETH_II : CPL_ETH_II_VLAN;
1203f7917c00SJeff Kirsher 		tso_info |= V_LSO_ETH_TYPE(eth_type) |
1204f7917c00SJeff Kirsher 		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1205f7917c00SJeff Kirsher 		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1206f7917c00SJeff Kirsher 		hdr->lso_info = htonl(tso_info);
1207f7917c00SJeff Kirsher 		flits = 3;
1208f7917c00SJeff Kirsher 	} else {
1209f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1210f7917c00SJeff Kirsher 		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
1211f7917c00SJeff Kirsher 		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1212f7917c00SJeff Kirsher 		cpl->cntrl = htonl(cntrl);
1213f7917c00SJeff Kirsher 
1214f7917c00SJeff Kirsher 		if (skb->len <= WR_LEN - sizeof(*cpl)) {
1215f7917c00SJeff Kirsher 			q->sdesc[pidx].skb = NULL;
1216f7917c00SJeff Kirsher 			if (!skb->data_len)
1217f7917c00SJeff Kirsher 				skb_copy_from_linear_data(skb, &d->flit[2],
1218f7917c00SJeff Kirsher 							  skb->len);
1219f7917c00SJeff Kirsher 			else
1220f7917c00SJeff Kirsher 				skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1221f7917c00SJeff Kirsher 
1222f7917c00SJeff Kirsher 			flits = (skb->len + 7) / 8 + 2;
1223f7917c00SJeff Kirsher 			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1224f7917c00SJeff Kirsher 					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1225f7917c00SJeff Kirsher 					      | F_WR_SOP | F_WR_EOP | compl);
1226019be1cfSAlexander Duyck 			dma_wmb();
1227f7917c00SJeff Kirsher 			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1228f7917c00SJeff Kirsher 					      V_WR_TID(q->token));
1229f7917c00SJeff Kirsher 			wr_gen2(d, gen);
1230f9ec8131SEric W. Biederman 			dev_consume_skb_any(skb);
1231f7917c00SJeff Kirsher 			return;
1232f7917c00SJeff Kirsher 		}
1233f7917c00SJeff Kirsher 
1234f7917c00SJeff Kirsher 		flits = 2;
1235f7917c00SJeff Kirsher 	}
1236f7917c00SJeff Kirsher 
1237f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1238c69fe407SArjun Vynipadath 	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1239f7917c00SJeff Kirsher 
1240f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1241f7917c00SJeff Kirsher 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1242f7917c00SJeff Kirsher 			 htonl(V_WR_TID(q->token)));
1243f7917c00SJeff Kirsher }
1244f7917c00SJeff Kirsher 
1245f7917c00SJeff Kirsher static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1246f7917c00SJeff Kirsher 				    struct sge_qset *qs, struct sge_txq *q)
1247f7917c00SJeff Kirsher {
1248f7917c00SJeff Kirsher 	netif_tx_stop_queue(txq);
1249f7917c00SJeff Kirsher 	set_bit(TXQ_ETH, &qs->txq_stopped);
1250f7917c00SJeff Kirsher 	q->stops++;
1251f7917c00SJeff Kirsher }
1252f7917c00SJeff Kirsher 
1253f7917c00SJeff Kirsher /**
1254aeed744aSYang Shen  *	t3_eth_xmit - add a packet to the Ethernet Tx queue
1255f7917c00SJeff Kirsher  *	@skb: the packet
1256f7917c00SJeff Kirsher  *	@dev: the egress net device
1257f7917c00SJeff Kirsher  *
1258f7917c00SJeff Kirsher  *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
1259f7917c00SJeff Kirsher  */
1260f7917c00SJeff Kirsher netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1261f7917c00SJeff Kirsher {
1262f7917c00SJeff Kirsher 	int qidx;
1263f7917c00SJeff Kirsher 	unsigned int ndesc, pidx, credits, gen, compl;
1264f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(dev);
1265f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1266f7917c00SJeff Kirsher 	struct netdev_queue *txq;
1267f7917c00SJeff Kirsher 	struct sge_qset *qs;
1268f7917c00SJeff Kirsher 	struct sge_txq *q;
1269c69fe407SArjun Vynipadath 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1270f7917c00SJeff Kirsher 
1271f7917c00SJeff Kirsher 	/*
1272f7917c00SJeff Kirsher 	 * The chip min packet length is 9 octets but play safe and reject
1273f7917c00SJeff Kirsher 	 * anything shorter than an Ethernet header.
1274f7917c00SJeff Kirsher 	 */
1275f7917c00SJeff Kirsher 	if (unlikely(skb->len < ETH_HLEN)) {
1276f9ec8131SEric W. Biederman 		dev_kfree_skb_any(skb);
1277f7917c00SJeff Kirsher 		return NETDEV_TX_OK;
1278f7917c00SJeff Kirsher 	}
1279f7917c00SJeff Kirsher 
1280f7917c00SJeff Kirsher 	qidx = skb_get_queue_mapping(skb);
1281f7917c00SJeff Kirsher 	qs = &pi->qs[qidx];
1282f7917c00SJeff Kirsher 	q = &qs->txq[TXQ_ETH];
1283f7917c00SJeff Kirsher 	txq = netdev_get_tx_queue(dev, qidx);
1284f7917c00SJeff Kirsher 
1285f7917c00SJeff Kirsher 	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1286f7917c00SJeff Kirsher 
1287f7917c00SJeff Kirsher 	credits = q->size - q->in_use;
1288f7917c00SJeff Kirsher 	ndesc = calc_tx_descs(skb);
1289f7917c00SJeff Kirsher 
1290f7917c00SJeff Kirsher 	if (unlikely(credits < ndesc)) {
1291f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1292f7917c00SJeff Kirsher 		dev_err(&adap->pdev->dev,
1293f7917c00SJeff Kirsher 			"%s: Tx ring %u full while queue awake!\n",
1294f7917c00SJeff Kirsher 			dev->name, q->cntxt_id & 7);
1295f7917c00SJeff Kirsher 		return NETDEV_TX_BUSY;
1296f7917c00SJeff Kirsher 	}
1297f7917c00SJeff Kirsher 
1298c69fe407SArjun Vynipadath 	/* DMA-map the packet if it is too big to be sent as immediate data */
1299c69fe407SArjun Vynipadath 	if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
1300c69fe407SArjun Vynipadath 		if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1301c69fe407SArjun Vynipadath 			dev_kfree_skb(skb);
1302c69fe407SArjun Vynipadath 			return NETDEV_TX_OK;
1303c69fe407SArjun Vynipadath 		}
1304c69fe407SArjun Vynipadath 	}
1305c69fe407SArjun Vynipadath 
1306f7917c00SJeff Kirsher 	q->in_use += ndesc;
1307f7917c00SJeff Kirsher 	if (unlikely(credits - ndesc < q->stop_thres)) {
1308f7917c00SJeff Kirsher 		t3_stop_tx_queue(txq, qs, q);
1309f7917c00SJeff Kirsher 
1310f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1311f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1312f7917c00SJeff Kirsher 			q->restarts++;
1313f7917c00SJeff Kirsher 			netif_tx_start_queue(txq);
1314f7917c00SJeff Kirsher 		}
1315f7917c00SJeff Kirsher 	}
1316f7917c00SJeff Kirsher 
1317f7917c00SJeff Kirsher 	gen = q->gen;
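	/*
	 * Editorial note: the arithmetic below requests a completion
	 * notification from the hardware roughly once every 8 descriptors'
	 * worth of traffic -- bit 3 of the running q->unacked count becomes
	 * the COMPL bit -- so the driver periodically learns how far the SGE
	 * has progressed without asking for a notification per packet.
	 */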
1318f7917c00SJeff Kirsher 	q->unacked += ndesc;
1319f7917c00SJeff Kirsher 	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1320f7917c00SJeff Kirsher 	q->unacked &= 7;
1321f7917c00SJeff Kirsher 	pidx = q->pidx;
1322f7917c00SJeff Kirsher 	q->pidx += ndesc;
1323f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1324f7917c00SJeff Kirsher 		q->pidx -= q->size;
1325f7917c00SJeff Kirsher 		q->gen ^= 1;
1326f7917c00SJeff Kirsher 	}
1327f7917c00SJeff Kirsher 
1328f7917c00SJeff Kirsher 	/* update port statistics */
1329bc6c47b5SVipul Pandya 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1330f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1331f7917c00SJeff Kirsher 	if (skb_shinfo(skb)->gso_size)
1332f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_TSO]++;
1333df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
1334f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANINS]++;
1335f7917c00SJeff Kirsher 
1336f7917c00SJeff Kirsher 	/*
1337f7917c00SJeff Kirsher 	 * We do not use Tx completion interrupts to free DMAd Tx packets.
1338f7917c00SJeff Kirsher 	 * This is good for performance but means that we rely on new Tx
1339f7917c00SJeff Kirsher 	 * packets arriving to run the destructors of completed packets,
1340f7917c00SJeff Kirsher 	 * which open up space in their sockets' send queues.  Sometimes
1341f7917c00SJeff Kirsher 	 * we do not get such new packets causing Tx to stall.  A single
1342f7917c00SJeff Kirsher 	 * UDP transmitter is a good example of this situation.  We have
1343f7917c00SJeff Kirsher 	 * a clean up timer that periodically reclaims completed packets
1344f7917c00SJeff Kirsher 	 * but it doesn't run often enough (nor do we want it to) to prevent
1345f7917c00SJeff Kirsher 	 * lengthy stalls.  A solution to this problem is to run the
1346f7917c00SJeff Kirsher 	 * destructor early, after the packet is queued but before it's DMAd.
1347f7917c00SJeff Kirsher 	 * A downside is that we lie to socket memory accounting, but the
1348f7917c00SJeff Kirsher 	 * amount of extra memory is reasonable (limited by the number of Tx
1349f7917c00SJeff Kirsher 	 * descriptors), the packets almost always do get freed quickly by
1350f7917c00SJeff Kirsher 	 * new packets, and for protocols like TCP that wait for acks to
1351f7917c00SJeff Kirsher 	 * really free up the data the extra memory is even less.
1352f7917c00SJeff Kirsher 	 * On the positive side we run the destructors on the sending CPU
1353f7917c00SJeff Kirsher 	 * rather than on a potentially different completing CPU, usually a
1354f7917c00SJeff Kirsher 	 * good thing.  We also run them without holding our Tx queue lock,
1355f7917c00SJeff Kirsher 	 * unlike what reclaim_completed_tx() would otherwise do.
1356f7917c00SJeff Kirsher 	 *
1357f7917c00SJeff Kirsher 	 * Run the destructor before telling the DMA engine about the packet
1358f7917c00SJeff Kirsher 	 * to make sure it doesn't complete and get freed prematurely.
1359f7917c00SJeff Kirsher 	 */
1360f7917c00SJeff Kirsher 	if (likely(!skb_shared(skb)))
1361f7917c00SJeff Kirsher 		skb_orphan(skb);
1362f7917c00SJeff Kirsher 
1363c69fe407SArjun Vynipadath 	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1364f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1365f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1366f7917c00SJeff Kirsher }
1367f7917c00SJeff Kirsher 
1368f7917c00SJeff Kirsher /**
1369f7917c00SJeff Kirsher  *	write_imm - write a packet into a Tx descriptor as immediate data
1370f7917c00SJeff Kirsher  *	@d: the Tx descriptor to write
1371f7917c00SJeff Kirsher  *	@skb: the packet
1372f7917c00SJeff Kirsher  *	@len: the length of packet data to write as immediate data
1373f7917c00SJeff Kirsher  *	@gen: the generation bit value to write
1374f7917c00SJeff Kirsher  *
1375f7917c00SJeff Kirsher  *	Writes a packet as immediate data into a Tx descriptor.  The packet
1376f7917c00SJeff Kirsher  *	contains a work request at its beginning.  We must write the packet
1377f7917c00SJeff Kirsher  *	carefully so the SGE doesn't read it accidentally before it's written
1378f7917c00SJeff Kirsher  *	in its entirety.
1379f7917c00SJeff Kirsher  */
1380f7917c00SJeff Kirsher static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1381f7917c00SJeff Kirsher 			     unsigned int len, unsigned int gen)
1382f7917c00SJeff Kirsher {
1383f7917c00SJeff Kirsher 	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1384f7917c00SJeff Kirsher 	struct work_request_hdr *to = (struct work_request_hdr *)d;
1385f7917c00SJeff Kirsher 
1386f7917c00SJeff Kirsher 	if (likely(!skb->data_len))
1387f7917c00SJeff Kirsher 		memcpy(&to[1], &from[1], len - sizeof(*from));
1388f7917c00SJeff Kirsher 	else
1389f7917c00SJeff Kirsher 		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1390f7917c00SJeff Kirsher 
1391f7917c00SJeff Kirsher 	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1392f7917c00SJeff Kirsher 					V_WR_BCNTLFLT(len & 7));
1393019be1cfSAlexander Duyck 	dma_wmb();
1394f7917c00SJeff Kirsher 	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1395f7917c00SJeff Kirsher 					V_WR_LEN((len + 7) / 8));
1396f7917c00SJeff Kirsher 	wr_gen2(d, gen);
1397f7917c00SJeff Kirsher 	kfree_skb(skb);
1398f7917c00SJeff Kirsher }
1399f7917c00SJeff Kirsher 
1400f7917c00SJeff Kirsher /**
1401f7917c00SJeff Kirsher  *	check_desc_avail - check descriptor availability on a send queue
1402f7917c00SJeff Kirsher  *	@adap: the adapter
1403f7917c00SJeff Kirsher  *	@q: the send queue
1404f7917c00SJeff Kirsher  *	@skb: the packet needing the descriptors
1405f7917c00SJeff Kirsher  *	@ndesc: the number of Tx descriptors needed
1406f7917c00SJeff Kirsher  *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1407f7917c00SJeff Kirsher  *
1408f7917c00SJeff Kirsher  *	Checks if the requested number of Tx descriptors is available on an
1409f7917c00SJeff Kirsher  *	SGE send queue.  If the queue is already suspended or not enough
1410f7917c00SJeff Kirsher  *	descriptors are available the packet is queued for later transmission.
1411f7917c00SJeff Kirsher  *	Must be called with the Tx queue locked.
1412f7917c00SJeff Kirsher  *
1413f7917c00SJeff Kirsher  *	Returns 0 if enough descriptors are available, 1 if there aren't
1414f7917c00SJeff Kirsher  *	enough descriptors and the packet has been queued, and 2 if the caller
1415f7917c00SJeff Kirsher  *	needs to retry because there weren't enough descriptors at the
1416f7917c00SJeff Kirsher  *	beginning of the call but some freed up in the meantime.
1417f7917c00SJeff Kirsher  */
1418f7917c00SJeff Kirsher static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1419f7917c00SJeff Kirsher 				   struct sk_buff *skb, unsigned int ndesc,
1420f7917c00SJeff Kirsher 				   unsigned int qid)
1421f7917c00SJeff Kirsher {
1422f7917c00SJeff Kirsher 	if (unlikely(!skb_queue_empty(&q->sendq))) {
1423f7917c00SJeff Kirsher 	      addq_exit:__skb_queue_tail(&q->sendq, skb);
1424f7917c00SJeff Kirsher 		return 1;
1425f7917c00SJeff Kirsher 	}
1426f7917c00SJeff Kirsher 	if (unlikely(q->size - q->in_use < ndesc)) {
1427f7917c00SJeff Kirsher 		struct sge_qset *qs = txq_to_qset(q, qid);
1428f7917c00SJeff Kirsher 
1429f7917c00SJeff Kirsher 		set_bit(qid, &qs->txq_stopped);
14304e857c58SPeter Zijlstra 		smp_mb__after_atomic();
1431f7917c00SJeff Kirsher 
1432f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1433f7917c00SJeff Kirsher 		    test_and_clear_bit(qid, &qs->txq_stopped))
1434f7917c00SJeff Kirsher 			return 2;
1435f7917c00SJeff Kirsher 
1436f7917c00SJeff Kirsher 		q->stops++;
1437f7917c00SJeff Kirsher 		goto addq_exit;
1438f7917c00SJeff Kirsher 	}
1439f7917c00SJeff Kirsher 	return 0;
1440f7917c00SJeff Kirsher }
1441f7917c00SJeff Kirsher 
1442f7917c00SJeff Kirsher /**
1443f7917c00SJeff Kirsher  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1444f7917c00SJeff Kirsher  *	@q: the SGE control Tx queue
1445f7917c00SJeff Kirsher  *
1446f7917c00SJeff Kirsher  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1447f7917c00SJeff Kirsher  *	that send only immediate data (presently just the control queues) and
1448f7917c00SJeff Kirsher  *	thus do not have any sk_buffs to release.
1449f7917c00SJeff Kirsher  */
1450f7917c00SJeff Kirsher static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1451f7917c00SJeff Kirsher {
1452f7917c00SJeff Kirsher 	unsigned int reclaim = q->processed - q->cleaned;
1453f7917c00SJeff Kirsher 
1454f7917c00SJeff Kirsher 	q->in_use -= reclaim;
1455f7917c00SJeff Kirsher 	q->cleaned += reclaim;
1456f7917c00SJeff Kirsher }
1457f7917c00SJeff Kirsher 
1458f7917c00SJeff Kirsher static inline int immediate(const struct sk_buff *skb)
1459f7917c00SJeff Kirsher {
1460f7917c00SJeff Kirsher 	return skb->len <= WR_LEN;
1461f7917c00SJeff Kirsher }
1462f7917c00SJeff Kirsher 
1463f7917c00SJeff Kirsher /**
1464f7917c00SJeff Kirsher  *	ctrl_xmit - send a packet through an SGE control Tx queue
1465f7917c00SJeff Kirsher  *	@adap: the adapter
1466f7917c00SJeff Kirsher  *	@q: the control queue
1467f7917c00SJeff Kirsher  *	@skb: the packet
1468f7917c00SJeff Kirsher  *
1469f7917c00SJeff Kirsher  *	Send a packet through an SGE control Tx queue.  Packets sent through
1470f7917c00SJeff Kirsher  *	a control queue must fit entirely as immediate data in a single Tx
1471f7917c00SJeff Kirsher  *	descriptor and have no page fragments.
1472f7917c00SJeff Kirsher  */
1473f7917c00SJeff Kirsher static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1474f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1475f7917c00SJeff Kirsher {
1476f7917c00SJeff Kirsher 	int ret;
1477f7917c00SJeff Kirsher 	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1478f7917c00SJeff Kirsher 
1479f7917c00SJeff Kirsher 	if (unlikely(!immediate(skb))) {
1480f7917c00SJeff Kirsher 		WARN_ON(1);
1481f7917c00SJeff Kirsher 		dev_kfree_skb(skb);
1482f7917c00SJeff Kirsher 		return NET_XMIT_SUCCESS;
1483f7917c00SJeff Kirsher 	}
1484f7917c00SJeff Kirsher 
1485f7917c00SJeff Kirsher 	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1486f7917c00SJeff Kirsher 	wrp->wr_lo = htonl(V_WR_TID(q->token));
1487f7917c00SJeff Kirsher 
1488f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1489f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1490f7917c00SJeff Kirsher 
1491f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1492f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1493f7917c00SJeff Kirsher 		if (ret == 1) {
1494f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1495f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1496f7917c00SJeff Kirsher 		}
1497f7917c00SJeff Kirsher 		goto again;
1498f7917c00SJeff Kirsher 	}
1499f7917c00SJeff Kirsher 
1500f7917c00SJeff Kirsher 	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1501f7917c00SJeff Kirsher 
1502f7917c00SJeff Kirsher 	q->in_use++;
1503f7917c00SJeff Kirsher 	if (++q->pidx >= q->size) {
1504f7917c00SJeff Kirsher 		q->pidx = 0;
1505f7917c00SJeff Kirsher 		q->gen ^= 1;
1506f7917c00SJeff Kirsher 	}
1507f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1508f7917c00SJeff Kirsher 	wmb();
1509f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_KDOORBELL,
1510f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1511f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1512f7917c00SJeff Kirsher }
1513f7917c00SJeff Kirsher 
1514f7917c00SJeff Kirsher /**
1515f7917c00SJeff Kirsher  *	restart_ctrlq - restart a suspended control queue
15165e0b8928SÍñigo Huguet  *	@w: pointer to the work associated with this handler
1517f7917c00SJeff Kirsher  *
1518f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx control queue.
1519f7917c00SJeff Kirsher  */
15205e0b8928SÍñigo Huguet static void restart_ctrlq(struct work_struct *w)
1521f7917c00SJeff Kirsher {
1522f7917c00SJeff Kirsher 	struct sk_buff *skb;
15235e0b8928SÍñigo Huguet 	struct sge_qset *qs = container_of(w, struct sge_qset,
15245e0b8928SÍñigo Huguet 					   txq[TXQ_CTRL].qresume_task);
1525f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1526f7917c00SJeff Kirsher 
1527f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1528f7917c00SJeff Kirsher       again:reclaim_completed_tx_imm(q);
1529f7917c00SJeff Kirsher 
1530f7917c00SJeff Kirsher 	while (q->in_use < q->size &&
1531f7917c00SJeff Kirsher 	       (skb = __skb_dequeue(&q->sendq)) != NULL) {
1532f7917c00SJeff Kirsher 
1533f7917c00SJeff Kirsher 		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1534f7917c00SJeff Kirsher 
1535f7917c00SJeff Kirsher 		if (++q->pidx >= q->size) {
1536f7917c00SJeff Kirsher 			q->pidx = 0;
1537f7917c00SJeff Kirsher 			q->gen ^= 1;
1538f7917c00SJeff Kirsher 		}
1539f7917c00SJeff Kirsher 		q->in_use++;
1540f7917c00SJeff Kirsher 	}
1541f7917c00SJeff Kirsher 
1542f7917c00SJeff Kirsher 	if (!skb_queue_empty(&q->sendq)) {
1543f7917c00SJeff Kirsher 		set_bit(TXQ_CTRL, &qs->txq_stopped);
15444e857c58SPeter Zijlstra 		smp_mb__after_atomic();
1545f7917c00SJeff Kirsher 
1546f7917c00SJeff Kirsher 		if (should_restart_tx(q) &&
1547f7917c00SJeff Kirsher 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1548f7917c00SJeff Kirsher 			goto again;
1549f7917c00SJeff Kirsher 		q->stops++;
1550f7917c00SJeff Kirsher 	}
1551f7917c00SJeff Kirsher 
1552f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1553f7917c00SJeff Kirsher 	wmb();
1554f7917c00SJeff Kirsher 	t3_write_reg(qs->adap, A_SG_KDOORBELL,
1555f7917c00SJeff Kirsher 		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1556f7917c00SJeff Kirsher }
1557f7917c00SJeff Kirsher 
1558f7917c00SJeff Kirsher /*
1559f7917c00SJeff Kirsher  * Send a management message through control queue 0
1560f7917c00SJeff Kirsher  */
1561f7917c00SJeff Kirsher int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1562f7917c00SJeff Kirsher {
1563f7917c00SJeff Kirsher 	int ret;
1564f7917c00SJeff Kirsher 	local_bh_disable();
1565f7917c00SJeff Kirsher 	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1566f7917c00SJeff Kirsher 	local_bh_enable();
1567f7917c00SJeff Kirsher 
1568f7917c00SJeff Kirsher 	return ret;
1569f7917c00SJeff Kirsher }
1570f7917c00SJeff Kirsher 
1571f7917c00SJeff Kirsher /**
1572f7917c00SJeff Kirsher  *	deferred_unmap_destructor - unmap a packet when it is freed
1573f7917c00SJeff Kirsher  *	@skb: the packet
1574f7917c00SJeff Kirsher  *
1575f7917c00SJeff Kirsher  *	This is the packet destructor used for Tx packets that need to remain
1576f7917c00SJeff Kirsher  *	mapped until they are freed rather than until their Tx descriptors are
1577f7917c00SJeff Kirsher  *	freed.
1578f7917c00SJeff Kirsher  */
1579f7917c00SJeff Kirsher static void deferred_unmap_destructor(struct sk_buff *skb)
1580f7917c00SJeff Kirsher {
1581f7917c00SJeff Kirsher 	int i;
1582f7917c00SJeff Kirsher 	const dma_addr_t *p;
1583f7917c00SJeff Kirsher 	const struct skb_shared_info *si;
1584f7917c00SJeff Kirsher 	const struct deferred_unmap_info *dui;
1585f7917c00SJeff Kirsher 
1586f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1587f7917c00SJeff Kirsher 	p = dui->addr;
1588f7917c00SJeff Kirsher 
158915dd16c2SLi RongQing 	if (skb_tail_pointer(skb) - skb_transport_header(skb))
15904489d8f5SChristophe JAILLET 		dma_unmap_single(&dui->pdev->dev, *p++,
15914489d8f5SChristophe JAILLET 				 skb_tail_pointer(skb) - skb_transport_header(skb),
15924489d8f5SChristophe JAILLET 				 DMA_TO_DEVICE);
1593f7917c00SJeff Kirsher 
1594f7917c00SJeff Kirsher 	si = skb_shinfo(skb);
1595f7917c00SJeff Kirsher 	for (i = 0; i < si->nr_frags; i++)
15964489d8f5SChristophe JAILLET 		dma_unmap_page(&dui->pdev->dev, *p++,
15974489d8f5SChristophe JAILLET 			       skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
1598f7917c00SJeff Kirsher }
1599f7917c00SJeff Kirsher 
1600f7917c00SJeff Kirsher static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1601f7917c00SJeff Kirsher 				     const struct sg_ent *sgl, int sgl_flits)
1602f7917c00SJeff Kirsher {
1603f7917c00SJeff Kirsher 	dma_addr_t *p;
1604f7917c00SJeff Kirsher 	struct deferred_unmap_info *dui;
1605f7917c00SJeff Kirsher 
1606f7917c00SJeff Kirsher 	dui = (struct deferred_unmap_info *)skb->head;
1607f7917c00SJeff Kirsher 	dui->pdev = pdev;
1608f7917c00SJeff Kirsher 	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1609f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[0]);
1610f7917c00SJeff Kirsher 		*p++ = be64_to_cpu(sgl->addr[1]);
1611f7917c00SJeff Kirsher 	}
1612f7917c00SJeff Kirsher 	if (sgl_flits)
1613f7917c00SJeff Kirsher 		*p = be64_to_cpu(sgl->addr[0]);
1614f7917c00SJeff Kirsher }
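/*
 * Editorial note: the DMA addresses are harvested back out of the
 * big-endian SGL that was just written (two addresses per 3 flits) and
 * stashed at skb->head, so deferred_unmap_destructor() above can unmap
 * them when the skb is finally freed.
 */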
1615f7917c00SJeff Kirsher 
1616f7917c00SJeff Kirsher /**
1617f7917c00SJeff Kirsher  *	write_ofld_wr - write an offload work request
1618f7917c00SJeff Kirsher  *	@adap: the adapter
1619f7917c00SJeff Kirsher  *	@skb: the packet to send
1620f7917c00SJeff Kirsher  *	@q: the Tx queue
1621f7917c00SJeff Kirsher  *	@pidx: index of the first Tx descriptor to write
1622f7917c00SJeff Kirsher  *	@gen: the generation value to use
1623f7917c00SJeff Kirsher  *	@ndesc: number of descriptors the packet will occupy
1624d0ea5cbdSJesse Brandeburg  *	@addr: the list of DMA-mapped addresses for the packet buffers
1625f7917c00SJeff Kirsher  *
1626f7917c00SJeff Kirsher  *	Write an offload work request to send the supplied packet.  The packet
1627f7917c00SJeff Kirsher  *	data already carry the work request with most fields populated.
1628f7917c00SJeff Kirsher  */
1629f7917c00SJeff Kirsher static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1630f7917c00SJeff Kirsher 			  struct sge_txq *q, unsigned int pidx,
1631c69fe407SArjun Vynipadath 			  unsigned int gen, unsigned int ndesc,
1632c69fe407SArjun Vynipadath 			  const dma_addr_t *addr)
1633f7917c00SJeff Kirsher {
1634f7917c00SJeff Kirsher 	unsigned int sgl_flits, flits;
1635f7917c00SJeff Kirsher 	struct work_request_hdr *from;
1636f7917c00SJeff Kirsher 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1637f7917c00SJeff Kirsher 	struct tx_desc *d = &q->desc[pidx];
1638f7917c00SJeff Kirsher 
1639f7917c00SJeff Kirsher 	if (immediate(skb)) {
1640f7917c00SJeff Kirsher 		q->sdesc[pidx].skb = NULL;
1641f7917c00SJeff Kirsher 		write_imm(d, skb, skb->len, gen);
1642f7917c00SJeff Kirsher 		return;
1643f7917c00SJeff Kirsher 	}
1644f7917c00SJeff Kirsher 
1645f7917c00SJeff Kirsher 	/* Only TX_DATA builds SGLs */
1646f7917c00SJeff Kirsher 
1647f7917c00SJeff Kirsher 	from = (struct work_request_hdr *)skb->data;
1648f7917c00SJeff Kirsher 	memcpy(&d->flit[1], &from[1],
1649f7917c00SJeff Kirsher 	       skb_transport_offset(skb) - sizeof(*from));
1650f7917c00SJeff Kirsher 
1651f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;
1652f7917c00SJeff Kirsher 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1653c69fe407SArjun Vynipadath 	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1654c69fe407SArjun Vynipadath 			      skb_tail_pointer(skb) - skb_transport_header(skb),
1655c69fe407SArjun Vynipadath 			      addr);
1656f7917c00SJeff Kirsher 	if (need_skb_unmap()) {
1657f7917c00SJeff Kirsher 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1658f7917c00SJeff Kirsher 		skb->destructor = deferred_unmap_destructor;
1659f7917c00SJeff Kirsher 	}
1660f7917c00SJeff Kirsher 
1661f7917c00SJeff Kirsher 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1662f7917c00SJeff Kirsher 			 gen, from->wr_hi, from->wr_lo);
1663f7917c00SJeff Kirsher }
1664f7917c00SJeff Kirsher 
1665f7917c00SJeff Kirsher /**
1666f7917c00SJeff Kirsher  *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1667f7917c00SJeff Kirsher  *	@skb: the packet
1668f7917c00SJeff Kirsher  *
1669f7917c00SJeff Kirsher  * 	Returns the number of Tx descriptors needed for the given offload
1670f7917c00SJeff Kirsher  * 	packet.  These packets are already fully constructed.
1671f7917c00SJeff Kirsher  */
1672f7917c00SJeff Kirsher static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1673f7917c00SJeff Kirsher {
1674f7917c00SJeff Kirsher 	unsigned int flits, cnt;
1675f7917c00SJeff Kirsher 
1676f7917c00SJeff Kirsher 	if (skb->len <= WR_LEN)
1677f7917c00SJeff Kirsher 		return 1;	/* packet fits as immediate data */
1678f7917c00SJeff Kirsher 
1679f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8;	/* headers */
1680f7917c00SJeff Kirsher 	cnt = skb_shinfo(skb)->nr_frags;
1681be8b678cSSimon Horman 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1682f7917c00SJeff Kirsher 		cnt++;
1683f7917c00SJeff Kirsher 	return flits_to_desc(flits + sgl_len(cnt));
1684f7917c00SJeff Kirsher }
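/*
 * Illustrative example (assumed numbers): an offload packet with 40 bytes
 * of work-request/header data occupies 40 / 8 = 5 header flits; two page
 * fragments plus a trailing linear chunk give 3 SGL buffers at roughly
 * 3 flits per two buffers (see write_sgl() above), and flits_to_desc()
 * then rounds the total up to whole Tx descriptors.
 */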
1685f7917c00SJeff Kirsher 
1686f7917c00SJeff Kirsher /**
1687f7917c00SJeff Kirsher  *	ofld_xmit - send a packet through an offload queue
1688f7917c00SJeff Kirsher  *	@adap: the adapter
1689f7917c00SJeff Kirsher  *	@q: the Tx offload queue
1690f7917c00SJeff Kirsher  *	@skb: the packet
1691f7917c00SJeff Kirsher  *
1692f7917c00SJeff Kirsher  *	Send an offload packet through an SGE offload queue.
1693f7917c00SJeff Kirsher  */
1694f7917c00SJeff Kirsher static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1695f7917c00SJeff Kirsher 		     struct sk_buff *skb)
1696f7917c00SJeff Kirsher {
1697f7917c00SJeff Kirsher 	int ret;
1698f7917c00SJeff Kirsher 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1699f7917c00SJeff Kirsher 
1700f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1701f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1702f7917c00SJeff Kirsher 
1703f7917c00SJeff Kirsher 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1704f7917c00SJeff Kirsher 	if (unlikely(ret)) {
1705f7917c00SJeff Kirsher 		if (ret == 1) {
1706f7917c00SJeff Kirsher 			skb->priority = ndesc;	/* save for restart */
1707f7917c00SJeff Kirsher 			spin_unlock(&q->lock);
1708f7917c00SJeff Kirsher 			return NET_XMIT_CN;
1709f7917c00SJeff Kirsher 		}
1710f7917c00SJeff Kirsher 		goto again;
1711f7917c00SJeff Kirsher 	}
1712f7917c00SJeff Kirsher 
1713c69fe407SArjun Vynipadath 	if (!immediate(skb) &&
1714c69fe407SArjun Vynipadath 	    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1715c69fe407SArjun Vynipadath 		spin_unlock(&q->lock);
1716c69fe407SArjun Vynipadath 		return NET_XMIT_SUCCESS;
1717c69fe407SArjun Vynipadath 	}
1718c69fe407SArjun Vynipadath 
1719f7917c00SJeff Kirsher 	gen = q->gen;
1720f7917c00SJeff Kirsher 	q->in_use += ndesc;
1721f7917c00SJeff Kirsher 	pidx = q->pidx;
1722f7917c00SJeff Kirsher 	q->pidx += ndesc;
1723f7917c00SJeff Kirsher 	if (q->pidx >= q->size) {
1724f7917c00SJeff Kirsher 		q->pidx -= q->size;
1725f7917c00SJeff Kirsher 		q->gen ^= 1;
1726f7917c00SJeff Kirsher 	}
1727f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1728f7917c00SJeff Kirsher 
1729c69fe407SArjun Vynipadath 	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1730f7917c00SJeff Kirsher 	check_ring_tx_db(adap, q);
1731f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1732f7917c00SJeff Kirsher }
1733f7917c00SJeff Kirsher 
1734f7917c00SJeff Kirsher /**
1735f7917c00SJeff Kirsher  *	restart_offloadq - restart a suspended offload queue
17365e0b8928SÍñigo Huguet  *	@w: pointer to the work associated with this handler
1737f7917c00SJeff Kirsher  *
1738f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx offload queue.
1739f7917c00SJeff Kirsher  */
17405e0b8928SÍñigo Huguet static void restart_offloadq(struct work_struct *w)
1741f7917c00SJeff Kirsher {
1742f7917c00SJeff Kirsher 	struct sk_buff *skb;
17435e0b8928SÍñigo Huguet 	struct sge_qset *qs = container_of(w, struct sge_qset,
17445e0b8928SÍñigo Huguet 					   txq[TXQ_OFLD].qresume_task);
1745f7917c00SJeff Kirsher 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
1746f7917c00SJeff Kirsher 	const struct port_info *pi = netdev_priv(qs->netdev);
1747f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
1748c69fe407SArjun Vynipadath 	unsigned int written = 0;
1749f7917c00SJeff Kirsher 
1750f7917c00SJeff Kirsher 	spin_lock(&q->lock);
1751f7917c00SJeff Kirsher again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1752f7917c00SJeff Kirsher 
1753f7917c00SJeff Kirsher 	while ((skb = skb_peek(&q->sendq)) != NULL) {
1754f7917c00SJeff Kirsher 		unsigned int gen, pidx;
1755f7917c00SJeff Kirsher 		unsigned int ndesc = skb->priority;
1756f7917c00SJeff Kirsher 
1757f7917c00SJeff Kirsher 		if (unlikely(q->size - q->in_use < ndesc)) {
1758f7917c00SJeff Kirsher 			set_bit(TXQ_OFLD, &qs->txq_stopped);
17594e857c58SPeter Zijlstra 			smp_mb__after_atomic();
1760f7917c00SJeff Kirsher 
1761f7917c00SJeff Kirsher 			if (should_restart_tx(q) &&
1762f7917c00SJeff Kirsher 			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1763f7917c00SJeff Kirsher 				goto again;
1764f7917c00SJeff Kirsher 			q->stops++;
1765f7917c00SJeff Kirsher 			break;
1766f7917c00SJeff Kirsher 		}
1767f7917c00SJeff Kirsher 
1768c69fe407SArjun Vynipadath 		if (!immediate(skb) &&
1769c69fe407SArjun Vynipadath 		    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1770c69fe407SArjun Vynipadath 			break;
1771c69fe407SArjun Vynipadath 
1772f7917c00SJeff Kirsher 		gen = q->gen;
1773f7917c00SJeff Kirsher 		q->in_use += ndesc;
1774f7917c00SJeff Kirsher 		pidx = q->pidx;
1775f7917c00SJeff Kirsher 		q->pidx += ndesc;
1776c69fe407SArjun Vynipadath 		written += ndesc;
1777f7917c00SJeff Kirsher 		if (q->pidx >= q->size) {
1778f7917c00SJeff Kirsher 			q->pidx -= q->size;
1779f7917c00SJeff Kirsher 			q->gen ^= 1;
1780f7917c00SJeff Kirsher 		}
1781f7917c00SJeff Kirsher 		__skb_unlink(skb, &q->sendq);
1782f7917c00SJeff Kirsher 		spin_unlock(&q->lock);
1783f7917c00SJeff Kirsher 
1784c69fe407SArjun Vynipadath 		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1785c69fe407SArjun Vynipadath 			      (dma_addr_t *)skb->head);
1786f7917c00SJeff Kirsher 		spin_lock(&q->lock);
1787f7917c00SJeff Kirsher 	}
1788f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
1789f7917c00SJeff Kirsher 
1790f7917c00SJeff Kirsher #if USE_GTS
1791f7917c00SJeff Kirsher 	set_bit(TXQ_RUNNING, &q->flags);
1792f7917c00SJeff Kirsher 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
1793f7917c00SJeff Kirsher #endif
1794f7917c00SJeff Kirsher 	wmb();
1795c69fe407SArjun Vynipadath 	if (likely(written))
1796f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_KDOORBELL,
1797f7917c00SJeff Kirsher 			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1798f7917c00SJeff Kirsher }
1799f7917c00SJeff Kirsher 
1800f7917c00SJeff Kirsher /**
1801f7917c00SJeff Kirsher  *	queue_set - return the queue set a packet should use
1802f7917c00SJeff Kirsher  *	@skb: the packet
1803f7917c00SJeff Kirsher  *
1804f7917c00SJeff Kirsher  *	Maps a packet to the SGE queue set it should use.  The desired queue
1805f7917c00SJeff Kirsher  *	set is carried in bits 1-3 in the packet's priority.
1806f7917c00SJeff Kirsher  */
1807f7917c00SJeff Kirsher static inline int queue_set(const struct sk_buff *skb)
1808f7917c00SJeff Kirsher {
1809f7917c00SJeff Kirsher 	return skb->priority >> 1;
1810f7917c00SJeff Kirsher }
1811f7917c00SJeff Kirsher 
1812f7917c00SJeff Kirsher /**
1813f7917c00SJeff Kirsher  *	is_ctrl_pkt - return whether an offload packet is a control packet
1814f7917c00SJeff Kirsher  *	@skb: the packet
1815f7917c00SJeff Kirsher  *
1816f7917c00SJeff Kirsher  *	Determines whether an offload packet should use an OFLD or a CTRL
1817f7917c00SJeff Kirsher  *	Tx queue.  This is indicated by bit 0 in the packet's priority.
1818f7917c00SJeff Kirsher  */
1819f7917c00SJeff Kirsher static inline int is_ctrl_pkt(const struct sk_buff *skb)
1820f7917c00SJeff Kirsher {
1821f7917c00SJeff Kirsher 	return skb->priority & 1;
1822f7917c00SJeff Kirsher }
1823f7917c00SJeff Kirsher 
1824f7917c00SJeff Kirsher /**
1825f7917c00SJeff Kirsher  *	t3_offload_tx - send an offload packet
1826f7917c00SJeff Kirsher  *	@tdev: the offload device to send to
1827f7917c00SJeff Kirsher  *	@skb: the packet
1828f7917c00SJeff Kirsher  *
1829f7917c00SJeff Kirsher  *	Sends an offload packet.  We use the packet priority to select the
1830f7917c00SJeff Kirsher  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1831f7917c00SJeff Kirsher  *	should be sent as regular or control, bits 1-3 select the queue set.
1832f7917c00SJeff Kirsher  */
1833f7917c00SJeff Kirsher int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1834f7917c00SJeff Kirsher {
1835f7917c00SJeff Kirsher 	struct adapter *adap = tdev2adap(tdev);
1836f7917c00SJeff Kirsher 	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1837f7917c00SJeff Kirsher 
1838f7917c00SJeff Kirsher 	if (unlikely(is_ctrl_pkt(skb)))
1839f7917c00SJeff Kirsher 		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1840f7917c00SJeff Kirsher 
1841f7917c00SJeff Kirsher 	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1842f7917c00SJeff Kirsher }
1843f7917c00SJeff Kirsher 
1844f7917c00SJeff Kirsher /**
1845f7917c00SJeff Kirsher  *	offload_enqueue - add an offload packet to an SGE offload receive queue
1846f7917c00SJeff Kirsher  *	@q: the SGE response queue
1847f7917c00SJeff Kirsher  *	@skb: the packet
1848f7917c00SJeff Kirsher  *
1849f7917c00SJeff Kirsher  *	Add a new offload packet to an SGE response queue's offload packet
1850f7917c00SJeff Kirsher  *	queue.  If the packet is the first on the queue it schedules the RX
1851f7917c00SJeff Kirsher  *	softirq to process the queue.
1852f7917c00SJeff Kirsher  */
1853f7917c00SJeff Kirsher static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1854f7917c00SJeff Kirsher {
1855f7917c00SJeff Kirsher 	int was_empty = skb_queue_empty(&q->rx_queue);
1856f7917c00SJeff Kirsher 
1857f7917c00SJeff Kirsher 	__skb_queue_tail(&q->rx_queue, skb);
1858f7917c00SJeff Kirsher 
1859f7917c00SJeff Kirsher 	if (was_empty) {
1860f7917c00SJeff Kirsher 		struct sge_qset *qs = rspq_to_qset(q);
1861f7917c00SJeff Kirsher 
1862f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
1863f7917c00SJeff Kirsher 	}
1864f7917c00SJeff Kirsher }
1865f7917c00SJeff Kirsher 
1866f7917c00SJeff Kirsher /**
1867f7917c00SJeff Kirsher  *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1868f7917c00SJeff Kirsher  *	@tdev: the offload device that will be receiving the packets
1869f7917c00SJeff Kirsher  *	@q: the SGE response queue that assembled the bundle
1870f7917c00SJeff Kirsher  *	@skbs: the partial bundle
1871f7917c00SJeff Kirsher  *	@n: the number of packets in the bundle
1872f7917c00SJeff Kirsher  *
1873f7917c00SJeff Kirsher  *	Delivers a (partial) bundle of Rx offload packets to an offload device.
1874f7917c00SJeff Kirsher  */
1875f7917c00SJeff Kirsher static inline void deliver_partial_bundle(struct t3cdev *tdev,
1876f7917c00SJeff Kirsher 					  struct sge_rspq *q,
1877f7917c00SJeff Kirsher 					  struct sk_buff *skbs[], int n)
1878f7917c00SJeff Kirsher {
1879f7917c00SJeff Kirsher 	if (n) {
1880f7917c00SJeff Kirsher 		q->offload_bundles++;
1881f7917c00SJeff Kirsher 		tdev->recv(tdev, skbs, n);
1882f7917c00SJeff Kirsher 	}
1883f7917c00SJeff Kirsher }
1884f7917c00SJeff Kirsher 
1885f7917c00SJeff Kirsher /**
1886f7917c00SJeff Kirsher  *	ofld_poll - NAPI handler for offload packets in interrupt mode
1887d0ea5cbdSJesse Brandeburg  *	@napi: the NAPI instance doing the polling
1888f7917c00SJeff Kirsher  *	@budget: polling budget
1889f7917c00SJeff Kirsher  *
1890f7917c00SJeff Kirsher  *	The NAPI handler for offload packets when a response queue is serviced
1891f7917c00SJeff Kirsher  *	by the hard interrupt handler, i.e., when it's operating in non-polling
1892f7917c00SJeff Kirsher  *	mode.  Creates small packet batches and sends them through the offload
1893f7917c00SJeff Kirsher  *	receive handler.  Batches need to be of modest size as we do prefetches
1894f7917c00SJeff Kirsher  *	on the packets in each.
1895f7917c00SJeff Kirsher  */
1896f7917c00SJeff Kirsher static int ofld_poll(struct napi_struct *napi, int budget)
1897f7917c00SJeff Kirsher {
1898f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1899f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
1900f7917c00SJeff Kirsher 	struct adapter *adapter = qs->adap;
1901f7917c00SJeff Kirsher 	int work_done = 0;
1902f7917c00SJeff Kirsher 
1903f7917c00SJeff Kirsher 	while (work_done < budget) {
1904f7917c00SJeff Kirsher 		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1905f7917c00SJeff Kirsher 		struct sk_buff_head queue;
1906f7917c00SJeff Kirsher 		int ngathered;
1907f7917c00SJeff Kirsher 
1908f7917c00SJeff Kirsher 		spin_lock_irq(&q->lock);
1909f7917c00SJeff Kirsher 		__skb_queue_head_init(&queue);
1910f7917c00SJeff Kirsher 		skb_queue_splice_init(&q->rx_queue, &queue);
1911f7917c00SJeff Kirsher 		if (skb_queue_empty(&queue)) {
19126ad20165SEric Dumazet 			napi_complete_done(napi, work_done);
1913f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1914f7917c00SJeff Kirsher 			return work_done;
1915f7917c00SJeff Kirsher 		}
1916f7917c00SJeff Kirsher 		spin_unlock_irq(&q->lock);
1917f7917c00SJeff Kirsher 
1918f7917c00SJeff Kirsher 		ngathered = 0;
1919f7917c00SJeff Kirsher 		skb_queue_walk_safe(&queue, skb, tmp) {
1920f7917c00SJeff Kirsher 			if (work_done >= budget)
1921f7917c00SJeff Kirsher 				break;
1922f7917c00SJeff Kirsher 			work_done++;
1923f7917c00SJeff Kirsher 
1924f7917c00SJeff Kirsher 			__skb_unlink(skb, &queue);
1925f7917c00SJeff Kirsher 			prefetch(skb->data);
1926f7917c00SJeff Kirsher 			skbs[ngathered] = skb;
1927f7917c00SJeff Kirsher 			if (++ngathered == RX_BUNDLE_SIZE) {
1928f7917c00SJeff Kirsher 				q->offload_bundles++;
1929f7917c00SJeff Kirsher 				adapter->tdev.recv(&adapter->tdev, skbs,
1930f7917c00SJeff Kirsher 						   ngathered);
1931f7917c00SJeff Kirsher 				ngathered = 0;
1932f7917c00SJeff Kirsher 			}
1933f7917c00SJeff Kirsher 		}
1934f7917c00SJeff Kirsher 		if (!skb_queue_empty(&queue)) {
1935f7917c00SJeff Kirsher 			/* splice remaining packets back onto Rx queue */
1936f7917c00SJeff Kirsher 			spin_lock_irq(&q->lock);
1937f7917c00SJeff Kirsher 			skb_queue_splice(&queue, &q->rx_queue);
1938f7917c00SJeff Kirsher 			spin_unlock_irq(&q->lock);
1939f7917c00SJeff Kirsher 		}
1940f7917c00SJeff Kirsher 		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1941f7917c00SJeff Kirsher 	}
1942f7917c00SJeff Kirsher 
1943f7917c00SJeff Kirsher 	return work_done;
1944f7917c00SJeff Kirsher }
1945f7917c00SJeff Kirsher 
1946f7917c00SJeff Kirsher /**
1947f7917c00SJeff Kirsher  *	rx_offload - process a received offload packet
1948f7917c00SJeff Kirsher  *	@tdev: the offload device receiving the packet
1949f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
1950f7917c00SJeff Kirsher  *	@skb: the packet
1951f7917c00SJeff Kirsher  *	@rx_gather: a gather list of packets if we are building a bundle
1952f7917c00SJeff Kirsher  *	@gather_idx: index of the next available slot in the bundle
1953f7917c00SJeff Kirsher  *
1954067bb3c3SJean Sacren  *	Process an ingress offload packet and add it to the offload ingress
1955f7917c00SJeff Kirsher  *	queue. 	Returns the index of the next available slot in the bundle.
1956f7917c00SJeff Kirsher  */
1957f7917c00SJeff Kirsher static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1958f7917c00SJeff Kirsher 			     struct sk_buff *skb, struct sk_buff *rx_gather[],
1959f7917c00SJeff Kirsher 			     unsigned int gather_idx)
1960f7917c00SJeff Kirsher {
1961f7917c00SJeff Kirsher 	skb_reset_mac_header(skb);
1962f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
1963f7917c00SJeff Kirsher 	skb_reset_transport_header(skb);
1964f7917c00SJeff Kirsher 
1965f7917c00SJeff Kirsher 	if (rq->polling) {
1966f7917c00SJeff Kirsher 		rx_gather[gather_idx++] = skb;
1967f7917c00SJeff Kirsher 		if (gather_idx == RX_BUNDLE_SIZE) {
1968f7917c00SJeff Kirsher 			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1969f7917c00SJeff Kirsher 			gather_idx = 0;
1970f7917c00SJeff Kirsher 			rq->offload_bundles++;
1971f7917c00SJeff Kirsher 		}
1972f7917c00SJeff Kirsher 	} else
1973f7917c00SJeff Kirsher 		offload_enqueue(rq, skb);
1974f7917c00SJeff Kirsher 
1975f7917c00SJeff Kirsher 	return gather_idx;
1976f7917c00SJeff Kirsher }
1977f7917c00SJeff Kirsher 
1978f7917c00SJeff Kirsher /**
1979f7917c00SJeff Kirsher  *	restart_tx - check whether to restart suspended Tx queues
1980f7917c00SJeff Kirsher  *	@qs: the queue set to resume
1981f7917c00SJeff Kirsher  *
1982f7917c00SJeff Kirsher  *	Restarts suspended Tx queues of an SGE queue set if they have enough
1983f7917c00SJeff Kirsher  *	free resources to resume operation.
1984f7917c00SJeff Kirsher  */
1985f7917c00SJeff Kirsher static void restart_tx(struct sge_qset *qs)
1986f7917c00SJeff Kirsher {
1987f7917c00SJeff Kirsher 	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1988f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
1989f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1990f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].restarts++;
1991f7917c00SJeff Kirsher 		if (netif_running(qs->netdev))
1992f7917c00SJeff Kirsher 			netif_tx_wake_queue(qs->tx_q);
1993f7917c00SJeff Kirsher 	}
1994f7917c00SJeff Kirsher 
1995f7917c00SJeff Kirsher 	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1996f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1997f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1998f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].restarts++;
19995e0b8928SÍñigo Huguet 
20005e0b8928SÍñigo Huguet 		/* The work can be quite lengthy so we use the driver's own queue */
20015e0b8928SÍñigo Huguet 		queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
2002f7917c00SJeff Kirsher 	}
2003f7917c00SJeff Kirsher 	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2004f7917c00SJeff Kirsher 	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2005f7917c00SJeff Kirsher 	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2006f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].restarts++;
20075e0b8928SÍñigo Huguet 
20085e0b8928SÍñigo Huguet 		/* The work can be quite lengthy so we use the driver's own queue */
20095e0b8928SÍñigo Huguet 		queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
2010f7917c00SJeff Kirsher 	}
2011f7917c00SJeff Kirsher }
2012f7917c00SJeff Kirsher 
2013f7917c00SJeff Kirsher /**
2014f7917c00SJeff Kirsher  *	cxgb3_arp_process - process an ARP request probing a private IP address
2015d0ea5cbdSJesse Brandeburg  *	@pi: the port info
2016f7917c00SJeff Kirsher  *	@skb: the skbuff containing the ARP request
2017f7917c00SJeff Kirsher  *
2018f7917c00SJeff Kirsher  *	Check if the ARP request is probing the private IP address
2019f7917c00SJeff Kirsher  *	dedicated to iSCSI, generate an ARP reply if so.
2020f7917c00SJeff Kirsher  */
2021f7917c00SJeff Kirsher static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2022f7917c00SJeff Kirsher {
2023f7917c00SJeff Kirsher 	struct net_device *dev = skb->dev;
2024f7917c00SJeff Kirsher 	struct arphdr *arp;
2025f7917c00SJeff Kirsher 	unsigned char *arp_ptr;
2026f7917c00SJeff Kirsher 	unsigned char *sha;
2027f7917c00SJeff Kirsher 	__be32 sip, tip;
2028f7917c00SJeff Kirsher 
2029f7917c00SJeff Kirsher 	if (!dev)
2030f7917c00SJeff Kirsher 		return;
2031f7917c00SJeff Kirsher 
2032f7917c00SJeff Kirsher 	skb_reset_network_header(skb);
2033f7917c00SJeff Kirsher 	arp = arp_hdr(skb);
2034f7917c00SJeff Kirsher 
2035f7917c00SJeff Kirsher 	if (arp->ar_op != htons(ARPOP_REQUEST))
2036f7917c00SJeff Kirsher 		return;
2037f7917c00SJeff Kirsher 
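	/*
	 * Walk the ARP payload, which for an Ethernet/IPv4 request is laid
	 * out as: sender HW address, sender IP, target HW address, target IP.
	 */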
2038f7917c00SJeff Kirsher 	arp_ptr = (unsigned char *)(arp + 1);
2039f7917c00SJeff Kirsher 	sha = arp_ptr;
2040f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2041f7917c00SJeff Kirsher 	memcpy(&sip, arp_ptr, sizeof(sip));
2042f7917c00SJeff Kirsher 	arp_ptr += sizeof(sip);
2043f7917c00SJeff Kirsher 	arp_ptr += dev->addr_len;
2044f7917c00SJeff Kirsher 	memcpy(&tip, arp_ptr, sizeof(tip));
2045f7917c00SJeff Kirsher 
2046f7917c00SJeff Kirsher 	if (tip != pi->iscsi_ipv4addr)
2047f7917c00SJeff Kirsher 		return;
2048f7917c00SJeff Kirsher 
2049f7917c00SJeff Kirsher 	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2050f7917c00SJeff Kirsher 		 pi->iscsic.mac_addr, sha);
2051f7917c00SJeff Kirsher 
2052f7917c00SJeff Kirsher }
2053f7917c00SJeff Kirsher 
2054f7917c00SJeff Kirsher static inline int is_arp(struct sk_buff *skb)
2055f7917c00SJeff Kirsher {
2056f7917c00SJeff Kirsher 	return skb->protocol == htons(ETH_P_ARP);
2057f7917c00SJeff Kirsher }
2058f7917c00SJeff Kirsher 
2059f7917c00SJeff Kirsher static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2060f7917c00SJeff Kirsher 					struct sk_buff *skb)
2061f7917c00SJeff Kirsher {
2062f7917c00SJeff Kirsher 	if (is_arp(skb)) {
2063f7917c00SJeff Kirsher 		cxgb3_arp_process(pi, skb);
2064f7917c00SJeff Kirsher 		return;
2065f7917c00SJeff Kirsher 	}
2066f7917c00SJeff Kirsher 
2067f7917c00SJeff Kirsher 	if (pi->iscsic.recv)
2068f7917c00SJeff Kirsher 		pi->iscsic.recv(pi, skb);
2069f7917c00SJeff Kirsher 
2070f7917c00SJeff Kirsher }
2071f7917c00SJeff Kirsher 
2072f7917c00SJeff Kirsher /**
2073f7917c00SJeff Kirsher  *	rx_eth - process an ingress ethernet packet
2074f7917c00SJeff Kirsher  *	@adap: the adapter
2075f7917c00SJeff Kirsher  *	@rq: the response queue that received the packet
2076f7917c00SJeff Kirsher  *	@skb: the packet
2077d0ea5cbdSJesse Brandeburg  *	@pad: number of bytes of padding preceding the packet (2 or 0, see below)
2078d0ea5cbdSJesse Brandeburg  *	@lro: whether large receive offload (GRO) may be used for this packet
2079f7917c00SJeff Kirsher  *
2080067bb3c3SJean Sacren  *	Process an ingress ethernet packet and deliver it to the stack.
2081f7917c00SJeff Kirsher  *	The padding is 2 if the packet was delivered in an Rx buffer and 0
2082f7917c00SJeff Kirsher  *	if it was immediate data in a response.
2083f7917c00SJeff Kirsher  */
2084f7917c00SJeff Kirsher static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2085f7917c00SJeff Kirsher 		   struct sk_buff *skb, int pad, int lro)
2086f7917c00SJeff Kirsher {
2087f7917c00SJeff Kirsher 	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2088f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(rq);
2089f7917c00SJeff Kirsher 	struct port_info *pi;
2090f7917c00SJeff Kirsher 
2091f7917c00SJeff Kirsher 	skb_pull(skb, sizeof(*p) + pad);
2092f7917c00SJeff Kirsher 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2093f7917c00SJeff Kirsher 	pi = netdev_priv(skb->dev);
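	/*
	 * csum_valid together with a full hardware checksum of 0xffff on an
	 * unfragmented packet means the TCP/UDP checksum verified correctly,
	 * so the stack can skip its own verification.
	 */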
2094f7917c00SJeff Kirsher 	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2095f7917c00SJeff Kirsher 	    p->csum == htons(0xffff) && !p->fragment) {
2096f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2097f7917c00SJeff Kirsher 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2098f7917c00SJeff Kirsher 	} else
2099f7917c00SJeff Kirsher 		skb_checksum_none_assert(skb);
2100f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2101f7917c00SJeff Kirsher 
2102f7917c00SJeff Kirsher 	if (p->vlan_valid) {
2103f7917c00SJeff Kirsher 		qs->port_stats[SGE_PSTAT_VLANEX]++;
210486a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2105f7917c00SJeff Kirsher 	}
2106f7917c00SJeff Kirsher 	if (rq->polling) {
2107f7917c00SJeff Kirsher 		if (lro)
2108f7917c00SJeff Kirsher 			napi_gro_receive(&qs->napi, skb);
2109f7917c00SJeff Kirsher 		else {
2110f7917c00SJeff Kirsher 			if (unlikely(pi->iscsic.flags))
2111f7917c00SJeff Kirsher 				cxgb3_process_iscsi_prov_pack(pi, skb);
2112f7917c00SJeff Kirsher 			netif_receive_skb(skb);
2113f7917c00SJeff Kirsher 		}
2114f7917c00SJeff Kirsher 	} else
2115f7917c00SJeff Kirsher 		netif_rx(skb);
2116f7917c00SJeff Kirsher }
2117f7917c00SJeff Kirsher 
2118f7917c00SJeff Kirsher static inline int is_eth_tcp(u32 rss)
2119f7917c00SJeff Kirsher {
2120f7917c00SJeff Kirsher 	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2121f7917c00SJeff Kirsher }
2122f7917c00SJeff Kirsher 
2123f7917c00SJeff Kirsher /**
2124f7917c00SJeff Kirsher  *	lro_add_page - add a page chunk to an LRO session
2125f7917c00SJeff Kirsher  *	@adap: the adapter
2126f7917c00SJeff Kirsher  *	@qs: the associated queue set
2127f7917c00SJeff Kirsher  *	@fl: the free list containing the page chunk to add
2128f7917c00SJeff Kirsher  *	@len: packet length
2129f7917c00SJeff Kirsher  *	@complete: Indicates the last fragment of a frame
2130f7917c00SJeff Kirsher  *
2131f7917c00SJeff Kirsher  *	Add a received packet contained in a page chunk to an existing LRO
2132f7917c00SJeff Kirsher  *	session.
2133f7917c00SJeff Kirsher  */
2134f7917c00SJeff Kirsher static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2135f7917c00SJeff Kirsher 			 struct sge_fl *fl, int len, int complete)
2136f7917c00SJeff Kirsher {
2137f7917c00SJeff Kirsher 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2138f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2139f7917c00SJeff Kirsher 	struct sk_buff *skb = NULL;
2140f7917c00SJeff Kirsher 	struct cpl_rx_pkt *cpl;
2141d7840976SMatthew Wilcox (Oracle) 	skb_frag_t *rx_frag;
2142f7917c00SJeff Kirsher 	int nr_frags;
2143f7917c00SJeff Kirsher 	int offset = 0;
2144f7917c00SJeff Kirsher 
2145f7917c00SJeff Kirsher 	if (!qs->nomem) {
2146f7917c00SJeff Kirsher 		skb = napi_get_frags(&qs->napi);
2147f7917c00SJeff Kirsher 		qs->nomem = !skb;
2148f7917c00SJeff Kirsher 	}
2149f7917c00SJeff Kirsher 
2150f7917c00SJeff Kirsher 	fl->credits--;
2151f7917c00SJeff Kirsher 
21524489d8f5SChristophe JAILLET 	dma_sync_single_for_cpu(&adap->pdev->dev,
2153f7917c00SJeff Kirsher 				dma_unmap_addr(sd, dma_addr),
21544489d8f5SChristophe JAILLET 				fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
2155f7917c00SJeff Kirsher 
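	/*
	 * Drop this buffer's reference on its page chunk.  The page is DMA
	 * unmapped only once every chunk carved from it has been consumed and
	 * it is no longer the free list's current allocation page.
	 */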
2156f7917c00SJeff Kirsher 	(*sd->pg_chunk.p_cnt)--;
2157f7917c00SJeff Kirsher 	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
21584489d8f5SChristophe JAILLET 		dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
21594489d8f5SChristophe JAILLET 			       fl->alloc_size, DMA_FROM_DEVICE);
2160f7917c00SJeff Kirsher 
2161f7917c00SJeff Kirsher 	if (!skb) {
2162f7917c00SJeff Kirsher 		put_page(sd->pg_chunk.page);
2163f7917c00SJeff Kirsher 		if (complete)
2164f7917c00SJeff Kirsher 			qs->nomem = 0;
2165f7917c00SJeff Kirsher 		return;
2166f7917c00SJeff Kirsher 	}
2167f7917c00SJeff Kirsher 
2168f7917c00SJeff Kirsher 	rx_frag = skb_shinfo(skb)->frags;
2169f7917c00SJeff Kirsher 	nr_frags = skb_shinfo(skb)->nr_frags;
2170f7917c00SJeff Kirsher 
2171f7917c00SJeff Kirsher 	if (!nr_frags) {
2172f7917c00SJeff Kirsher 		offset = 2 + sizeof(struct cpl_rx_pkt);
2173f7917c00SJeff Kirsher 		cpl = qs->lro_va = sd->pg_chunk.va + 2;
2174f7917c00SJeff Kirsher 
2175f7917c00SJeff Kirsher 		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2176f7917c00SJeff Kirsher 		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
2177f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2178f7917c00SJeff Kirsher 			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2179f7917c00SJeff Kirsher 		} else
2180f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_NONE;
2181f7917c00SJeff Kirsher 	} else
2182f7917c00SJeff Kirsher 		cpl = qs->lro_va;
2183f7917c00SJeff Kirsher 
2184f7917c00SJeff Kirsher 	len -= offset;
2185f7917c00SJeff Kirsher 
2186f7917c00SJeff Kirsher 	rx_frag += nr_frags;
2187*b51f4113SYunsheng Lin 	skb_frag_fill_page_desc(rx_frag, sd->pg_chunk.page,
2188*b51f4113SYunsheng Lin 				sd->pg_chunk.offset + offset, len);
2189f7917c00SJeff Kirsher 
2190f7917c00SJeff Kirsher 	skb->len += len;
2191f7917c00SJeff Kirsher 	skb->data_len += len;
2192f7917c00SJeff Kirsher 	skb->truesize += len;
2193f7917c00SJeff Kirsher 	skb_shinfo(skb)->nr_frags++;
2194f7917c00SJeff Kirsher 
2195f7917c00SJeff Kirsher 	if (!complete)
2196f7917c00SJeff Kirsher 		return;
2197f7917c00SJeff Kirsher 
2198f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2199f7917c00SJeff Kirsher 
220072073ad2SVipul Pandya 	if (cpl->vlan_valid) {
220172073ad2SVipul Pandya 		qs->port_stats[SGE_PSTAT_VLANEX]++;
220286a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
220372073ad2SVipul Pandya 	}
2204f7917c00SJeff Kirsher 	napi_gro_frags(&qs->napi);
2205f7917c00SJeff Kirsher }
2206f7917c00SJeff Kirsher 
2207f7917c00SJeff Kirsher /**
2208f7917c00SJeff Kirsher  *	handle_rsp_cntrl_info - handles control information in a response
2209f7917c00SJeff Kirsher  *	@qs: the queue set corresponding to the response
2210f7917c00SJeff Kirsher  *	@flags: the response control flags
2211f7917c00SJeff Kirsher  *
2212f7917c00SJeff Kirsher  *	Handles the control information of an SGE response, such as GTS
2213f7917c00SJeff Kirsher  *	indications and completion credits for the queue set's Tx queues.
2214f7917c00SJeff Kirsher  *	HW coalesces credits; we don't do any extra SW coalescing.
2215f7917c00SJeff Kirsher  */
2216f7917c00SJeff Kirsher static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2217f7917c00SJeff Kirsher {
2218f7917c00SJeff Kirsher 	unsigned int credits;
2219f7917c00SJeff Kirsher 
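	/*
	 * The response flags return completion credits for all three Tx
	 * queues of the set: TXQ0 is the Ethernet queue, TXQ1 the offload
	 * queue and TXQ2 the control queue.
	 */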
2220f7917c00SJeff Kirsher #if USE_GTS
2221f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ0_GTS)
2222f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2223f7917c00SJeff Kirsher #endif
2224f7917c00SJeff Kirsher 
2225f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ0_CR(flags);
2226f7917c00SJeff Kirsher 	if (credits)
2227f7917c00SJeff Kirsher 		qs->txq[TXQ_ETH].processed += credits;
2228f7917c00SJeff Kirsher 
2229f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ2_CR(flags);
2230f7917c00SJeff Kirsher 	if (credits)
2231f7917c00SJeff Kirsher 		qs->txq[TXQ_CTRL].processed += credits;
2232f7917c00SJeff Kirsher 
2233f7917c00SJeff Kirsher # if USE_GTS
2234f7917c00SJeff Kirsher 	if (flags & F_RSPD_TXQ1_GTS)
2235f7917c00SJeff Kirsher 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2236f7917c00SJeff Kirsher # endif
2237f7917c00SJeff Kirsher 	credits = G_RSPD_TXQ1_CR(flags);
2238f7917c00SJeff Kirsher 	if (credits)
2239f7917c00SJeff Kirsher 		qs->txq[TXQ_OFLD].processed += credits;
2240f7917c00SJeff Kirsher }
2241f7917c00SJeff Kirsher 
2242f7917c00SJeff Kirsher /**
2243f7917c00SJeff Kirsher  *	check_ring_db - check if we need to ring any doorbells
2244d0ea5cbdSJesse Brandeburg  *	@adap: the adapter
2245f7917c00SJeff Kirsher  *	@qs: the queue set whose Tx queues are to be examined
2246f7917c00SJeff Kirsher  *	@sleeping: indicates which Tx queue sent GTS
2247f7917c00SJeff Kirsher  *
2248f7917c00SJeff Kirsher  *	Checks if some of a queue set's Tx queues need to ring their doorbells
2249f7917c00SJeff Kirsher  *	to resume transmission after idling while they still have unprocessed
2250f7917c00SJeff Kirsher  *	descriptors.
2251f7917c00SJeff Kirsher  */
2252f7917c00SJeff Kirsher static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2253f7917c00SJeff Kirsher 			  unsigned int sleeping)
2254f7917c00SJeff Kirsher {
2255f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ0_GTS) {
2256f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_ETH];
2257f7917c00SJeff Kirsher 
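		/*
		 * Ring the doorbell only if descriptors remain that the HW
		 * has not yet processed, so the queue resumes after idling.
		 */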
2258f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2259f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2260f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2261f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2262f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2263f7917c00SJeff Kirsher 		}
2264f7917c00SJeff Kirsher 	}
2265f7917c00SJeff Kirsher 
2266f7917c00SJeff Kirsher 	if (sleeping & F_RSPD_TXQ1_GTS) {
2267f7917c00SJeff Kirsher 		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2268f7917c00SJeff Kirsher 
2269f7917c00SJeff Kirsher 		if (txq->cleaned + txq->in_use != txq->processed &&
2270f7917c00SJeff Kirsher 		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2271f7917c00SJeff Kirsher 			set_bit(TXQ_RUNNING, &txq->flags);
2272f7917c00SJeff Kirsher 			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2273f7917c00SJeff Kirsher 				     V_EGRCNTX(txq->cntxt_id));
2274f7917c00SJeff Kirsher 		}
2275f7917c00SJeff Kirsher 	}
2276f7917c00SJeff Kirsher }
2277f7917c00SJeff Kirsher 
2278f7917c00SJeff Kirsher /**
2279f7917c00SJeff Kirsher  *	is_new_response - check if a response is newly written
2280f7917c00SJeff Kirsher  *	@r: the response descriptor
2281f7917c00SJeff Kirsher  *	@q: the response queue
2282f7917c00SJeff Kirsher  *
2283f7917c00SJeff Kirsher  *	Returns true if a response descriptor contains a yet unprocessed
2284f7917c00SJeff Kirsher  *	response.
2285f7917c00SJeff Kirsher  */
2286f7917c00SJeff Kirsher static inline int is_new_response(const struct rsp_desc *r,
2287f7917c00SJeff Kirsher 				  const struct sge_rspq *q)
2288f7917c00SJeff Kirsher {
2289f7917c00SJeff Kirsher 	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2290f7917c00SJeff Kirsher }
2291f7917c00SJeff Kirsher 
2292f7917c00SJeff Kirsher static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2293f7917c00SJeff Kirsher {
2294f7917c00SJeff Kirsher 	q->pg_skb = NULL;
2295f7917c00SJeff Kirsher 	q->rx_recycle_buf = 0;
2296f7917c00SJeff Kirsher }
2297f7917c00SJeff Kirsher 
2298f7917c00SJeff Kirsher #define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2299f7917c00SJeff Kirsher #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2300f7917c00SJeff Kirsher 			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2301f7917c00SJeff Kirsher 			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2302f7917c00SJeff Kirsher 			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2303f7917c00SJeff Kirsher 
2304f7917c00SJeff Kirsher /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2305f7917c00SJeff Kirsher #define NOMEM_INTR_DELAY 2500
2306f7917c00SJeff Kirsher 
2307f7917c00SJeff Kirsher /**
2308f7917c00SJeff Kirsher  *	process_responses - process responses from an SGE response queue
2309f7917c00SJeff Kirsher  *	@adap: the adapter
2310f7917c00SJeff Kirsher  *	@qs: the queue set to which the response queue belongs
2311f7917c00SJeff Kirsher  *	@budget: how many responses can be processed in this round
2312f7917c00SJeff Kirsher  *
2313f7917c00SJeff Kirsher  *	Process responses from an SGE response queue up to the supplied budget.
2314f7917c00SJeff Kirsher  *	Responses include received packets as well as credits and other events
2315f7917c00SJeff Kirsher  *	for the queues that belong to the response queue's queue set.
2316f7917c00SJeff Kirsher  *	A negative budget is effectively unlimited.
2317f7917c00SJeff Kirsher  *
2318f7917c00SJeff Kirsher  *	Additionally choose the interrupt holdoff time for the next interrupt
2319f7917c00SJeff Kirsher  *	on this queue.  If the system is under memory shortage use a fairly
2320f7917c00SJeff Kirsher  *	long delay to help recovery.
2321f7917c00SJeff Kirsher  */
2322f7917c00SJeff Kirsher static int process_responses(struct adapter *adap, struct sge_qset *qs,
2323f7917c00SJeff Kirsher 			     int budget)
2324f7917c00SJeff Kirsher {
2325f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2326f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2327f7917c00SJeff Kirsher 	int budget_left = budget;
2328f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2329f7917c00SJeff Kirsher 	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2330f7917c00SJeff Kirsher 	int ngathered = 0;
2331f7917c00SJeff Kirsher 
2332f7917c00SJeff Kirsher 	q->next_holdoff = q->holdoff_tmr;
2333f7917c00SJeff Kirsher 
2334f7917c00SJeff Kirsher 	while (likely(budget_left && is_new_response(r, q))) {
2335f7917c00SJeff Kirsher 		int packet_complete, eth, ethpad = 2;
2336f7917c00SJeff Kirsher 		int lro = !!(qs->netdev->features & NETIF_F_GRO);
2337f7917c00SJeff Kirsher 		struct sk_buff *skb = NULL;
2338f7917c00SJeff Kirsher 		u32 len, flags;
2339f7917c00SJeff Kirsher 		__be32 rss_hi, rss_lo;
2340f7917c00SJeff Kirsher 
2341019be1cfSAlexander Duyck 		dma_rmb();
2342f7917c00SJeff Kirsher 		eth = r->rss_hdr.opcode == CPL_RX_PKT;
2343f7917c00SJeff Kirsher 		rss_hi = *(const __be32 *)r;
2344f7917c00SJeff Kirsher 		rss_lo = r->rss_hdr.rss_hash_val;
2345f7917c00SJeff Kirsher 		flags = ntohl(r->flags);
2346f7917c00SJeff Kirsher 
2347f7917c00SJeff Kirsher 		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2348f7917c00SJeff Kirsher 			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2349f7917c00SJeff Kirsher 			if (!skb)
2350f7917c00SJeff Kirsher 				goto no_mem;
2351f7917c00SJeff Kirsher 
2352de77b966Syuan linyu 			__skb_put_data(skb, r, AN_PKT_SIZE);
2353f7917c00SJeff Kirsher 			skb->data[0] = CPL_ASYNC_NOTIF;
2354f7917c00SJeff Kirsher 			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2355f7917c00SJeff Kirsher 			q->async_notif++;
2356f7917c00SJeff Kirsher 		} else if (flags & F_RSPD_IMM_DATA_VALID) {
2357f7917c00SJeff Kirsher 			skb = get_imm_packet(r);
2358f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2359f7917c00SJeff Kirsher no_mem:
2360f7917c00SJeff Kirsher 				q->next_holdoff = NOMEM_INTR_DELAY;
2361f7917c00SJeff Kirsher 				q->nomem++;
2362f7917c00SJeff Kirsher 				/* consume one credit since we tried */
2363f7917c00SJeff Kirsher 				budget_left--;
2364f7917c00SJeff Kirsher 				break;
2365f7917c00SJeff Kirsher 			}
2366f7917c00SJeff Kirsher 			q->imm_data++;
2367f7917c00SJeff Kirsher 			ethpad = 0;
2368f7917c00SJeff Kirsher 		} else if ((len = ntohl(r->len_cq)) != 0) {
2369f7917c00SJeff Kirsher 			struct sge_fl *fl;
2370f7917c00SJeff Kirsher 
2371f7917c00SJeff Kirsher 			lro &= eth && is_eth_tcp(rss_hi);
2372f7917c00SJeff Kirsher 
2373f7917c00SJeff Kirsher 			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2374f7917c00SJeff Kirsher 			if (fl->use_pages) {
2375f7917c00SJeff Kirsher 				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2376f7917c00SJeff Kirsher 
2377f468f21bSTariq Toukan 				net_prefetch(addr);
2378f7917c00SJeff Kirsher 				__refill_fl(adap, fl);
2379f7917c00SJeff Kirsher 				if (lro > 0) {
2380f7917c00SJeff Kirsher 					lro_add_page(adap, qs, fl,
2381f7917c00SJeff Kirsher 						     G_RSPD_LEN(len),
2382f7917c00SJeff Kirsher 						     flags & F_RSPD_EOP);
2383f7917c00SJeff Kirsher 					goto next_fl;
2384f7917c00SJeff Kirsher 				}
2385f7917c00SJeff Kirsher 
2386f7917c00SJeff Kirsher 				skb = get_packet_pg(adap, fl, q,
2387f7917c00SJeff Kirsher 						    G_RSPD_LEN(len),
2388f7917c00SJeff Kirsher 						    eth ?
2389f7917c00SJeff Kirsher 						    SGE_RX_DROP_THRES : 0);
2390f7917c00SJeff Kirsher 				q->pg_skb = skb;
2391f7917c00SJeff Kirsher 			} else
2392f7917c00SJeff Kirsher 				skb = get_packet(adap, fl, G_RSPD_LEN(len),
2393f7917c00SJeff Kirsher 						 eth ? SGE_RX_DROP_THRES : 0);
2394f7917c00SJeff Kirsher 			if (unlikely(!skb)) {
2395f7917c00SJeff Kirsher 				if (!eth)
2396f7917c00SJeff Kirsher 					goto no_mem;
2397f7917c00SJeff Kirsher 				q->rx_drops++;
2398f7917c00SJeff Kirsher 			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2399f7917c00SJeff Kirsher 				__skb_pull(skb, 2);
2400f7917c00SJeff Kirsher next_fl:
2401f7917c00SJeff Kirsher 			if (++fl->cidx == fl->size)
2402f7917c00SJeff Kirsher 				fl->cidx = 0;
2403f7917c00SJeff Kirsher 		} else
2404f7917c00SJeff Kirsher 			q->pure_rsps++;
2405f7917c00SJeff Kirsher 
2406f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2407f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2408f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2409f7917c00SJeff Kirsher 		}
2410f7917c00SJeff Kirsher 
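		/*
		 * Advance to the next response descriptor; on wrap-around
		 * flip the generation bit so is_new_response() can tell
		 * fresh descriptors from stale ones.
		 */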
2411f7917c00SJeff Kirsher 		r++;
2412f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2413f7917c00SJeff Kirsher 			q->cidx = 0;
2414f7917c00SJeff Kirsher 			q->gen ^= 1;
2415f7917c00SJeff Kirsher 			r = q->desc;
2416f7917c00SJeff Kirsher 		}
2417f7917c00SJeff Kirsher 		prefetch(r);
2418f7917c00SJeff Kirsher 
2419f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2420f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2421f7917c00SJeff Kirsher 			q->credits = 0;
2422f7917c00SJeff Kirsher 		}
2423f7917c00SJeff Kirsher 
2424f7917c00SJeff Kirsher 		packet_complete = flags &
2425f7917c00SJeff Kirsher 				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2426f7917c00SJeff Kirsher 				   F_RSPD_ASYNC_NOTIF);
2427f7917c00SJeff Kirsher 
2428f7917c00SJeff Kirsher 		if (skb != NULL && packet_complete) {
2429f7917c00SJeff Kirsher 			if (eth)
2430f7917c00SJeff Kirsher 				rx_eth(adap, q, skb, ethpad, lro);
2431f7917c00SJeff Kirsher 			else {
2432f7917c00SJeff Kirsher 				q->offload_pkts++;
2433f7917c00SJeff Kirsher 				/* Preserve the RSS info in csum & priority */
2434f7917c00SJeff Kirsher 				skb->csum = rss_hi;
2435f7917c00SJeff Kirsher 				skb->priority = rss_lo;
2436f7917c00SJeff Kirsher 				ngathered = rx_offload(&adap->tdev, q, skb,
2437f7917c00SJeff Kirsher 						       offload_skbs,
2438f7917c00SJeff Kirsher 						       ngathered);
2439f7917c00SJeff Kirsher 			}
2440f7917c00SJeff Kirsher 
2441f7917c00SJeff Kirsher 			if (flags & F_RSPD_EOP)
2442f7917c00SJeff Kirsher 				clear_rspq_bufstate(q);
2443f7917c00SJeff Kirsher 		}
2444f7917c00SJeff Kirsher 		--budget_left;
2445f7917c00SJeff Kirsher 	}
2446f7917c00SJeff Kirsher 
2447f7917c00SJeff Kirsher 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2448f7917c00SJeff Kirsher 
2449f7917c00SJeff Kirsher 	if (sleeping)
2450f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2451f7917c00SJeff Kirsher 
2452f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2453f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2454f7917c00SJeff Kirsher 		restart_tx(qs);
2455f7917c00SJeff Kirsher 
2456f7917c00SJeff Kirsher 	budget -= budget_left;
2457f7917c00SJeff Kirsher 	return budget;
2458f7917c00SJeff Kirsher }
2459f7917c00SJeff Kirsher 
2460f7917c00SJeff Kirsher static inline int is_pure_response(const struct rsp_desc *r)
2461f7917c00SJeff Kirsher {
2462f7917c00SJeff Kirsher 	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2463f7917c00SJeff Kirsher 
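	/*
	 * A pure response carries no immediate data, no async notification
	 * and no free-list buffer (len_cq is 0).
	 */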
2464f7917c00SJeff Kirsher 	return (n | r->len_cq) == 0;
2465f7917c00SJeff Kirsher }
2466f7917c00SJeff Kirsher 
2467f7917c00SJeff Kirsher /**
2468f7917c00SJeff Kirsher  *	napi_rx_handler - the NAPI handler for Rx processing
2469f7917c00SJeff Kirsher  *	@napi: the napi instance
2470f7917c00SJeff Kirsher  *	@budget: how many packets we can process in this round
2471f7917c00SJeff Kirsher  *
2472f7917c00SJeff Kirsher  *	Handler for new data events when using NAPI.
2473f7917c00SJeff Kirsher  */
2474f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget)
2475f7917c00SJeff Kirsher {
2476f7917c00SJeff Kirsher 	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2477f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2478f7917c00SJeff Kirsher 	int work_done = process_responses(adap, qs, budget);
2479f7917c00SJeff Kirsher 
2480f7917c00SJeff Kirsher 	if (likely(work_done < budget)) {
24816ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
2482f7917c00SJeff Kirsher 
2483f7917c00SJeff Kirsher 		/*
2484f7917c00SJeff Kirsher 		 * Because we don't atomically flush the following
2485f7917c00SJeff Kirsher 		 * write it is possible that in very rare cases it can
2486f7917c00SJeff Kirsher 		 * reach the device in a way that races with a new
2487f7917c00SJeff Kirsher 		 * response being written plus an error interrupt
2488f7917c00SJeff Kirsher 		 * causing the NAPI interrupt handler below to return
2489f7917c00SJeff Kirsher 		 * unhandled status to the OS.  To protect against
2490f7917c00SJeff Kirsher 		 * this would require flushing the write and doing
2491f7917c00SJeff Kirsher 		 * both the write and the flush with interrupts off.
2492f7917c00SJeff Kirsher 		 * Way too expensive and unjustifiable given the
2493f7917c00SJeff Kirsher 		 * rarity of the race.
2494f7917c00SJeff Kirsher 		 *
2495f7917c00SJeff Kirsher 		 * The race cannot happen at all with MSI-X.
2496f7917c00SJeff Kirsher 		 */
2497f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2498f7917c00SJeff Kirsher 			     V_NEWTIMER(qs->rspq.next_holdoff) |
2499f7917c00SJeff Kirsher 			     V_NEWINDEX(qs->rspq.cidx));
2500f7917c00SJeff Kirsher 	}
2501f7917c00SJeff Kirsher 	return work_done;
2502f7917c00SJeff Kirsher }
2503f7917c00SJeff Kirsher 
2504f7917c00SJeff Kirsher /*
2505f7917c00SJeff Kirsher  * Returns true if the device is already scheduled for polling.
2506f7917c00SJeff Kirsher  */
2507f7917c00SJeff Kirsher static inline int napi_is_scheduled(struct napi_struct *napi)
2508f7917c00SJeff Kirsher {
2509f7917c00SJeff Kirsher 	return test_bit(NAPI_STATE_SCHED, &napi->state);
2510f7917c00SJeff Kirsher }
2511f7917c00SJeff Kirsher 
2512f7917c00SJeff Kirsher /**
2513f7917c00SJeff Kirsher  *	process_pure_responses - process pure responses from a response queue
2514f7917c00SJeff Kirsher  *	@adap: the adapter
2515f7917c00SJeff Kirsher  *	@qs: the queue set owning the response queue
2516f7917c00SJeff Kirsher  *	@r: the first pure response to process
2517f7917c00SJeff Kirsher  *
2518f7917c00SJeff Kirsher  *	A simpler version of process_responses() that handles only pure (i.e.,
2519f7917c00SJeff Kirsher  *	non data-carrying) responses.  Such responses are too light-weight to
2520f7917c00SJeff Kirsher  *	justify calling a softirq under NAPI, so we handle them specially in
2521f7917c00SJeff Kirsher  *	the interrupt handler.  The function is called with a pointer to a
2522f7917c00SJeff Kirsher  *	response, which the caller must ensure is a valid pure response.
2523f7917c00SJeff Kirsher  *
2524f7917c00SJeff Kirsher  *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2525f7917c00SJeff Kirsher  */
2526f7917c00SJeff Kirsher static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2527f7917c00SJeff Kirsher 				  struct rsp_desc *r)
2528f7917c00SJeff Kirsher {
2529f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2530f7917c00SJeff Kirsher 	unsigned int sleeping = 0;
2531f7917c00SJeff Kirsher 
2532f7917c00SJeff Kirsher 	do {
2533f7917c00SJeff Kirsher 		u32 flags = ntohl(r->flags);
2534f7917c00SJeff Kirsher 
2535f7917c00SJeff Kirsher 		r++;
2536f7917c00SJeff Kirsher 		if (unlikely(++q->cidx == q->size)) {
2537f7917c00SJeff Kirsher 			q->cidx = 0;
2538f7917c00SJeff Kirsher 			q->gen ^= 1;
2539f7917c00SJeff Kirsher 			r = q->desc;
2540f7917c00SJeff Kirsher 		}
2541f7917c00SJeff Kirsher 		prefetch(r);
2542f7917c00SJeff Kirsher 
2543f7917c00SJeff Kirsher 		if (flags & RSPD_CTRL_MASK) {
2544f7917c00SJeff Kirsher 			sleeping |= flags & RSPD_GTS_MASK;
2545f7917c00SJeff Kirsher 			handle_rsp_cntrl_info(qs, flags);
2546f7917c00SJeff Kirsher 		}
2547f7917c00SJeff Kirsher 
2548f7917c00SJeff Kirsher 		q->pure_rsps++;
2549f7917c00SJeff Kirsher 		if (++q->credits >= (q->size / 4)) {
2550f7917c00SJeff Kirsher 			refill_rspq(adap, q, q->credits);
2551f7917c00SJeff Kirsher 			q->credits = 0;
2552f7917c00SJeff Kirsher 		}
2553f7917c00SJeff Kirsher 		if (!is_new_response(r, q))
2554f7917c00SJeff Kirsher 			break;
2555019be1cfSAlexander Duyck 		dma_rmb();
2556f7917c00SJeff Kirsher 	} while (is_pure_response(r));
2557f7917c00SJeff Kirsher 
2558f7917c00SJeff Kirsher 	if (sleeping)
2559f7917c00SJeff Kirsher 		check_ring_db(adap, qs, sleeping);
2560f7917c00SJeff Kirsher 
2561f7917c00SJeff Kirsher 	smp_mb();		/* commit Tx queue .processed updates */
2562f7917c00SJeff Kirsher 	if (unlikely(qs->txq_stopped != 0))
2563f7917c00SJeff Kirsher 		restart_tx(qs);
2564f7917c00SJeff Kirsher 
2565f7917c00SJeff Kirsher 	return is_new_response(r, q);
2566f7917c00SJeff Kirsher }
2567f7917c00SJeff Kirsher 
2568f7917c00SJeff Kirsher /**
2569f7917c00SJeff Kirsher  *	handle_responses - decide what to do with new responses in NAPI mode
2570f7917c00SJeff Kirsher  *	@adap: the adapter
2571f7917c00SJeff Kirsher  *	@q: the response queue
2572f7917c00SJeff Kirsher  *
2573f7917c00SJeff Kirsher  *	This is used by the NAPI interrupt handlers to decide what to do with
2574f7917c00SJeff Kirsher  *	new SGE responses.  If there are no new responses it returns -1.  If
2575f7917c00SJeff Kirsher  *	there are new responses and they are pure (i.e., non-data carrying)
2576f7917c00SJeff Kirsher  *	it handles them straight in hard interrupt context as they are very
2577f7917c00SJeff Kirsher  *	cheap and don't deliver any packets.  Finally, if there are any data
2578f7917c00SJeff Kirsher  *	signaling responses it schedules the NAPI handler.  Returns 1 if it
2579f7917c00SJeff Kirsher  *	schedules NAPI, 0 if all new responses were pure.
2580f7917c00SJeff Kirsher  *
2581f7917c00SJeff Kirsher  *	The caller must ascertain NAPI is not already running.
2582f7917c00SJeff Kirsher  */
2583f7917c00SJeff Kirsher static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2584f7917c00SJeff Kirsher {
2585f7917c00SJeff Kirsher 	struct sge_qset *qs = rspq_to_qset(q);
2586f7917c00SJeff Kirsher 	struct rsp_desc *r = &q->desc[q->cidx];
2587f7917c00SJeff Kirsher 
2588f7917c00SJeff Kirsher 	if (!is_new_response(r, q))
2589f7917c00SJeff Kirsher 		return -1;
2590019be1cfSAlexander Duyck 	dma_rmb();
2591f7917c00SJeff Kirsher 	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2592f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2593f7917c00SJeff Kirsher 			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2594f7917c00SJeff Kirsher 		return 0;
2595f7917c00SJeff Kirsher 	}
2596f7917c00SJeff Kirsher 	napi_schedule(&qs->napi);
2597f7917c00SJeff Kirsher 	return 1;
2598f7917c00SJeff Kirsher }
2599f7917c00SJeff Kirsher 
2600f7917c00SJeff Kirsher /*
2601f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2602f7917c00SJeff Kirsher  * (i.e., response queue serviced in hard interrupt).
2603f7917c00SJeff Kirsher  */
2604f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2605f7917c00SJeff Kirsher {
2606f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2607f7917c00SJeff Kirsher 	struct adapter *adap = qs->adap;
2608f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2609f7917c00SJeff Kirsher 
2610f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2611f7917c00SJeff Kirsher 	if (process_responses(adap, qs, -1) == 0)
2612f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2613f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2614f7917c00SJeff Kirsher 		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2615f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2616f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2617f7917c00SJeff Kirsher }
2618f7917c00SJeff Kirsher 
2619f7917c00SJeff Kirsher /*
2620f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2621f7917c00SJeff Kirsher  * (i.e., response queue serviced by NAPI polling).
2622f7917c00SJeff Kirsher  */
2623f7917c00SJeff Kirsher static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2624f7917c00SJeff Kirsher {
2625f7917c00SJeff Kirsher 	struct sge_qset *qs = cookie;
2626f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2627f7917c00SJeff Kirsher 
2628f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2629f7917c00SJeff Kirsher 
2630f7917c00SJeff Kirsher 	if (handle_responses(qs->adap, q) < 0)
2631f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2632f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2633f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2634f7917c00SJeff Kirsher }
2635f7917c00SJeff Kirsher 
2636f7917c00SJeff Kirsher /*
2637f7917c00SJeff Kirsher  * The non-NAPI MSI interrupt handler.  This needs to handle data events from
2638f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2639f7917c00SJeff Kirsher  * the same MSI vector.  We use one SGE response queue per port in this mode
2640f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2641f7917c00SJeff Kirsher  */
2642f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi(int irq, void *cookie)
2643f7917c00SJeff Kirsher {
2644f7917c00SJeff Kirsher 	int new_packets = 0;
2645f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2646f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2647f7917c00SJeff Kirsher 
2648f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2649f7917c00SJeff Kirsher 
2650f7917c00SJeff Kirsher 	if (process_responses(adap, &adap->sge.qs[0], -1)) {
2651f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2652f7917c00SJeff Kirsher 			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2653f7917c00SJeff Kirsher 		new_packets = 1;
2654f7917c00SJeff Kirsher 	}
2655f7917c00SJeff Kirsher 
2656f7917c00SJeff Kirsher 	if (adap->params.nports == 2 &&
2657f7917c00SJeff Kirsher 	    process_responses(adap, &adap->sge.qs[1], -1)) {
2658f7917c00SJeff Kirsher 		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2659f7917c00SJeff Kirsher 
2660f7917c00SJeff Kirsher 		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2661f7917c00SJeff Kirsher 			     V_NEWTIMER(q1->next_holdoff) |
2662f7917c00SJeff Kirsher 			     V_NEWINDEX(q1->cidx));
2663f7917c00SJeff Kirsher 		new_packets = 1;
2664f7917c00SJeff Kirsher 	}
2665f7917c00SJeff Kirsher 
2666f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2667f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2668f7917c00SJeff Kirsher 
2669f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2670f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2671f7917c00SJeff Kirsher }
2672f7917c00SJeff Kirsher 
2673f7917c00SJeff Kirsher static int rspq_check_napi(struct sge_qset *qs)
2674f7917c00SJeff Kirsher {
2675f7917c00SJeff Kirsher 	struct sge_rspq *q = &qs->rspq;
2676f7917c00SJeff Kirsher 
2677f7917c00SJeff Kirsher 	if (!napi_is_scheduled(&qs->napi) &&
2678f7917c00SJeff Kirsher 	    is_new_response(&q->desc[q->cidx], q)) {
2679f7917c00SJeff Kirsher 		napi_schedule(&qs->napi);
2680f7917c00SJeff Kirsher 		return 1;
2681f7917c00SJeff Kirsher 	}
2682f7917c00SJeff Kirsher 	return 0;
2683f7917c00SJeff Kirsher }
2684f7917c00SJeff Kirsher 
2685f7917c00SJeff Kirsher /*
2686f7917c00SJeff Kirsher  * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2687f7917c00SJeff Kirsher  * by NAPI polling).  Handles data events from SGE response queues as well as
2688f7917c00SJeff Kirsher  * error and other async events as they all use the same MSI vector.  We use
2689f7917c00SJeff Kirsher  * one SGE response queue per port in this mode and protect all response
2690f7917c00SJeff Kirsher  * queues with queue 0's lock.
2691f7917c00SJeff Kirsher  */
2692f7917c00SJeff Kirsher static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2693f7917c00SJeff Kirsher {
2694f7917c00SJeff Kirsher 	int new_packets;
2695f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2696f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2697f7917c00SJeff Kirsher 
2698f7917c00SJeff Kirsher 	spin_lock(&q->lock);
2699f7917c00SJeff Kirsher 
2700f7917c00SJeff Kirsher 	new_packets = rspq_check_napi(&adap->sge.qs[0]);
2701f7917c00SJeff Kirsher 	if (adap->params.nports == 2)
2702f7917c00SJeff Kirsher 		new_packets += rspq_check_napi(&adap->sge.qs[1]);
2703f7917c00SJeff Kirsher 	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2704f7917c00SJeff Kirsher 		q->unhandled_irqs++;
2705f7917c00SJeff Kirsher 
2706f7917c00SJeff Kirsher 	spin_unlock(&q->lock);
2707f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2708f7917c00SJeff Kirsher }
2709f7917c00SJeff Kirsher 
2710f7917c00SJeff Kirsher /*
2711f7917c00SJeff Kirsher  * A helper function that processes responses and issues GTS.
2712f7917c00SJeff Kirsher  */
2713f7917c00SJeff Kirsher static inline int process_responses_gts(struct adapter *adap,
2714f7917c00SJeff Kirsher 					struct sge_rspq *rq)
2715f7917c00SJeff Kirsher {
2716f7917c00SJeff Kirsher 	int work;
2717f7917c00SJeff Kirsher 
2718f7917c00SJeff Kirsher 	work = process_responses(adap, rspq_to_qset(rq), -1);
2719f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2720f7917c00SJeff Kirsher 		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2721f7917c00SJeff Kirsher 	return work;
2722f7917c00SJeff Kirsher }
2723f7917c00SJeff Kirsher 
2724f7917c00SJeff Kirsher /*
2725f7917c00SJeff Kirsher  * The legacy INTx interrupt handler.  This needs to handle data events from
2726f7917c00SJeff Kirsher  * SGE response queues as well as error and other async events as they all use
2727f7917c00SJeff Kirsher  * the same interrupt pin.  We use one SGE response queue per port in this mode
2728f7917c00SJeff Kirsher  * and protect all response queues with queue 0's lock.
2729f7917c00SJeff Kirsher  */
2730f7917c00SJeff Kirsher static irqreturn_t t3_intr(int irq, void *cookie)
2731f7917c00SJeff Kirsher {
2732f7917c00SJeff Kirsher 	int work_done, w0, w1;
2733f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2734f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2735f7917c00SJeff Kirsher 	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2736f7917c00SJeff Kirsher 
2737f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2738f7917c00SJeff Kirsher 
2739f7917c00SJeff Kirsher 	w0 = is_new_response(&q0->desc[q0->cidx], q0);
2740f7917c00SJeff Kirsher 	w1 = adap->params.nports == 2 &&
2741f7917c00SJeff Kirsher 	    is_new_response(&q1->desc[q1->cidx], q1);
2742f7917c00SJeff Kirsher 
2743f7917c00SJeff Kirsher 	if (likely(w0 | w1)) {
2744f7917c00SJeff Kirsher 		t3_write_reg(adap, A_PL_CLI, 0);
2745f7917c00SJeff Kirsher 		t3_read_reg(adap, A_PL_CLI);	/* flush */
2746f7917c00SJeff Kirsher 
2747f7917c00SJeff Kirsher 		if (likely(w0))
2748f7917c00SJeff Kirsher 			process_responses_gts(adap, q0);
2749f7917c00SJeff Kirsher 
2750f7917c00SJeff Kirsher 		if (w1)
2751f7917c00SJeff Kirsher 			process_responses_gts(adap, q1);
2752f7917c00SJeff Kirsher 
2753f7917c00SJeff Kirsher 		work_done = w0 | w1;
2754f7917c00SJeff Kirsher 	} else
2755f7917c00SJeff Kirsher 		work_done = t3_slow_intr_handler(adap);
2756f7917c00SJeff Kirsher 
2757f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2758f7917c00SJeff Kirsher 	return IRQ_RETVAL(work_done != 0);
2759f7917c00SJeff Kirsher }
2760f7917c00SJeff Kirsher 
2761f7917c00SJeff Kirsher /*
2762f7917c00SJeff Kirsher  * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2763f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2764f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2765f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2766f7917c00SJeff Kirsher  * queue 0's lock.
2767f7917c00SJeff Kirsher  */
2768f7917c00SJeff Kirsher static irqreturn_t t3b_intr(int irq, void *cookie)
2769f7917c00SJeff Kirsher {
2770f7917c00SJeff Kirsher 	u32 map;
2771f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2772f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2773f7917c00SJeff Kirsher 
2774f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2775f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2776f7917c00SJeff Kirsher 
2777f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2778f7917c00SJeff Kirsher 		return IRQ_NONE;
2779f7917c00SJeff Kirsher 
2780f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2781f7917c00SJeff Kirsher 
2782f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2783f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2784f7917c00SJeff Kirsher 
2785f7917c00SJeff Kirsher 	if (likely(map & 1))
2786f7917c00SJeff Kirsher 		process_responses_gts(adap, q0);
2787f7917c00SJeff Kirsher 
2788f7917c00SJeff Kirsher 	if (map & 2)
2789f7917c00SJeff Kirsher 		process_responses_gts(adap, &adap->sge.qs[1].rspq);
2790f7917c00SJeff Kirsher 
2791f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2792f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2793f7917c00SJeff Kirsher }
2794f7917c00SJeff Kirsher 
2795f7917c00SJeff Kirsher /*
2796f7917c00SJeff Kirsher  * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2797f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2798f7917c00SJeff Kirsher  * async events as they all use the same interrupt pin.  We use one SGE
2799f7917c00SJeff Kirsher  * response queue per port in this mode and protect all response queues with
2800f7917c00SJeff Kirsher  * queue 0's lock.
2801f7917c00SJeff Kirsher  */
2802f7917c00SJeff Kirsher static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2803f7917c00SJeff Kirsher {
2804f7917c00SJeff Kirsher 	u32 map;
2805f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2806f7917c00SJeff Kirsher 	struct sge_qset *qs0 = &adap->sge.qs[0];
2807f7917c00SJeff Kirsher 	struct sge_rspq *q0 = &qs0->rspq;
2808f7917c00SJeff Kirsher 
2809f7917c00SJeff Kirsher 	t3_write_reg(adap, A_PL_CLI, 0);
2810f7917c00SJeff Kirsher 	map = t3_read_reg(adap, A_SG_DATA_INTR);
2811f7917c00SJeff Kirsher 
2812f7917c00SJeff Kirsher 	if (unlikely(!map))	/* shared interrupt, most likely */
2813f7917c00SJeff Kirsher 		return IRQ_NONE;
2814f7917c00SJeff Kirsher 
2815f7917c00SJeff Kirsher 	spin_lock(&q0->lock);
2816f7917c00SJeff Kirsher 
2817f7917c00SJeff Kirsher 	if (unlikely(map & F_ERRINTR))
2818f7917c00SJeff Kirsher 		t3_slow_intr_handler(adap);
2819f7917c00SJeff Kirsher 
2820f7917c00SJeff Kirsher 	if (likely(map & 1))
2821f7917c00SJeff Kirsher 		napi_schedule(&qs0->napi);
2822f7917c00SJeff Kirsher 
2823f7917c00SJeff Kirsher 	if (map & 2)
2824f7917c00SJeff Kirsher 		napi_schedule(&adap->sge.qs[1].napi);
2825f7917c00SJeff Kirsher 
2826f7917c00SJeff Kirsher 	spin_unlock(&q0->lock);
2827f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2828f7917c00SJeff Kirsher }
2829f7917c00SJeff Kirsher 
2830f7917c00SJeff Kirsher /**
2831f7917c00SJeff Kirsher  *	t3_intr_handler - select the top-level interrupt handler
2832f7917c00SJeff Kirsher  *	@adap: the adapter
2833f7917c00SJeff Kirsher  *	@polling: whether using NAPI to service response queues
2834f7917c00SJeff Kirsher  *
2835f7917c00SJeff Kirsher  *	Selects the top-level interrupt handler based on the type of interrupts
2836f7917c00SJeff Kirsher  *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2837f7917c00SJeff Kirsher  *	response queues.
2838f7917c00SJeff Kirsher  */
2839f7917c00SJeff Kirsher irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2840f7917c00SJeff Kirsher {
2841f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX)
2842f7917c00SJeff Kirsher 		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2843f7917c00SJeff Kirsher 	if (adap->flags & USING_MSI)
2844f7917c00SJeff Kirsher 		return polling ? t3_intr_msi_napi : t3_intr_msi;
2845f7917c00SJeff Kirsher 	if (adap->params.rev > 0)
2846f7917c00SJeff Kirsher 		return polling ? t3b_intr_napi : t3b_intr;
2847f7917c00SJeff Kirsher 	return t3_intr;
2848f7917c00SJeff Kirsher }
2849f7917c00SJeff Kirsher 
2850f7917c00SJeff Kirsher #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2851f7917c00SJeff Kirsher 		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2852f7917c00SJeff Kirsher 		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2853f7917c00SJeff Kirsher 		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2854f7917c00SJeff Kirsher 		    F_HIRCQPARITYERROR)
2855f7917c00SJeff Kirsher #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2856f7917c00SJeff Kirsher #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2857f7917c00SJeff Kirsher 		      F_RSPQDISABLED)
2858f7917c00SJeff Kirsher 
2859f7917c00SJeff Kirsher /**
2860f7917c00SJeff Kirsher  *	t3_sge_err_intr_handler - SGE async event interrupt handler
2861f7917c00SJeff Kirsher  *	@adapter: the adapter
2862f7917c00SJeff Kirsher  *
2863f7917c00SJeff Kirsher  *	Interrupt handler for SGE asynchronous (non-data) events.
2864f7917c00SJeff Kirsher  */
2865f7917c00SJeff Kirsher void t3_sge_err_intr_handler(struct adapter *adapter)
2866f7917c00SJeff Kirsher {
2867f7917c00SJeff Kirsher 	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2868f7917c00SJeff Kirsher 				 ~F_FLEMPTY;
2869f7917c00SJeff Kirsher 
2870f7917c00SJeff Kirsher 	if (status & SGE_PARERR)
2871f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2872f7917c00SJeff Kirsher 			 status & SGE_PARERR);
2873f7917c00SJeff Kirsher 	if (status & SGE_FRAMINGERR)
2874f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2875f7917c00SJeff Kirsher 			 status & SGE_FRAMINGERR);
2876f7917c00SJeff Kirsher 
2877f7917c00SJeff Kirsher 	if (status & F_RSPQCREDITOVERFOW)
2878f7917c00SJeff Kirsher 		CH_ALERT(adapter, "SGE response queue credit overflow\n");
2879f7917c00SJeff Kirsher 
2880f7917c00SJeff Kirsher 	if (status & F_RSPQDISABLED) {
2881f7917c00SJeff Kirsher 		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2882f7917c00SJeff Kirsher 
2883f7917c00SJeff Kirsher 		CH_ALERT(adapter,
2884f7917c00SJeff Kirsher 			 "packet delivered to disabled response queue "
2885f7917c00SJeff Kirsher 			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2886f7917c00SJeff Kirsher 	}
2887f7917c00SJeff Kirsher 
2888f7917c00SJeff Kirsher 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2889f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_drop_task);
2890f7917c00SJeff Kirsher 
2891f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2892f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_full_task);
2893f7917c00SJeff Kirsher 
2894f7917c00SJeff Kirsher 	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2895f7917c00SJeff Kirsher 		queue_work(cxgb3_wq, &adapter->db_empty_task);
2896f7917c00SJeff Kirsher 
2897f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2898f7917c00SJeff Kirsher 	if (status &  SGE_FATALERR)
2899f7917c00SJeff Kirsher 		t3_fatal_err(adapter);
2900f7917c00SJeff Kirsher }
2901f7917c00SJeff Kirsher 
2902f7917c00SJeff Kirsher /**
2903f7917c00SJeff Kirsher  *	sge_timer_tx - perform periodic maintenance of an SGE qset
2904d0ea5cbdSJesse Brandeburg  *	@t: a timer list containing the SGE queue set to maintain
2905f7917c00SJeff Kirsher  *
2906f7917c00SJeff Kirsher  *	Runs periodically from a timer to perform maintenance of an SGE queue
2907f7917c00SJeff Kirsher  *	set.  It performs two tasks:
2908f7917c00SJeff Kirsher  *
2909f7917c00SJeff Kirsher  *	Cleans up any completed Tx descriptors that may still be pending.
2910f7917c00SJeff Kirsher  *	Normal descriptor cleanup happens when new packets are added to a Tx
2911f7917c00SJeff Kirsher  *	queue so this timer is relatively infrequent and does any cleanup only
2912f7917c00SJeff Kirsher  *	if the Tx queue has not seen any new packets in a while.  We make a
2913f7917c00SJeff Kirsher  *	best effort attempt to reclaim descriptors, in that we don't wait
2914f7917c00SJeff Kirsher  *	around if we cannot get a queue's lock (which most likely is because
2915f7917c00SJeff Kirsher  *	someone else is queueing new packets and so will also handle the clean
2916f7917c00SJeff Kirsher  *	up).  Since control queues use immediate data exclusively we don't
2917f7917c00SJeff Kirsher  *	bother cleaning them up here.
2918f7917c00SJeff Kirsher  *
2919f7917c00SJeff Kirsher  */
29200e23daebSKees Cook static void sge_timer_tx(struct timer_list *t)
2921f7917c00SJeff Kirsher {
29220e23daebSKees Cook 	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2923f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2924f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2925f7917c00SJeff Kirsher 	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2926f7917c00SJeff Kirsher 	unsigned long next_period;
2927f7917c00SJeff Kirsher 
2928f7917c00SJeff Kirsher 	if (__netif_tx_trylock(qs->tx_q)) {
2929f7917c00SJeff Kirsher 		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2930f7917c00SJeff Kirsher 						     TX_RECLAIM_TIMER_CHUNK);
2931f7917c00SJeff Kirsher 		__netif_tx_unlock(qs->tx_q);
2932f7917c00SJeff Kirsher 	}
2933f7917c00SJeff Kirsher 
2934f7917c00SJeff Kirsher 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2935f7917c00SJeff Kirsher 		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2936f7917c00SJeff Kirsher 						     TX_RECLAIM_TIMER_CHUNK);
2937f7917c00SJeff Kirsher 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
2938f7917c00SJeff Kirsher 	}
2939f7917c00SJeff Kirsher 
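	/*
	 * The more descriptors were reclaimed this run, the busier the queues
	 * are, so shorten the period until the next reclaim accordingly.
	 */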
2940f7917c00SJeff Kirsher 	next_period = TX_RECLAIM_PERIOD >>
2941f7917c00SJeff Kirsher 		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2942f7917c00SJeff Kirsher 		      TX_RECLAIM_TIMER_CHUNK);
2943f7917c00SJeff Kirsher 	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2944f7917c00SJeff Kirsher }
2945f7917c00SJeff Kirsher 
294649ce9c2cSBen Hutchings /**
2947f7917c00SJeff Kirsher  *	sge_timer_rx - perform periodic maintenance of an SGE qset
2948d0ea5cbdSJesse Brandeburg  *	@t: the timer list containing the SGE queue set to maintain
2949f7917c00SJeff Kirsher  *
2950f7917c00SJeff Kirsher  *	a) Replenishes Rx queues that have run out due to memory shortage.
2951f7917c00SJeff Kirsher  *	Normally new Rx buffers are added when existing ones are consumed but
2952f7917c00SJeff Kirsher  *	when out of memory a queue can become empty.  We try to add only a few
2953f7917c00SJeff Kirsher  *	buffers here, the queue will be replenished fully as these new buffers
2954f7917c00SJeff Kirsher  *	are used up if memory shortage has subsided.
2955f7917c00SJeff Kirsher  *
2956f7917c00SJeff Kirsher  *	b) Return coalesced response queue credits in case a response queue is
2957f7917c00SJeff Kirsher  *	starved.
2958f7917c00SJeff Kirsher  *
2959f7917c00SJeff Kirsher  */
29600e23daebSKees Cook static void sge_timer_rx(struct timer_list *t)
2961f7917c00SJeff Kirsher {
2962f7917c00SJeff Kirsher 	spinlock_t *lock;
29630e23daebSKees Cook 	struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2964f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(qs->netdev);
2965f7917c00SJeff Kirsher 	struct adapter *adap = pi->adapter;
2966f7917c00SJeff Kirsher 	u32 status;
2967f7917c00SJeff Kirsher 
2968f7917c00SJeff Kirsher 	lock = adap->params.rev > 0 ?
2969f7917c00SJeff Kirsher 	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2970f7917c00SJeff Kirsher 
2971f7917c00SJeff Kirsher 	if (!spin_trylock_irq(lock))
2972f7917c00SJeff Kirsher 		goto out;
2973f7917c00SJeff Kirsher 
2974f7917c00SJeff Kirsher 	if (napi_is_scheduled(&qs->napi))
2975f7917c00SJeff Kirsher 		goto unlock;
2976f7917c00SJeff Kirsher 
2977f7917c00SJeff Kirsher 	if (adap->params.rev < 4) {
2978f7917c00SJeff Kirsher 		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2979f7917c00SJeff Kirsher 
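		/*
		 * The queue's FL_STATUS bit indicates it has run out of
		 * response queue credits; return a single credit to unstick
		 * it and clear the starvation indication.
		 */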
2980f7917c00SJeff Kirsher 		if (status & (1 << qs->rspq.cntxt_id)) {
2981f7917c00SJeff Kirsher 			qs->rspq.starved++;
2982f7917c00SJeff Kirsher 			if (qs->rspq.credits) {
2983f7917c00SJeff Kirsher 				qs->rspq.credits--;
2984f7917c00SJeff Kirsher 				refill_rspq(adap, &qs->rspq, 1);
2985f7917c00SJeff Kirsher 				qs->rspq.restarted++;
2986f7917c00SJeff Kirsher 				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2987f7917c00SJeff Kirsher 					     1 << qs->rspq.cntxt_id);
2988f7917c00SJeff Kirsher 			}
2989f7917c00SJeff Kirsher 		}
2990f7917c00SJeff Kirsher 	}
2991f7917c00SJeff Kirsher 
2992f7917c00SJeff Kirsher 	if (qs->fl[0].credits < qs->fl[0].size)
2993f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[0]);
2994f7917c00SJeff Kirsher 	if (qs->fl[1].credits < qs->fl[1].size)
2995f7917c00SJeff Kirsher 		__refill_fl(adap, &qs->fl[1]);
2996f7917c00SJeff Kirsher 
2997f7917c00SJeff Kirsher unlock:
2998f7917c00SJeff Kirsher 	spin_unlock_irq(lock);
2999f7917c00SJeff Kirsher out:
3000f7917c00SJeff Kirsher 	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3001f7917c00SJeff Kirsher }
3002f7917c00SJeff Kirsher 
3003f7917c00SJeff Kirsher /**
3004f7917c00SJeff Kirsher  *	t3_update_qset_coalesce - update coalescing settings for a queue set
3005f7917c00SJeff Kirsher  *	@qs: the SGE queue set
3006f7917c00SJeff Kirsher  *	@p: new queue set parameters
3007f7917c00SJeff Kirsher  *
3008f7917c00SJeff Kirsher  *	Update the coalescing settings for an SGE queue set.  Nothing is done
3009f7917c00SJeff Kirsher  *	if the queue set is not initialized yet.
3010f7917c00SJeff Kirsher  */
3011f7917c00SJeff Kirsher void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3012f7917c00SJeff Kirsher {
3013f7917c00SJeff Kirsher 	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
3014f7917c00SJeff Kirsher 	qs->rspq.polling = p->polling;
3015f7917c00SJeff Kirsher 	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
3016f7917c00SJeff Kirsher }
3017f7917c00SJeff Kirsher 
3018f7917c00SJeff Kirsher /**
3019f7917c00SJeff Kirsher  *	t3_sge_alloc_qset - initialize an SGE queue set
3020f7917c00SJeff Kirsher  *	@adapter: the adapter
3021f7917c00SJeff Kirsher  *	@id: the queue set id
3022f7917c00SJeff Kirsher  *	@nports: how many Ethernet ports will be using this queue set
3023f7917c00SJeff Kirsher  *	@irq_vec_idx: the IRQ vector index for response queue interrupts
3024f7917c00SJeff Kirsher  *	@p: configuration parameters for this queue set
3025f7917c00SJeff Kirsher  *	@ntxq: number of Tx queues for the queue set
3026d0ea5cbdSJesse Brandeburg  *	@dev: net device associated with this queue set
3027f7917c00SJeff Kirsher  *	@netdevq: net device TX queue associated with this queue set
3028f7917c00SJeff Kirsher  *
3029f7917c00SJeff Kirsher  *	Allocate resources and initialize an SGE queue set.  A queue set
3030f7917c00SJeff Kirsher  *	comprises a response queue, two Rx free-buffer queues, and up to 3
3031f7917c00SJeff Kirsher  *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
3032f7917c00SJeff Kirsher  *	queue, offload queue, and control queue.
3033f7917c00SJeff Kirsher  */
3034f7917c00SJeff Kirsher int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3035f7917c00SJeff Kirsher 		      int irq_vec_idx, const struct qset_params *p,
3036f7917c00SJeff Kirsher 		      int ntxq, struct net_device *dev,
3037f7917c00SJeff Kirsher 		      struct netdev_queue *netdevq)
3038f7917c00SJeff Kirsher {
3039f7917c00SJeff Kirsher 	int i, avail, ret = -ENOMEM;
3040f7917c00SJeff Kirsher 	struct sge_qset *q = &adapter->sge.qs[id];
3041f7917c00SJeff Kirsher 
3042f7917c00SJeff Kirsher 	init_qset_cntxt(q, id);
30430e23daebSKees Cook 	timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
30440e23daebSKees Cook 	timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
3045f7917c00SJeff Kirsher 
3046f7917c00SJeff Kirsher 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3047f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3048f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3049f7917c00SJeff Kirsher 				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
3050f7917c00SJeff Kirsher 	if (!q->fl[0].desc)
3051f7917c00SJeff Kirsher 		goto err;
3052f7917c00SJeff Kirsher 
3053f7917c00SJeff Kirsher 	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3054f7917c00SJeff Kirsher 				   sizeof(struct rx_desc),
3055f7917c00SJeff Kirsher 				   sizeof(struct rx_sw_desc),
3056f7917c00SJeff Kirsher 				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
3057f7917c00SJeff Kirsher 	if (!q->fl[1].desc)
3058f7917c00SJeff Kirsher 		goto err;
3059f7917c00SJeff Kirsher 
3060f7917c00SJeff Kirsher 	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3061f7917c00SJeff Kirsher 				  sizeof(struct rsp_desc), 0,
3062f7917c00SJeff Kirsher 				  &q->rspq.phys_addr, NULL);
3063f7917c00SJeff Kirsher 	if (!q->rspq.desc)
3064f7917c00SJeff Kirsher 		goto err;
3065f7917c00SJeff Kirsher 
3066f7917c00SJeff Kirsher 	for (i = 0; i < ntxq; ++i) {
3067f7917c00SJeff Kirsher 		/*
3068f7917c00SJeff Kirsher 		 * The control queue always uses immediate data so does not
3069f7917c00SJeff Kirsher 		 * need to keep track of any sk_buffs.
3070f7917c00SJeff Kirsher 		 */
3071f7917c00SJeff Kirsher 		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3072f7917c00SJeff Kirsher 
3073f7917c00SJeff Kirsher 		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3074f7917c00SJeff Kirsher 					    sizeof(struct tx_desc), sz,
3075f7917c00SJeff Kirsher 					    &q->txq[i].phys_addr,
3076f7917c00SJeff Kirsher 					    &q->txq[i].sdesc);
3077f7917c00SJeff Kirsher 		if (!q->txq[i].desc)
3078f7917c00SJeff Kirsher 			goto err;
3079f7917c00SJeff Kirsher 
3080f7917c00SJeff Kirsher 		q->txq[i].gen = 1;
3081f7917c00SJeff Kirsher 		q->txq[i].size = p->txq_size[i];
3082f7917c00SJeff Kirsher 		spin_lock_init(&q->txq[i].lock);
3083f7917c00SJeff Kirsher 		skb_queue_head_init(&q->txq[i].sendq);
3084f7917c00SJeff Kirsher 	}
3085f7917c00SJeff Kirsher 
30865e0b8928SÍñigo Huguet 	INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
30875e0b8928SÍñigo Huguet 	INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
3088f7917c00SJeff Kirsher 
3089f7917c00SJeff Kirsher 	q->fl[0].gen = q->fl[1].gen = 1;
3090f7917c00SJeff Kirsher 	q->fl[0].size = p->fl_size;
3091f7917c00SJeff Kirsher 	q->fl[1].size = p->jumbo_size;
3092f7917c00SJeff Kirsher 
3093f7917c00SJeff Kirsher 	q->rspq.gen = 1;
3094f7917c00SJeff Kirsher 	q->rspq.size = p->rspq_size;
3095f7917c00SJeff Kirsher 	spin_lock_init(&q->rspq.lock);
3096f7917c00SJeff Kirsher 	skb_queue_head_init(&q->rspq.rx_queue);
3097f7917c00SJeff Kirsher 
3098f7917c00SJeff Kirsher 	q->txq[TXQ_ETH].stop_thres = nports *
3099f7917c00SJeff Kirsher 	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3100f7917c00SJeff Kirsher 
3101f7917c00SJeff Kirsher #if FL0_PG_CHUNK_SIZE > 0
3102f7917c00SJeff Kirsher 	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3103f7917c00SJeff Kirsher #else
3104f7917c00SJeff Kirsher 	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3105f7917c00SJeff Kirsher #endif
3106f7917c00SJeff Kirsher #if FL1_PG_CHUNK_SIZE > 0
3107f7917c00SJeff Kirsher 	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3108f7917c00SJeff Kirsher #else
3109f7917c00SJeff Kirsher 	q->fl[1].buf_size = is_offload(adapter) ?
3110f7917c00SJeff Kirsher 		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3111f7917c00SJeff Kirsher 		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3112f7917c00SJeff Kirsher #endif
3113f7917c00SJeff Kirsher 
3114f7917c00SJeff Kirsher 	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3115f7917c00SJeff Kirsher 	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3116f7917c00SJeff Kirsher 	q->fl[0].order = FL0_PG_ORDER;
3117f7917c00SJeff Kirsher 	q->fl[1].order = FL1_PG_ORDER;
3118f7917c00SJeff Kirsher 	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3119f7917c00SJeff Kirsher 	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3120f7917c00SJeff Kirsher 
3121f7917c00SJeff Kirsher 	spin_lock_irq(&adapter->sge.reg_lock);
3122f7917c00SJeff Kirsher 
3123f7917c00SJeff Kirsher 	/* FL threshold comparison uses < */
3124f7917c00SJeff Kirsher 	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3125f7917c00SJeff Kirsher 				   q->rspq.phys_addr, q->rspq.size,
3126f7917c00SJeff Kirsher 				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3127f7917c00SJeff Kirsher 	if (ret)
3128f7917c00SJeff Kirsher 		goto err_unlock;
3129f7917c00SJeff Kirsher 
3130f7917c00SJeff Kirsher 	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3131f7917c00SJeff Kirsher 		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3132f7917c00SJeff Kirsher 					  q->fl[i].phys_addr, q->fl[i].size,
3133f7917c00SJeff Kirsher 					  q->fl[i].buf_size - SGE_PG_RSVD,
3134f7917c00SJeff Kirsher 					  p->cong_thres, 1, 0);
3135f7917c00SJeff Kirsher 		if (ret)
3136f7917c00SJeff Kirsher 			goto err_unlock;
3137f7917c00SJeff Kirsher 	}
3138f7917c00SJeff Kirsher 
3139f7917c00SJeff Kirsher 	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3140f7917c00SJeff Kirsher 				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3141f7917c00SJeff Kirsher 				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3142f7917c00SJeff Kirsher 				 1, 0);
3143f7917c00SJeff Kirsher 	if (ret)
3144f7917c00SJeff Kirsher 		goto err_unlock;
3145f7917c00SJeff Kirsher 
3146f7917c00SJeff Kirsher 	if (ntxq > 1) {
3147f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3148f7917c00SJeff Kirsher 					 USE_GTS, SGE_CNTXT_OFLD, id,
3149f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].phys_addr,
3150f7917c00SJeff Kirsher 					 q->txq[TXQ_OFLD].size, 0, 1, 0);
3151f7917c00SJeff Kirsher 		if (ret)
3152f7917c00SJeff Kirsher 			goto err_unlock;
3153f7917c00SJeff Kirsher 	}
3154f7917c00SJeff Kirsher 
3155f7917c00SJeff Kirsher 	if (ntxq > 2) {
3156f7917c00SJeff Kirsher 		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3157f7917c00SJeff Kirsher 					 SGE_CNTXT_CTRL, id,
3158f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].phys_addr,
3159f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].size,
3160f7917c00SJeff Kirsher 					 q->txq[TXQ_CTRL].token, 1, 0);
3161f7917c00SJeff Kirsher 		if (ret)
3162f7917c00SJeff Kirsher 			goto err_unlock;
3163f7917c00SJeff Kirsher 	}
3164f7917c00SJeff Kirsher 
3165f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3166f7917c00SJeff Kirsher 
3167f7917c00SJeff Kirsher 	q->adap = adapter;
3168f7917c00SJeff Kirsher 	q->netdev = dev;
3169f7917c00SJeff Kirsher 	q->tx_q = netdevq;
3170f7917c00SJeff Kirsher 	t3_update_qset_coalesce(q, p);
3171f7917c00SJeff Kirsher 
3172f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3173f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3174f7917c00SJeff Kirsher 	if (!avail) {
3175f7917c00SJeff Kirsher 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3176ff992489SZhang Changzhong 		ret = -ENOMEM;
3177f7917c00SJeff Kirsher 		goto err;
3178f7917c00SJeff Kirsher 	}
3179f7917c00SJeff Kirsher 	if (avail < q->fl[0].size)
3180f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3181f7917c00SJeff Kirsher 			avail);
3182f7917c00SJeff Kirsher 
3183f7917c00SJeff Kirsher 	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3184f7917c00SJeff Kirsher 			  GFP_KERNEL | __GFP_COMP);
3185f7917c00SJeff Kirsher 	if (avail < q->fl[1].size)
3186f7917c00SJeff Kirsher 		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3187f7917c00SJeff Kirsher 			avail);
3188f7917c00SJeff Kirsher 	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3189f7917c00SJeff Kirsher 
3190f7917c00SJeff Kirsher 	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3191f7917c00SJeff Kirsher 		     V_NEWTIMER(q->rspq.holdoff_tmr));
3192f7917c00SJeff Kirsher 
3193f7917c00SJeff Kirsher 	return 0;
3194f7917c00SJeff Kirsher 
3195f7917c00SJeff Kirsher err_unlock:
3196f7917c00SJeff Kirsher 	spin_unlock_irq(&adapter->sge.reg_lock);
3197f7917c00SJeff Kirsher err:
3198f7917c00SJeff Kirsher 	t3_free_qset(adapter, q);
3199f7917c00SJeff Kirsher 	return ret;
3200f7917c00SJeff Kirsher }
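/*
 * Illustrative sketch only (not part of the driver): a simplified bring-up
 * path that allocates one queue set per port and then enables the SGE.  The
 * function name, the one-qset-per-port mapping, the IRQ vector assignment and
 * the use of adap->port[] are assumptions made for illustration.
 */
#if 0
static int example_setup_sge_qsets(struct adapter *adap, int nports)
{
	int i, err;

	for (i = 0; i < nports; i++) {
		struct net_device *dev = adap->port[i];

		err = t3_sge_alloc_qset(adap, i, 1, i,
					&adap->params.sge.qset[i],
					SGE_TXQ_PER_SET, dev,
					netdev_get_tx_queue(dev, 0));
		if (err) {
			t3_free_sge_resources(adap);	/* undo partial setup */
			return err;
		}
	}

	t3_start_sge_timers(adap);	/* arm the per-qset reclaim timers */
	t3_sge_start(adap);		/* last step: enable SGE DMA */
	return 0;
}
#endif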
3201f7917c00SJeff Kirsher 
3202f7917c00SJeff Kirsher /**
3203f7917c00SJeff Kirsher  *	t3_start_sge_timers - start SGE timer callbacks
3204f7917c00SJeff Kirsher  *	@adap: the adapter
3205f7917c00SJeff Kirsher  *
3206f7917c00SJeff Kirsher  *	Starts each SGE queue set's timer callbacks.
3207f7917c00SJeff Kirsher  */
3208f7917c00SJeff Kirsher void t3_start_sge_timers(struct adapter *adap)
3209f7917c00SJeff Kirsher {
3210f7917c00SJeff Kirsher 	int i;
3211f7917c00SJeff Kirsher 
3212f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3213f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3214f7917c00SJeff Kirsher 
3215f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
32162acc0abcSColin Ian King 			mod_timer(&q->tx_reclaim_timer,
32172acc0abcSColin Ian King 				  jiffies + TX_RECLAIM_PERIOD);
3218f7917c00SJeff Kirsher 
3219f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
32202acc0abcSColin Ian King 			mod_timer(&q->rx_reclaim_timer,
32212acc0abcSColin Ian King 				  jiffies + RX_RECLAIM_PERIOD);
3222f7917c00SJeff Kirsher 	}
3223f7917c00SJeff Kirsher }
3224f7917c00SJeff Kirsher 
3225f7917c00SJeff Kirsher /**
3226f7917c00SJeff Kirsher  *	t3_stop_sge_timers - stop SGE timer callbacks
3227f7917c00SJeff Kirsher  *	@adap: the adapter
3228f7917c00SJeff Kirsher  *
3229f7917c00SJeff Kirsher  *	Stops each SGE queue set's timer callbacks.
3230f7917c00SJeff Kirsher  */
3231f7917c00SJeff Kirsher void t3_stop_sge_timers(struct adapter *adap)
3232f7917c00SJeff Kirsher {
3233f7917c00SJeff Kirsher 	int i;
3234f7917c00SJeff Kirsher 
3235f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3236f7917c00SJeff Kirsher 		struct sge_qset *q = &adap->sge.qs[i];
3237f7917c00SJeff Kirsher 
3238f7917c00SJeff Kirsher 		if (q->tx_reclaim_timer.function)
3239f7917c00SJeff Kirsher 			del_timer_sync(&q->tx_reclaim_timer);
3240f7917c00SJeff Kirsher 		if (q->rx_reclaim_timer.function)
3241f7917c00SJeff Kirsher 			del_timer_sync(&q->rx_reclaim_timer);
3242f7917c00SJeff Kirsher 	}
3243f7917c00SJeff Kirsher }
3244f7917c00SJeff Kirsher 
3245f7917c00SJeff Kirsher /**
3246f7917c00SJeff Kirsher  *	t3_free_sge_resources - free SGE resources
3247f7917c00SJeff Kirsher  *	@adap: the adapter
3248f7917c00SJeff Kirsher  *
3249f7917c00SJeff Kirsher  *	Frees resources used by the SGE queue sets.
3250f7917c00SJeff Kirsher  */
3251f7917c00SJeff Kirsher void t3_free_sge_resources(struct adapter *adap)
3252f7917c00SJeff Kirsher {
3253f7917c00SJeff Kirsher 	int i;
3254f7917c00SJeff Kirsher 
3255f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i)
3256f7917c00SJeff Kirsher 		t3_free_qset(adap, &adap->sge.qs[i]);
3257f7917c00SJeff Kirsher }
3258f7917c00SJeff Kirsher 
3259f7917c00SJeff Kirsher /**
3260f7917c00SJeff Kirsher  *	t3_sge_start - enable SGE
3261f7917c00SJeff Kirsher  *	@adap: the adapter
3262f7917c00SJeff Kirsher  *
3263f7917c00SJeff Kirsher  *	Enables the SGE for DMAs.  This is the last step in starting packet
3264f7917c00SJeff Kirsher  *	transfers.
3265f7917c00SJeff Kirsher  */
3266f7917c00SJeff Kirsher void t3_sge_start(struct adapter *adap)
3267f7917c00SJeff Kirsher {
3268f7917c00SJeff Kirsher 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3269f7917c00SJeff Kirsher }
3270f7917c00SJeff Kirsher 
3271f7917c00SJeff Kirsher /**
3272a17409e7SThomas Gleixner  *	t3_sge_stop_dma - Disable SGE DMA engine operation
3273f7917c00SJeff Kirsher  *	@adap: the adapter
3274f7917c00SJeff Kirsher  *
3275a17409e7SThomas Gleixner  *	Can be invoked from interrupt context, e.g. from an error handler.
3276a17409e7SThomas Gleixner  *
32775e0b8928SÍñigo Huguet  *	Note that this function cannot cancel the queue restart work items,
3278a17409e7SThomas Gleixner  *	as it cannot wait if called from interrupt context; however, the
32795e0b8928SÍñigo Huguet  *	work items will have no effect since the doorbells are disabled.  The
3280a17409e7SThomas Gleixner  *	driver will call t3_sge_stop() later from process context, at which
32815e0b8928SÍñigo Huguet  *	time the work items will be cancelled if they are still running.
3282a17409e7SThomas Gleixner  */
3283a17409e7SThomas Gleixner void t3_sge_stop_dma(struct adapter *adap)
3284a17409e7SThomas Gleixner {
3285a17409e7SThomas Gleixner 	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3286a17409e7SThomas Gleixner }
3287a17409e7SThomas Gleixner 
3288a17409e7SThomas Gleixner /**
3289a17409e7SThomas Gleixner  *	t3_sge_stop - disable SGE operation completely
3290a17409e7SThomas Gleixner  *	@adap: the adapter
3291a17409e7SThomas Gleixner  *
3292a17409e7SThomas Gleixner  *	Called from process context.  Disables the DMA engine and cancels
32935e0b8928SÍñigo Huguet  *	any pending queue restart work items.
3294f7917c00SJeff Kirsher  */
3295f7917c00SJeff Kirsher void t3_sge_stop(struct adapter *adap)
3296f7917c00SJeff Kirsher {
3297f7917c00SJeff Kirsher 	int i;
3298f7917c00SJeff Kirsher 
3299a17409e7SThomas Gleixner 	t3_sge_stop_dma(adap);
3300a17409e7SThomas Gleixner 
3301be27a47aSHeiner Kallweit 	/* workqueues aren't initialized otherwise */
3302be27a47aSHeiner Kallweit 	if (!(adap->flags & FULL_INIT_DONE))
3303be27a47aSHeiner Kallweit 		return;
3304f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3305f7917c00SJeff Kirsher 		struct sge_qset *qs = &adap->sge.qs[i];
3306f7917c00SJeff Kirsher 
33075e0b8928SÍñigo Huguet 		cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
3308d5a73dcfSÍñigo Huguet 		cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
3309f7917c00SJeff Kirsher 	}
3310f7917c00SJeff Kirsher }
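/*
 * Illustrative sketch only (not part of the driver): a possible teardown
 * order.  t3_sge_stop_dma() alone is what an interrupt-context error handler
 * may call; the full sequence below must run from process context.  The
 * function name is an assumption made for illustration.
 */
#if 0
static void example_sge_teardown(struct adapter *adap)
{
	t3_sge_stop(adap);		/* stop DMA, cancel restart work items */
	t3_stop_sge_timers(adap);	/* no further reclaim timer callbacks */
	t3_free_sge_resources(adap);	/* release rings and Rx buffers */
}
#endif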
3311f7917c00SJeff Kirsher 
3312f7917c00SJeff Kirsher /**
3313f7917c00SJeff Kirsher  *	t3_sge_init - initialize SGE
3314f7917c00SJeff Kirsher  *	@adap: the adapter
3315f7917c00SJeff Kirsher  *	@p: the SGE parameters
3316f7917c00SJeff Kirsher  *
3317f7917c00SJeff Kirsher  *	Performs SGE initialization needed every time after a chip reset.
3318f7917c00SJeff Kirsher  *	We do not initialize any of the queue sets here, instead the driver
3319f7917c00SJeff Kirsher  *	top-level must request those individually.  We also do not enable DMA
3320f7917c00SJeff Kirsher  *	here, that should be done after the queues have been set up.
3321f7917c00SJeff Kirsher  */
3322f7917c00SJeff Kirsher void t3_sge_init(struct adapter *adap, struct sge_params *p)
3323f7917c00SJeff Kirsher {
3324f7917c00SJeff Kirsher 	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3325f7917c00SJeff Kirsher 
3326f7917c00SJeff Kirsher 	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3327f7917c00SJeff Kirsher 	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3328f7917c00SJeff Kirsher 	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3329f7917c00SJeff Kirsher 	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3330f7917c00SJeff Kirsher #if SGE_NUM_GENBITS == 1
3331f7917c00SJeff Kirsher 	ctrl |= F_EGRGENCTRL;
3332f7917c00SJeff Kirsher #endif
3333f7917c00SJeff Kirsher 	if (adap->params.rev > 0) {
3334f7917c00SJeff Kirsher 		if (!(adap->flags & (USING_MSIX | USING_MSI)))
3335f7917c00SJeff Kirsher 			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3336f7917c00SJeff Kirsher 	}
3337f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CONTROL, ctrl);
3338f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3339f7917c00SJeff Kirsher 		     V_LORCQDRBTHRSH(512));
3340f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3341f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3342f7917c00SJeff Kirsher 		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3343f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3344f7917c00SJeff Kirsher 		     adap->params.rev < T3_REV_C ? 1000 : 500);
3345f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3346f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3347f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3348f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3349f7917c00SJeff Kirsher 	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3350f7917c00SJeff Kirsher }
3351f7917c00SJeff Kirsher 
3352f7917c00SJeff Kirsher /**
3353f7917c00SJeff Kirsher  *	t3_sge_prep - one-time SGE initialization
3354f7917c00SJeff Kirsher  *	@adap: the associated adapter
3355f7917c00SJeff Kirsher  *	@p: SGE parameters
3356f7917c00SJeff Kirsher  *
3357f7917c00SJeff Kirsher  *	Performs one-time initialization of SGE SW state.  Includes determining
3358f7917c00SJeff Kirsher  *	defaults for the assorted SGE parameters, which admins can change until
3359f7917c00SJeff Kirsher  *	they are used to initialize the SGE.
3360f7917c00SJeff Kirsher  */
3361f7917c00SJeff Kirsher void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3362f7917c00SJeff Kirsher {
3363f7917c00SJeff Kirsher 	int i;
3364f7917c00SJeff Kirsher 
3365f7917c00SJeff Kirsher 	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3366f7917c00SJeff Kirsher 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3367f7917c00SJeff Kirsher 
3368f7917c00SJeff Kirsher 	for (i = 0; i < SGE_QSETS; ++i) {
3369f7917c00SJeff Kirsher 		struct qset_params *q = p->qset + i;
3370f7917c00SJeff Kirsher 
3371f7917c00SJeff Kirsher 		q->polling = adap->params.rev > 0;
3372f7917c00SJeff Kirsher 		q->coalesce_usecs = 5;
3373f7917c00SJeff Kirsher 		q->rspq_size = 1024;
3374f7917c00SJeff Kirsher 		q->fl_size = 1024;
3375f7917c00SJeff Kirsher 		q->jumbo_size = 512;
3376f7917c00SJeff Kirsher 		q->txq_size[TXQ_ETH] = 1024;
3377f7917c00SJeff Kirsher 		q->txq_size[TXQ_OFLD] = 1024;
3378f7917c00SJeff Kirsher 		q->txq_size[TXQ_CTRL] = 256;
3379f7917c00SJeff Kirsher 		q->cong_thres = 0;
3380f7917c00SJeff Kirsher 	}
3381f7917c00SJeff Kirsher 
3382f7917c00SJeff Kirsher 	spin_lock_init(&adap->sge.reg_lock);
3383f7917c00SJeff Kirsher }
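/*
 * Illustrative sketch only (not part of the driver): the split between
 * one-time and per-reset SGE initialization.  t3_sge_prep() runs once at
 * probe time, while t3_sge_init() must be repeated after every chip reset,
 * before queue sets are allocated and DMA is enabled.  The function name is
 * an assumption made for illustration.
 */
#if 0
static void example_sge_bringup(struct adapter *adap)
{
	t3_sge_prep(adap, &adap->params.sge);	/* once, at probe time */

	/* ... chip reset happens here ... */

	t3_sge_init(adap, &adap->params.sge);	/* after each reset */
	/* queue sets are allocated next; t3_sge_start() enables DMA last */
}
#endif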
3384