/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
 * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
 * State Machines in the same state for this amount of time (in HZ) then we'll
 * issue a warning about a potential hang.  We'll repeat the warning as the
 * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
 * the situation clears.  If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH (1 * HZ)
#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
 * attempt to refill it.
 */
#define FL_STARVE_THRES 4

/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
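
/*
 * Worked sketch of the identity with calc_tx_descs() below: for a TSO
 * packet with MAX_SKB_FRAGS fragments, calc_tx_flits() needs
 * sgl_len(MAX_SKB_FRAGS + 1) + 6 = (3 * MAX_SKB_FRAGS) / 2 +
 * (MAX_SKB_FRAGS & 1) + 8 flits.  Those 8 header/CPL flits are exactly
 * one 64-byte descriptor, which is the leading "1 +" in the macro above.
 * E.g. if MAX_SKB_FRAGS == 17 (a common value with 4 KiB pages), both
 * forms yield 1 + DIV_ROUND_UP(26, 8) = 5 descriptors.
 */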

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}
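
/*
 * Sketch of the arithmetic above, assuming illustrative values (not taken
 * from this file): with s->pktshift == 2 and s->fl_align == 64, a
 * 1500-byte MTU needs 2 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 = 1520
 * bytes, which ALIGN() rounds up to a 1536-byte buffer.
 */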

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}

static inline bool fl_starving(const struct sge_fl *fl)
{
	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
}

static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
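
/*
 * Note on the contract above: map_skb() fills @addr with one DMA address
 * for the skb's linear data followed by one per page fragment, so callers
 * must provide an array of at least skb_shinfo(skb)->nr_frags + 1 entries
 * (as t4_eth_xmit() below does with its addr[MAX_SKB_FRAGS + 1]).
 */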

#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
					bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}

static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG_ON(1);
	}

	return buf_size;
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	u32 val;

	if (q->pend_cred >= 8) {
		if (is_t4(adap->params.chip))
			val = PIDX_V(q->pend_cred / 8);
		else
			val = PIDX_T5_V(q->pend_cred / 8) |
				DBTYPE_F;
		val |= DBPRIO_F;
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
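
/*
 * The division by 8 above follows from the FL geometry noted in fl_cap():
 * one FL descriptor covers 8 buffers, so pending credits are handed to the
 * hardware in multiples of 8 and any remainder (q->pend_cred &= 7) is
 * carried over until the next doorbell.
 */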

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];

	gfp |= __GFP_NOWARN;

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = __dev_alloc_pages(gfp, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = __dev_alloc_page(gfp);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(q))) {
		smp_wmb();
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
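
/*
 * A sketch of a typical call, with queue field names assumed for
 * illustration: a Tx queue of q->size descriptors with per-descriptor SW
 * state and a trailing status page might be set up as
 *
 *	q->desc = alloc_ring(adap->pdev_dev, q->size,
 *			     sizeof(struct tx_desc),
 *			     sizeof(struct tx_sw_desc),
 *			     &q->phys_addr, &q->sdesc, s->stat_len,
 *			     dev_to_node(adap->pdev_dev));
 *
 * On success q->desc points at the HW ring, q->phys_addr holds its bus
 * address, and q->sdesc points at the kzalloc'ed SW ring.
 */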

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
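
/*
 * Accounting sketch behind the formula: a flit is 8 bytes.  The first SGL
 * entry takes 2 flits (command/len0 plus addr0); every further *pair* of
 * entries takes 3 flits (two 32-bit lengths packed into one flit plus two
 * address flits); an unpaired final entry takes 2 flits.  E.g.
 * sgl_len(3) = (3 * 2) / 2 + (2 & 1) + 2 = 5 flits.
 */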

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.  Return value corresponds to the headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	int hdrlen = skb_shinfo(skb)->gso_size ?
			sizeof(struct cpl_tx_pkt_lso_core) : 0;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
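
/*
 * In other words: with MAX_IMM_TX_PKT_LEN == 256, a packet is inlined into
 * the work request iff its length plus the CPL header it needs (and the
 * LSO CPL, for GSO packets) fits in 256 bytes.  A non-zero return is the
 * CPL headroom the caller must budget for; zero means "use an SGL".
 */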

/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb);

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
	if (skb_shinfo(skb)->gso_size)
		flits += 2;
	return flits;
}
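
/*
 * Breakdown of the constants above: sgl_len() is called with nr_frags + 1
 * because the skb's linear data needs an SGL entry of its own; the +4
 * flits (32 bytes) cover the fw_eth_tx_pkt_wr and cpl_tx_pkt_core
 * headers, and TSO packets carry a further 2 flits for the LSO CPL.
 */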
780f7917c00SJeff Kirsher 
781f7917c00SJeff Kirsher /**
782f7917c00SJeff Kirsher  *	calc_tx_descs - calculate the number of Tx descriptors for a packet
783f7917c00SJeff Kirsher  *	@skb: the packet
784f7917c00SJeff Kirsher  *
785f7917c00SJeff Kirsher  *	Returns the number of Tx descriptors needed for the given Ethernet
786f7917c00SJeff Kirsher  *	packet, including the needed WR and CPL headers.
787f7917c00SJeff Kirsher  */
788f7917c00SJeff Kirsher static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
789f7917c00SJeff Kirsher {
790f7917c00SJeff Kirsher 	return flits_to_desc(calc_tx_flits(skb));
791f7917c00SJeff Kirsher }
792f7917c00SJeff Kirsher 
793f7917c00SJeff Kirsher /**
794f7917c00SJeff Kirsher  *	write_sgl - populate a scatter/gather list for a packet
795f7917c00SJeff Kirsher  *	@skb: the packet
796f7917c00SJeff Kirsher  *	@q: the Tx queue we are writing into
797f7917c00SJeff Kirsher  *	@sgl: starting location for writing the SGL
798f7917c00SJeff Kirsher  *	@end: points right after the end of the SGL
799f7917c00SJeff Kirsher  *	@start: start offset into skb main-body data to include in the SGL
800f7917c00SJeff Kirsher  *	@addr: the list of bus addresses for the SGL elements
801f7917c00SJeff Kirsher  *
802f7917c00SJeff Kirsher  *	Generates a gather list for the buffers that make up a packet.
803f7917c00SJeff Kirsher  *	The caller must provide adequate space for the SGL that will be written.
804f7917c00SJeff Kirsher  *	The SGL includes all of the packet's page fragments and the data in its
805f7917c00SJeff Kirsher  *	main body except for the first @start bytes.  @sgl must be 16-byte
806f7917c00SJeff Kirsher  *	aligned and within a Tx descriptor with available space.  @end points
807f7917c00SJeff Kirsher  *	right after the end of the SGL but does not account for any potential
808f7917c00SJeff Kirsher  *	wrap around, i.e., @end > @sgl.
809f7917c00SJeff Kirsher  */
810f7917c00SJeff Kirsher static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
811f7917c00SJeff Kirsher 		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
812f7917c00SJeff Kirsher 		      const dma_addr_t *addr)
813f7917c00SJeff Kirsher {
814f7917c00SJeff Kirsher 	unsigned int i, len;
815f7917c00SJeff Kirsher 	struct ulptx_sge_pair *to;
816f7917c00SJeff Kirsher 	const struct skb_shared_info *si = skb_shinfo(skb);
817f7917c00SJeff Kirsher 	unsigned int nfrags = si->nr_frags;
818f7917c00SJeff Kirsher 	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
819f7917c00SJeff Kirsher 
820f7917c00SJeff Kirsher 	len = skb_headlen(skb) - start;
821f7917c00SJeff Kirsher 	if (likely(len)) {
822f7917c00SJeff Kirsher 		sgl->len0 = htonl(len);
823f7917c00SJeff Kirsher 		sgl->addr0 = cpu_to_be64(addr[0] + start);
824f7917c00SJeff Kirsher 		nfrags++;
825f7917c00SJeff Kirsher 	} else {
8269e903e08SEric Dumazet 		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
827f7917c00SJeff Kirsher 		sgl->addr0 = cpu_to_be64(addr[1]);
828f7917c00SJeff Kirsher 	}
829f7917c00SJeff Kirsher 
830bdc590b9SHariprasad Shenai 	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
831bdc590b9SHariprasad Shenai 			      ULPTX_NSGE_V(nfrags));
832f7917c00SJeff Kirsher 	if (likely(--nfrags == 0))
833f7917c00SJeff Kirsher 		return;
834f7917c00SJeff Kirsher 	/*
835f7917c00SJeff Kirsher 	 * Most of the complexity below deals with the possibility we hit the
836f7917c00SJeff Kirsher 	 * end of the queue in the middle of writing the SGL.  For this case
837f7917c00SJeff Kirsher 	 * only we create the SGL in a temporary buffer and then copy it.
838f7917c00SJeff Kirsher 	 */
839f7917c00SJeff Kirsher 	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
840f7917c00SJeff Kirsher 
841f7917c00SJeff Kirsher 	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
8429e903e08SEric Dumazet 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
8439e903e08SEric Dumazet 		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
844f7917c00SJeff Kirsher 		to->addr[0] = cpu_to_be64(addr[i]);
845f7917c00SJeff Kirsher 		to->addr[1] = cpu_to_be64(addr[++i]);
846f7917c00SJeff Kirsher 	}
847f7917c00SJeff Kirsher 	if (nfrags) {
8489e903e08SEric Dumazet 		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
849f7917c00SJeff Kirsher 		to->len[1] = cpu_to_be32(0);
850f7917c00SJeff Kirsher 		to->addr[0] = cpu_to_be64(addr[i + 1]);
851f7917c00SJeff Kirsher 	}
852f7917c00SJeff Kirsher 	if (unlikely((u8 *)end > (u8 *)q->stat)) {
853f7917c00SJeff Kirsher 		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
854f7917c00SJeff Kirsher 
855f7917c00SJeff Kirsher 		if (likely(part0))
856f7917c00SJeff Kirsher 			memcpy(sgl->sge, buf, part0);
857f7917c00SJeff Kirsher 		part1 = (u8 *)end - (u8 *)q->stat;
858f7917c00SJeff Kirsher 		memcpy(q->desc, (u8 *)buf + part0, part1);
859f7917c00SJeff Kirsher 		end = (void *)q->desc + part1;
860f7917c00SJeff Kirsher 	}
861f7917c00SJeff Kirsher 	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
86264699336SJoe Perches 		*end = 0;
863f7917c00SJeff Kirsher }
864f7917c00SJeff Kirsher 
865df64e4d3SHariprasad Shenai /* This function copies 64 byte coalesced work request to
866df64e4d3SHariprasad Shenai  * memory mapped BAR2 space. For coalesced WR SGE fetches
867df64e4d3SHariprasad Shenai  * data from the FIFO instead of from Host.
86822adfe0aSSantosh Rastapur  */
869df64e4d3SHariprasad Shenai static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
87022adfe0aSSantosh Rastapur {
871df64e4d3SHariprasad Shenai 	int count = 8;
87222adfe0aSSantosh Rastapur 
87322adfe0aSSantosh Rastapur 	while (count) {
87422adfe0aSSantosh Rastapur 		writeq(*src, dst);
87522adfe0aSSantosh Rastapur 		src++;
87622adfe0aSSantosh Rastapur 		dst++;
87722adfe0aSSantosh Rastapur 		count--;
87822adfe0aSSantosh Rastapur 	}
87922adfe0aSSantosh Rastapur }
88022adfe0aSSantosh Rastapur 
881f7917c00SJeff Kirsher /**
882f7917c00SJeff Kirsher  *	ring_tx_db - check and potentially ring a Tx queue's doorbell
883f7917c00SJeff Kirsher  *	@adap: the adapter
884f7917c00SJeff Kirsher  *	@q: the Tx queue
885f7917c00SJeff Kirsher  *	@n: number of new descriptors to give to HW
886f7917c00SJeff Kirsher  *
887f7917c00SJeff Kirsher  *	Ring the doorbel for a Tx queue.
888f7917c00SJeff Kirsher  */
889f7917c00SJeff Kirsher static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
890f7917c00SJeff Kirsher {
891d63a6dcfSHariprasad Shenai 	wmb();            /* write descriptors before telling HW */
892d63a6dcfSHariprasad Shenai 
893df64e4d3SHariprasad Shenai 	/* If we don't have access to the new User Doorbell (T5+), use the old
894df64e4d3SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
895df64e4d3SHariprasad Shenai 	 */
896df64e4d3SHariprasad Shenai 	if (unlikely(q->bar2_addr == NULL)) {
897f612b815SHariprasad Shenai 		u32 val = PIDX_V(n);
89805eb2389SSteve Wise 		unsigned long flags;
89922adfe0aSSantosh Rastapur 
900d63a6dcfSHariprasad Shenai 		/* For T4 we need to participate in the Doorbell Recovery
901d63a6dcfSHariprasad Shenai 		 * mechanism.
902d63a6dcfSHariprasad Shenai 		 */
90305eb2389SSteve Wise 		spin_lock_irqsave(&q->db_lock, flags);
904d63a6dcfSHariprasad Shenai 		if (!q->db_disabled)
905f612b815SHariprasad Shenai 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
906f612b815SHariprasad Shenai 				     QID_V(q->cntxt_id) | val);
907d63a6dcfSHariprasad Shenai 		else
90805eb2389SSteve Wise 			q->db_pidx_inc += n;
9093069ee9bSVipul Pandya 		q->db_pidx = q->pidx;
91005eb2389SSteve Wise 		spin_unlock_irqrestore(&q->db_lock, flags);
911d63a6dcfSHariprasad Shenai 	} else {
912f612b815SHariprasad Shenai 		u32 val = PIDX_T5_V(n);
913d63a6dcfSHariprasad Shenai 
914d63a6dcfSHariprasad Shenai 		/* T4 and later chips share the same PIDX field offset within
915d63a6dcfSHariprasad Shenai 		 * the doorbell, but T5 and later shrank the field in order to
916d63a6dcfSHariprasad Shenai 		 * gain a bit for Doorbell Priority.  The field was absurdly
917d63a6dcfSHariprasad Shenai 		 * large in the first place (14 bits) so we just use the T5
918d63a6dcfSHariprasad Shenai 		 * and later limits and warn if a Queue ID is too large.
919d63a6dcfSHariprasad Shenai 		 */
920f612b815SHariprasad Shenai 		WARN_ON(val & DBPRIO_F);
921d63a6dcfSHariprasad Shenai 
922df64e4d3SHariprasad Shenai 		/* If we're only writing a single TX Descriptor and we can use
923df64e4d3SHariprasad Shenai 		 * Inferred QID registers, we can use the Write Combining
924df64e4d3SHariprasad Shenai 		 * Gather Buffer; otherwise we use the simple doorbell.
925d63a6dcfSHariprasad Shenai 		 */
926df64e4d3SHariprasad Shenai 		if (n == 1 && q->bar2_qid == 0) {
927d63a6dcfSHariprasad Shenai 			int index = (q->pidx
928d63a6dcfSHariprasad Shenai 				     ? (q->pidx - 1)
929d63a6dcfSHariprasad Shenai 				     : (q->size - 1));
930df64e4d3SHariprasad Shenai 			u64 *wr = (u64 *)&q->desc[index];
931d63a6dcfSHariprasad Shenai 
932df64e4d3SHariprasad Shenai 			cxgb_pio_copy((u64 __iomem *)
933df64e4d3SHariprasad Shenai 				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
934df64e4d3SHariprasad Shenai 				      wr);
935d63a6dcfSHariprasad Shenai 		} else {
936f612b815SHariprasad Shenai 			writel(val | QID_V(q->bar2_qid),
937df64e4d3SHariprasad Shenai 			       q->bar2_addr + SGE_UDB_KDOORBELL);
938d63a6dcfSHariprasad Shenai 		}
939d63a6dcfSHariprasad Shenai 
940d63a6dcfSHariprasad Shenai 		/* This Write Memory Barrier will force the write to the User
941d63a6dcfSHariprasad Shenai 		 * Doorbell area to be flushed.  This is needed to prevent
942d63a6dcfSHariprasad Shenai 		 * writes on different CPUs for the same queue from hitting
943d63a6dcfSHariprasad Shenai 		 * the adapter out of order.  This is required when some Work
944d63a6dcfSHariprasad Shenai 		 * Requests take the Write Combine Gather Buffer path (user
945d63a6dcfSHariprasad Shenai 		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
946d63a6dcfSHariprasad Shenai 		 * take the traditional path where we simply increment the
947d63a6dcfSHariprasad Shenai 		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
948d63a6dcfSHariprasad Shenai 		 * hardware DMA read the actual Work Request.
949d63a6dcfSHariprasad Shenai 		 */
950d63a6dcfSHariprasad Shenai 		wmb();
951d63a6dcfSHariprasad Shenai 	}
952f7917c00SJeff Kirsher }
953f7917c00SJeff Kirsher 
954f7917c00SJeff Kirsher /**
955f7917c00SJeff Kirsher  *	inline_tx_skb - inline a packet's data into Tx descriptors
956f7917c00SJeff Kirsher  *	@skb: the packet
957f7917c00SJeff Kirsher  *	@q: the Tx queue where the packet will be inlined
958f7917c00SJeff Kirsher  *	@pos: starting position in the Tx queue where to inline the packet
959f7917c00SJeff Kirsher  *
960f7917c00SJeff Kirsher  *	Inline a packet's contents directly into Tx descriptors, starting at
961f7917c00SJeff Kirsher  *	the given position within the Tx DMA ring.
962f7917c00SJeff Kirsher  *	Most of the complexity of this operation is dealing with wrap arounds
963f7917c00SJeff Kirsher  *	in the middle of the packet we want to inline.
964f7917c00SJeff Kirsher  */
965f7917c00SJeff Kirsher static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
966f7917c00SJeff Kirsher 			  void *pos)
967f7917c00SJeff Kirsher {
968f7917c00SJeff Kirsher 	u64 *p;
969f7917c00SJeff Kirsher 	int left = (void *)q->stat - pos;
970f7917c00SJeff Kirsher 
971f7917c00SJeff Kirsher 	if (likely(skb->len <= left)) {
972f7917c00SJeff Kirsher 		if (likely(!skb->data_len))
973f7917c00SJeff Kirsher 			skb_copy_from_linear_data(skb, pos, skb->len);
974f7917c00SJeff Kirsher 		else
975f7917c00SJeff Kirsher 			skb_copy_bits(skb, 0, pos, skb->len);
976f7917c00SJeff Kirsher 		pos += skb->len;
977f7917c00SJeff Kirsher 	} else {
978f7917c00SJeff Kirsher 		skb_copy_bits(skb, 0, pos, left);
979f7917c00SJeff Kirsher 		skb_copy_bits(skb, left, q->desc, skb->len - left);
980f7917c00SJeff Kirsher 		pos = (void *)q->desc + (skb->len - left);
981f7917c00SJeff Kirsher 	}
982f7917c00SJeff Kirsher 
983f7917c00SJeff Kirsher 	/* 0-pad to multiple of 16 */
984f7917c00SJeff Kirsher 	p = PTR_ALIGN(pos, 8);
985f7917c00SJeff Kirsher 	if ((uintptr_t)p & 8)
986f7917c00SJeff Kirsher 		*p = 0;
987f7917c00SJeff Kirsher }
988f7917c00SJeff Kirsher 
989f7917c00SJeff Kirsher /*
990f7917c00SJeff Kirsher  * Figure out what HW csum a packet wants and return the appropriate control
991f7917c00SJeff Kirsher  * bits.
992f7917c00SJeff Kirsher  */
993f7917c00SJeff Kirsher static u64 hwcsum(const struct sk_buff *skb)
994f7917c00SJeff Kirsher {
995f7917c00SJeff Kirsher 	int csum_type;
996f7917c00SJeff Kirsher 	const struct iphdr *iph = ip_hdr(skb);
997f7917c00SJeff Kirsher 
998f7917c00SJeff Kirsher 	if (iph->version == 4) {
999f7917c00SJeff Kirsher 		if (iph->protocol == IPPROTO_TCP)
1000f7917c00SJeff Kirsher 			csum_type = TX_CSUM_TCPIP;
1001f7917c00SJeff Kirsher 		else if (iph->protocol == IPPROTO_UDP)
1002f7917c00SJeff Kirsher 			csum_type = TX_CSUM_UDPIP;
1003f7917c00SJeff Kirsher 		else {
1004f7917c00SJeff Kirsher nocsum:			/*
1005f7917c00SJeff Kirsher 			 * unknown protocol, disable HW csum
1006f7917c00SJeff Kirsher 			 * and hope a bad packet is detected
1007f7917c00SJeff Kirsher 			 */
1008f7917c00SJeff Kirsher 			return TXPKT_L4CSUM_DIS;
1009f7917c00SJeff Kirsher 		}
1010f7917c00SJeff Kirsher 	} else {
1011f7917c00SJeff Kirsher 		/*
1012f7917c00SJeff Kirsher 		 * this doesn't work with extension headers
1013f7917c00SJeff Kirsher 		 */
1014f7917c00SJeff Kirsher 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1015f7917c00SJeff Kirsher 
1016f7917c00SJeff Kirsher 		if (ip6h->nexthdr == IPPROTO_TCP)
1017f7917c00SJeff Kirsher 			csum_type = TX_CSUM_TCPIP6;
1018f7917c00SJeff Kirsher 		else if (ip6h->nexthdr == IPPROTO_UDP)
1019f7917c00SJeff Kirsher 			csum_type = TX_CSUM_UDPIP6;
1020f7917c00SJeff Kirsher 		else
1021f7917c00SJeff Kirsher 			goto nocsum;
1022f7917c00SJeff Kirsher 	}
1023f7917c00SJeff Kirsher 
1024f7917c00SJeff Kirsher 	if (likely(csum_type >= TX_CSUM_TCPIP))
1025f7917c00SJeff Kirsher 		return TXPKT_CSUM_TYPE(csum_type) |
1026f7917c00SJeff Kirsher 			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
1027f7917c00SJeff Kirsher 			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
1028f7917c00SJeff Kirsher 	else {
1029f7917c00SJeff Kirsher 		int start = skb_transport_offset(skb);
1030f7917c00SJeff Kirsher 
1031f7917c00SJeff Kirsher 		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
1032f7917c00SJeff Kirsher 			TXPKT_CSUM_LOC(start + skb->csum_offset);
1033f7917c00SJeff Kirsher 	}
1034f7917c00SJeff Kirsher }
1035f7917c00SJeff Kirsher 
1036f7917c00SJeff Kirsher static void eth_txq_stop(struct sge_eth_txq *q)
1037f7917c00SJeff Kirsher {
1038f7917c00SJeff Kirsher 	netif_tx_stop_queue(q->txq);
1039f7917c00SJeff Kirsher 	q->q.stops++;
1040f7917c00SJeff Kirsher }
1041f7917c00SJeff Kirsher 
1042f7917c00SJeff Kirsher static inline void txq_advance(struct sge_txq *q, unsigned int n)
1043f7917c00SJeff Kirsher {
1044f7917c00SJeff Kirsher 	q->in_use += n;
1045f7917c00SJeff Kirsher 	q->pidx += n;
1046f7917c00SJeff Kirsher 	if (q->pidx >= q->size)
1047f7917c00SJeff Kirsher 		q->pidx -= q->size;
1048f7917c00SJeff Kirsher }
1049f7917c00SJeff Kirsher 
105084a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
105184a200b3SVarun Prakash static inline int
105284a200b3SVarun Prakash cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
105384a200b3SVarun Prakash 		  const struct port_info *pi, u64 *cntrl)
105484a200b3SVarun Prakash {
105584a200b3SVarun Prakash 	const struct cxgb_fcoe *fcoe = &pi->fcoe;
105684a200b3SVarun Prakash 
105784a200b3SVarun Prakash 	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
105884a200b3SVarun Prakash 		return 0;
105984a200b3SVarun Prakash 
106084a200b3SVarun Prakash 	if (skb->protocol != htons(ETH_P_FCOE))
106184a200b3SVarun Prakash 		return 0;
106284a200b3SVarun Prakash 
106384a200b3SVarun Prakash 	skb_reset_mac_header(skb);
106484a200b3SVarun Prakash 	skb->mac_len = sizeof(struct ethhdr);
106584a200b3SVarun Prakash 
106684a200b3SVarun Prakash 	skb_set_network_header(skb, skb->mac_len);
106784a200b3SVarun Prakash 	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
106884a200b3SVarun Prakash 
106984a200b3SVarun Prakash 	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
107084a200b3SVarun Prakash 		return -ENOTSUPP;
107184a200b3SVarun Prakash 
107284a200b3SVarun Prakash 	/* FC CRC offload */
107384a200b3SVarun Prakash 	*cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
107484a200b3SVarun Prakash 		     TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
107584a200b3SVarun Prakash 		     TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
107684a200b3SVarun Prakash 		     TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
107784a200b3SVarun Prakash 		     TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
107884a200b3SVarun Prakash 	return 0;
107984a200b3SVarun Prakash }
108084a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
108184a200b3SVarun Prakash 
1082f7917c00SJeff Kirsher /**
1083f7917c00SJeff Kirsher  *	t4_eth_xmit - add a packet to an Ethernet Tx queue
1084f7917c00SJeff Kirsher  *	@skb: the packet
1085f7917c00SJeff Kirsher  *	@dev: the egress net device
1086f7917c00SJeff Kirsher  *
1087f7917c00SJeff Kirsher  *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
1088f7917c00SJeff Kirsher  */
1089f7917c00SJeff Kirsher netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1090f7917c00SJeff Kirsher {
10910034b298SKumar Sanghvi 	int len;
1092f7917c00SJeff Kirsher 	u32 wr_mid;
1093f7917c00SJeff Kirsher 	u64 cntrl, *end;
1094f7917c00SJeff Kirsher 	int qidx, credits;
1095f7917c00SJeff Kirsher 	unsigned int flits, ndesc;
1096f7917c00SJeff Kirsher 	struct adapter *adap;
1097f7917c00SJeff Kirsher 	struct sge_eth_txq *q;
1098f7917c00SJeff Kirsher 	const struct port_info *pi;
1099f7917c00SJeff Kirsher 	struct fw_eth_tx_pkt_wr *wr;
1100f7917c00SJeff Kirsher 	struct cpl_tx_pkt_core *cpl;
1101f7917c00SJeff Kirsher 	const struct skb_shared_info *ssi;
1102f7917c00SJeff Kirsher 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
11030034b298SKumar Sanghvi 	bool immediate = false;
110484a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
110584a200b3SVarun Prakash 	int err;
110684a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
1107f7917c00SJeff Kirsher 
1108f7917c00SJeff Kirsher 	/*
1109f7917c00SJeff Kirsher 	 * The chip's minimum packet length is 10 octets, but we play it safe
1110f7917c00SJeff Kirsher 	 * and reject anything shorter than an Ethernet header.
1111f7917c00SJeff Kirsher 	 */
1112f7917c00SJeff Kirsher 	if (unlikely(skb->len < ETH_HLEN)) {
1113a7525198SEric W. Biederman out_free:	dev_kfree_skb_any(skb);
1114f7917c00SJeff Kirsher 		return NETDEV_TX_OK;
1115f7917c00SJeff Kirsher 	}
1116f7917c00SJeff Kirsher 
1117f7917c00SJeff Kirsher 	pi = netdev_priv(dev);
1118f7917c00SJeff Kirsher 	adap = pi->adapter;
1119f7917c00SJeff Kirsher 	qidx = skb_get_queue_mapping(skb);
1120f7917c00SJeff Kirsher 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
1121f7917c00SJeff Kirsher 
1122f7917c00SJeff Kirsher 	reclaim_completed_tx(adap, &q->q, true);
112384a200b3SVarun Prakash 	cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
112484a200b3SVarun Prakash 
112584a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
112684a200b3SVarun Prakash 	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
112784a200b3SVarun Prakash 	if (unlikely(err == -ENOTSUPP))
112884a200b3SVarun Prakash 		goto out_free;
112984a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
1130f7917c00SJeff Kirsher 
1131f7917c00SJeff Kirsher 	flits = calc_tx_flits(skb);
1132f7917c00SJeff Kirsher 	ndesc = flits_to_desc(flits);
1133f7917c00SJeff Kirsher 	credits = txq_avail(&q->q) - ndesc;
1134f7917c00SJeff Kirsher 
1135f7917c00SJeff Kirsher 	if (unlikely(credits < 0)) {
1136f7917c00SJeff Kirsher 		eth_txq_stop(q);
1137f7917c00SJeff Kirsher 		dev_err(adap->pdev_dev,
1138f7917c00SJeff Kirsher 			"%s: Tx ring %u full while queue awake!\n",
1139f7917c00SJeff Kirsher 			dev->name, qidx);
1140f7917c00SJeff Kirsher 		return NETDEV_TX_BUSY;
1141f7917c00SJeff Kirsher 	}
1142f7917c00SJeff Kirsher 
11430034b298SKumar Sanghvi 	if (is_eth_imm(skb))
11440034b298SKumar Sanghvi 		immediate = true;
11450034b298SKumar Sanghvi 
11460034b298SKumar Sanghvi 	if (!immediate &&
1147f7917c00SJeff Kirsher 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1148f7917c00SJeff Kirsher 		q->mapping_err++;
1149f7917c00SJeff Kirsher 		goto out_free;
1150f7917c00SJeff Kirsher 	}
1151f7917c00SJeff Kirsher 
1152e2ac9628SHariprasad Shenai 	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1153f7917c00SJeff Kirsher 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1154f7917c00SJeff Kirsher 		eth_txq_stop(q);
1155e2ac9628SHariprasad Shenai 		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1156f7917c00SJeff Kirsher 	}
1157f7917c00SJeff Kirsher 
1158f7917c00SJeff Kirsher 	wr = (void *)&q->q.desc[q->q.pidx];
1159f7917c00SJeff Kirsher 	wr->equiq_to_len16 = htonl(wr_mid);
1160f7917c00SJeff Kirsher 	wr->r3 = cpu_to_be64(0);
1161f7917c00SJeff Kirsher 	end = (u64 *)wr + flits;
1162f7917c00SJeff Kirsher 
11630034b298SKumar Sanghvi 	len = immediate ? skb->len : 0;
1164f7917c00SJeff Kirsher 	ssi = skb_shinfo(skb);
1165f7917c00SJeff Kirsher 	if (ssi->gso_size) {
1166f7917c00SJeff Kirsher 		struct cpl_tx_pkt_lso *lso = (void *)wr;
1167f7917c00SJeff Kirsher 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1168f7917c00SJeff Kirsher 		int l3hdr_len = skb_network_header_len(skb);
1169f7917c00SJeff Kirsher 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1170f7917c00SJeff Kirsher 
11710034b298SKumar Sanghvi 		len += sizeof(*lso);
1172e2ac9628SHariprasad Shenai 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1173e2ac9628SHariprasad Shenai 				       FW_WR_IMMDLEN_V(len));
1174f7917c00SJeff Kirsher 		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1175f7917c00SJeff Kirsher 					LSO_FIRST_SLICE | LSO_LAST_SLICE |
1176f7917c00SJeff Kirsher 					LSO_IPV6(v6) |
1177f7917c00SJeff Kirsher 					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1178f7917c00SJeff Kirsher 					LSO_IPHDR_LEN(l3hdr_len / 4) |
1179f7917c00SJeff Kirsher 					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1180f7917c00SJeff Kirsher 		lso->c.ipid_ofst = htons(0);
1181f7917c00SJeff Kirsher 		lso->c.mss = htons(ssi->gso_size);
1182f7917c00SJeff Kirsher 		lso->c.seqno_offset = htonl(0);
11837207c0d1SHariprasad Shenai 		if (is_t4(adap->params.chip))
1184f7917c00SJeff Kirsher 			lso->c.len = htonl(skb->len);
11857207c0d1SHariprasad Shenai 		else
11867207c0d1SHariprasad Shenai 			lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
1187f7917c00SJeff Kirsher 		cpl = (void *)(lso + 1);
1188f7917c00SJeff Kirsher 		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1189f7917c00SJeff Kirsher 			TXPKT_IPHDR_LEN(l3hdr_len) |
1190f7917c00SJeff Kirsher 			TXPKT_ETHHDR_LEN(eth_xtra_len);
1191f7917c00SJeff Kirsher 		q->tso++;
1192f7917c00SJeff Kirsher 		q->tx_cso += ssi->gso_segs;
1193f7917c00SJeff Kirsher 	} else {
1194ca71de6bSKumar Sanghvi 		len += sizeof(*cpl);
1195e2ac9628SHariprasad Shenai 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1196e2ac9628SHariprasad Shenai 				       FW_WR_IMMDLEN_V(len));
1197f7917c00SJeff Kirsher 		cpl = (void *)(wr + 1);
1198f7917c00SJeff Kirsher 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1199f7917c00SJeff Kirsher 			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1200f7917c00SJeff Kirsher 			q->tx_cso++;
120184a200b3SVarun Prakash 		}
1202f7917c00SJeff Kirsher 	}
1203f7917c00SJeff Kirsher 
1204df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
1205f7917c00SJeff Kirsher 		q->vlan_ins++;
1206df8a39deSJiri Pirko 		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
120784a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
120884a200b3SVarun Prakash 		if (skb->protocol == htons(ETH_P_FCOE))
120984a200b3SVarun Prakash 			cntrl |= TXPKT_VLAN(
121084a200b3SVarun Prakash 				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
121184a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
1212f7917c00SJeff Kirsher 	}
1213f7917c00SJeff Kirsher 
1214f7917c00SJeff Kirsher 	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1215f7917c00SJeff Kirsher 			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
1216f7917c00SJeff Kirsher 	cpl->pack = htons(0);
1217f7917c00SJeff Kirsher 	cpl->len = htons(skb->len);
1218f7917c00SJeff Kirsher 	cpl->ctrl1 = cpu_to_be64(cntrl);
1219f7917c00SJeff Kirsher 
12200034b298SKumar Sanghvi 	if (immediate) {
1221f7917c00SJeff Kirsher 		inline_tx_skb(skb, &q->q, cpl + 1);
1222a7525198SEric W. Biederman 		dev_consume_skb_any(skb);
1223f7917c00SJeff Kirsher 	} else {
1224f7917c00SJeff Kirsher 		int last_desc;
1225f7917c00SJeff Kirsher 
1226f7917c00SJeff Kirsher 		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1227f7917c00SJeff Kirsher 			  addr);
1228f7917c00SJeff Kirsher 		skb_orphan(skb);
1229f7917c00SJeff Kirsher 
1230f7917c00SJeff Kirsher 		last_desc = q->q.pidx + ndesc - 1;
1231f7917c00SJeff Kirsher 		if (last_desc >= q->q.size)
1232f7917c00SJeff Kirsher 			last_desc -= q->q.size;
1233f7917c00SJeff Kirsher 		q->q.sdesc[last_desc].skb = skb;
1234f7917c00SJeff Kirsher 		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1235f7917c00SJeff Kirsher 	}
1236f7917c00SJeff Kirsher 
1237f7917c00SJeff Kirsher 	txq_advance(&q->q, ndesc);
1238f7917c00SJeff Kirsher 
1239f7917c00SJeff Kirsher 	ring_tx_db(adap, &q->q, ndesc);
1240f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1241f7917c00SJeff Kirsher }
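
/* t4_eth_xmit() is the driver's ndo_start_xmit hook.  Schematically (the
 * actual net_device_ops registration lives elsewhere in the driver):
 *
 *	static const struct net_device_ops cxgb4_netdev_ops = {
 *		.ndo_start_xmit	= t4_eth_xmit,
 *		...
 *	};
 */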
1242f7917c00SJeff Kirsher 
1243f7917c00SJeff Kirsher /**
1244f7917c00SJeff Kirsher  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1245f7917c00SJeff Kirsher  *	@q: the SGE control Tx queue
1246f7917c00SJeff Kirsher  *
1247f7917c00SJeff Kirsher  *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1248f7917c00SJeff Kirsher  *	that send only immediate data (presently just the control queues) and
1249f7917c00SJeff Kirsher  *	thus do not have any sk_buffs to release.
1250f7917c00SJeff Kirsher  */
1251f7917c00SJeff Kirsher static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1252f7917c00SJeff Kirsher {
1253f7917c00SJeff Kirsher 	int hw_cidx = ntohs(q->stat->cidx);
1254f7917c00SJeff Kirsher 	int reclaim = hw_cidx - q->cidx;
1255f7917c00SJeff Kirsher 
1256f7917c00SJeff Kirsher 	if (reclaim < 0)
1257f7917c00SJeff Kirsher 		reclaim += q->size;
1258f7917c00SJeff Kirsher 
1259f7917c00SJeff Kirsher 	q->in_use -= reclaim;
1260f7917c00SJeff Kirsher 	q->cidx = hw_cidx;
1261f7917c00SJeff Kirsher }
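
/* Worked example: with q->size = 1024, a software cidx of 1020 and a
 * hardware cidx of 3, reclaim = 3 - 1020 = -1017, corrected by adding
 * the ring size to 7, i.e. seven descriptors completed across the wrap.
 */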
1262f7917c00SJeff Kirsher 
1263f7917c00SJeff Kirsher /**
1264f7917c00SJeff Kirsher  *	is_imm - check whether a packet can be sent as immediate data
1265f7917c00SJeff Kirsher  *	@skb: the packet
1266f7917c00SJeff Kirsher  *
1267f7917c00SJeff Kirsher  *	Returns true if a packet can be sent as a WR with immediate data.
1268f7917c00SJeff Kirsher  */
1269f7917c00SJeff Kirsher static inline int is_imm(const struct sk_buff *skb)
1270f7917c00SJeff Kirsher {
1271f7917c00SJeff Kirsher 	return skb->len <= MAX_CTRL_WR_LEN;
1272f7917c00SJeff Kirsher }
1273f7917c00SJeff Kirsher 
1274f7917c00SJeff Kirsher /**
1275f7917c00SJeff Kirsher  *	ctrlq_check_stop - check if a control queue is full and should stop
1276f7917c00SJeff Kirsher  *	@q: the queue
1277f7917c00SJeff Kirsher  *	@wr: most recent WR written to the queue
1278f7917c00SJeff Kirsher  *
1279f7917c00SJeff Kirsher  *	Check if a control queue has become full and should be stopped.
1280f7917c00SJeff Kirsher  *	We clean up control queue descriptors very lazily, only when we are out.
1281f7917c00SJeff Kirsher  *	If the queue is still full after reclaiming any completed descriptors
1282f7917c00SJeff Kirsher  *	we suspend it and have the last WR wake it up.
1283f7917c00SJeff Kirsher  */
1284f7917c00SJeff Kirsher static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1285f7917c00SJeff Kirsher {
1286f7917c00SJeff Kirsher 	reclaim_completed_tx_imm(&q->q);
1287f7917c00SJeff Kirsher 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1288e2ac9628SHariprasad Shenai 		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1289f7917c00SJeff Kirsher 		q->q.stops++;
1290f7917c00SJeff Kirsher 		q->full = 1;
1291f7917c00SJeff Kirsher 	}
1292f7917c00SJeff Kirsher }
1293f7917c00SJeff Kirsher 
1294f7917c00SJeff Kirsher /**
1295f7917c00SJeff Kirsher  *	ctrl_xmit - send a packet through an SGE control Tx queue
1296f7917c00SJeff Kirsher  *	@q: the control queue
1297f7917c00SJeff Kirsher  *	@skb: the packet
1298f7917c00SJeff Kirsher  *
1299f7917c00SJeff Kirsher  *	Send a packet through an SGE control Tx queue.  Packets sent through
1300f7917c00SJeff Kirsher  *	a control queue must fit entirely as immediate data.
1301f7917c00SJeff Kirsher  */
1302f7917c00SJeff Kirsher static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1303f7917c00SJeff Kirsher {
1304f7917c00SJeff Kirsher 	unsigned int ndesc;
1305f7917c00SJeff Kirsher 	struct fw_wr_hdr *wr;
1306f7917c00SJeff Kirsher 
1307f7917c00SJeff Kirsher 	if (unlikely(!is_imm(skb))) {
1308f7917c00SJeff Kirsher 		WARN_ON(1);
1309f7917c00SJeff Kirsher 		dev_kfree_skb(skb);
1310f7917c00SJeff Kirsher 		return NET_XMIT_DROP;
1311f7917c00SJeff Kirsher 	}
1312f7917c00SJeff Kirsher 
1313f7917c00SJeff Kirsher 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1314f7917c00SJeff Kirsher 	spin_lock(&q->sendq.lock);
1315f7917c00SJeff Kirsher 
1316f7917c00SJeff Kirsher 	if (unlikely(q->full)) {
1317f7917c00SJeff Kirsher 		skb->priority = ndesc;                  /* save for restart */
1318f7917c00SJeff Kirsher 		__skb_queue_tail(&q->sendq, skb);
1319f7917c00SJeff Kirsher 		spin_unlock(&q->sendq.lock);
1320f7917c00SJeff Kirsher 		return NET_XMIT_CN;
1321f7917c00SJeff Kirsher 	}
1322f7917c00SJeff Kirsher 
1323f7917c00SJeff Kirsher 	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1324f7917c00SJeff Kirsher 	inline_tx_skb(skb, &q->q, wr);
1325f7917c00SJeff Kirsher 
1326f7917c00SJeff Kirsher 	txq_advance(&q->q, ndesc);
1327f7917c00SJeff Kirsher 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1328f7917c00SJeff Kirsher 		ctrlq_check_stop(q, wr);
1329f7917c00SJeff Kirsher 
1330f7917c00SJeff Kirsher 	ring_tx_db(q->adap, &q->q, ndesc);
1331f7917c00SJeff Kirsher 	spin_unlock(&q->sendq.lock);
1332f7917c00SJeff Kirsher 
1333f7917c00SJeff Kirsher 	kfree_skb(skb);
1334f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1335f7917c00SJeff Kirsher }
1336f7917c00SJeff Kirsher 
1337f7917c00SJeff Kirsher /**
1338f7917c00SJeff Kirsher  *	restart_ctrlq - restart a suspended control queue
1339f7917c00SJeff Kirsher  *	@data: the control queue to restart
1340f7917c00SJeff Kirsher  *
1341f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx control queue.
1342f7917c00SJeff Kirsher  */
1343f7917c00SJeff Kirsher static void restart_ctrlq(unsigned long data)
1344f7917c00SJeff Kirsher {
1345f7917c00SJeff Kirsher 	struct sk_buff *skb;
1346f7917c00SJeff Kirsher 	unsigned int written = 0;
1347f7917c00SJeff Kirsher 	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1348f7917c00SJeff Kirsher 
1349f7917c00SJeff Kirsher 	spin_lock(&q->sendq.lock);
1350f7917c00SJeff Kirsher 	reclaim_completed_tx_imm(&q->q);
1351f7917c00SJeff Kirsher 	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
1352f7917c00SJeff Kirsher 
1353f7917c00SJeff Kirsher 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1354f7917c00SJeff Kirsher 		struct fw_wr_hdr *wr;
1355f7917c00SJeff Kirsher 		unsigned int ndesc = skb->priority;     /* previously saved */
1356f7917c00SJeff Kirsher 
1357f7917c00SJeff Kirsher 		/*
1358f7917c00SJeff Kirsher 		 * Write descriptors and free skbs outside the lock to limit
1359f7917c00SJeff Kirsher 		 * wait times.  q->full is still set so new skbs will be queued.
1360f7917c00SJeff Kirsher 		 */
1361f7917c00SJeff Kirsher 		spin_unlock(&q->sendq.lock);
1362f7917c00SJeff Kirsher 
1363f7917c00SJeff Kirsher 		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1364f7917c00SJeff Kirsher 		inline_tx_skb(skb, &q->q, wr);
1365f7917c00SJeff Kirsher 		kfree_skb(skb);
1366f7917c00SJeff Kirsher 
1367f7917c00SJeff Kirsher 		written += ndesc;
1368f7917c00SJeff Kirsher 		txq_advance(&q->q, ndesc);
1369f7917c00SJeff Kirsher 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1370f7917c00SJeff Kirsher 			unsigned long old = q->q.stops;
1371f7917c00SJeff Kirsher 
1372f7917c00SJeff Kirsher 			ctrlq_check_stop(q, wr);
1373f7917c00SJeff Kirsher 			if (q->q.stops != old) {          /* suspended anew */
1374f7917c00SJeff Kirsher 				spin_lock(&q->sendq.lock);
1375f7917c00SJeff Kirsher 				goto ringdb;
1376f7917c00SJeff Kirsher 			}
1377f7917c00SJeff Kirsher 		}
1378f7917c00SJeff Kirsher 		if (written > 16) {
1379f7917c00SJeff Kirsher 			ring_tx_db(q->adap, &q->q, written);
1380f7917c00SJeff Kirsher 			written = 0;
1381f7917c00SJeff Kirsher 		}
1382f7917c00SJeff Kirsher 		spin_lock(&q->sendq.lock);
1383f7917c00SJeff Kirsher 	}
1384f7917c00SJeff Kirsher 	q->full = 0;
1385f7917c00SJeff Kirsher ringdb: if (written)
1386f7917c00SJeff Kirsher 		ring_tx_db(q->adap, &q->q, written);
1387f7917c00SJeff Kirsher 	spin_unlock(&q->sendq.lock);
1388f7917c00SJeff Kirsher }
1389f7917c00SJeff Kirsher 
1390f7917c00SJeff Kirsher /**
1391f7917c00SJeff Kirsher  *	t4_mgmt_tx - send a management message
1392f7917c00SJeff Kirsher  *	@adap: the adapter
1393f7917c00SJeff Kirsher  *	@skb: the packet containing the management message
1394f7917c00SJeff Kirsher  *
1395f7917c00SJeff Kirsher  *	Send a management message through control queue 0.
1396f7917c00SJeff Kirsher  */
1397f7917c00SJeff Kirsher int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1398f7917c00SJeff Kirsher {
1399f7917c00SJeff Kirsher 	int ret;
1400f7917c00SJeff Kirsher 
1401f7917c00SJeff Kirsher 	local_bh_disable();
1402f7917c00SJeff Kirsher 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1403f7917c00SJeff Kirsher 	local_bh_enable();
1404f7917c00SJeff Kirsher 	return ret;
1405f7917c00SJeff Kirsher }
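
/* Usage sketch (illustrative only; the WR body is schematic): a caller
 * builds a complete firmware work request in the skb's linear data and
 * hands it off, keeping len <= MAX_CTRL_WR_LEN so is_imm() accepts it:
 *
 *	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);
 *	struct fw_wr_hdr *wr;
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	wr = (struct fw_wr_hdr *)__skb_put(skb, len);
 *	... fill in the command-specific fields after the header ...
 *	return t4_mgmt_tx(adap, skb);
 */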
1406f7917c00SJeff Kirsher 
1407f7917c00SJeff Kirsher /**
1408f7917c00SJeff Kirsher  *	is_ofld_imm - check whether a packet can be sent as immediate data
1409f7917c00SJeff Kirsher  *	@skb: the packet
1410f7917c00SJeff Kirsher  *
1411f7917c00SJeff Kirsher  *	Returns true if a packet can be sent as an offload WR with immediate
1412f7917c00SJeff Kirsher  *	data.  We currently use the same limit as for Ethernet packets.
1413f7917c00SJeff Kirsher  */
1414f7917c00SJeff Kirsher static inline int is_ofld_imm(const struct sk_buff *skb)
1415f7917c00SJeff Kirsher {
1416f7917c00SJeff Kirsher 	return skb->len <= MAX_IMM_TX_PKT_LEN;
1417f7917c00SJeff Kirsher }
1418f7917c00SJeff Kirsher 
1419f7917c00SJeff Kirsher /**
1420f7917c00SJeff Kirsher  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
1421f7917c00SJeff Kirsher  *	@skb: the packet
1422f7917c00SJeff Kirsher  *
1423f7917c00SJeff Kirsher  *	Returns the number of flits needed for the given offload packet.
1424f7917c00SJeff Kirsher  *	These packets are already fully constructed and no additional headers
1425f7917c00SJeff Kirsher  *	will be added.
1426f7917c00SJeff Kirsher  */
1427f7917c00SJeff Kirsher static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1428f7917c00SJeff Kirsher {
1429f7917c00SJeff Kirsher 	unsigned int flits, cnt;
1430f7917c00SJeff Kirsher 
1431f7917c00SJeff Kirsher 	if (is_ofld_imm(skb))
1432f7917c00SJeff Kirsher 		return DIV_ROUND_UP(skb->len, 8);
1433f7917c00SJeff Kirsher 
1434f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8U;   /* headers */
1435f7917c00SJeff Kirsher 	cnt = skb_shinfo(skb)->nr_frags;
143615dd16c2SLi RongQing 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1437f7917c00SJeff Kirsher 		cnt++;
1438f7917c00SJeff Kirsher 	return flits + sgl_len(cnt);
1439f7917c00SJeff Kirsher }
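
/* Worked flit example (a flit is 8 bytes; sgl_len() is defined earlier
 * in this file): an offload packet with 40 bytes of pre-built headers,
 * two page fragments and a linear tail beyond the transport header has
 * cnt = 3, so it needs 40 / 8 = 5 header flits plus sgl_len(3) = 5 flits
 * for the scatter/gather list, 10 flits in total.
 */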
1440f7917c00SJeff Kirsher 
1441f7917c00SJeff Kirsher /**
1442f7917c00SJeff Kirsher  *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1443f7917c00SJeff Kirsher  *	@adap: the adapter
1444f7917c00SJeff Kirsher  *	@q: the queue to stop
1445f7917c00SJeff Kirsher  *
1446f7917c00SJeff Kirsher  *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1447f7917c00SJeff Kirsher  *	inability to map packets.  A periodic timer attempts to restart
1448f7917c00SJeff Kirsher  *	queues so marked.
1449f7917c00SJeff Kirsher  */
1450f7917c00SJeff Kirsher static void txq_stop_maperr(struct sge_ofld_txq *q)
1451f7917c00SJeff Kirsher {
1452f7917c00SJeff Kirsher 	q->mapping_err++;
1453f7917c00SJeff Kirsher 	q->q.stops++;
1454f7917c00SJeff Kirsher 	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1455f7917c00SJeff Kirsher 		q->adap->sge.txq_maperr);
1456f7917c00SJeff Kirsher }
1457f7917c00SJeff Kirsher 
1458f7917c00SJeff Kirsher /**
1459f7917c00SJeff Kirsher  *	ofldtxq_stop - stop an offload Tx queue that has become full
1460f7917c00SJeff Kirsher  *	@q: the queue to stop
1461f7917c00SJeff Kirsher  *	@skb: the packet causing the queue to become full
1462f7917c00SJeff Kirsher  *
1463f7917c00SJeff Kirsher  *	Stops an offload Tx queue that has become full and modifies the packet
1464f7917c00SJeff Kirsher  *	being written to request a wakeup.
1465f7917c00SJeff Kirsher  */
1466f7917c00SJeff Kirsher static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1467f7917c00SJeff Kirsher {
1468f7917c00SJeff Kirsher 	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1469f7917c00SJeff Kirsher 
1470e2ac9628SHariprasad Shenai 	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1471f7917c00SJeff Kirsher 	q->q.stops++;
1472f7917c00SJeff Kirsher 	q->full = 1;
1473f7917c00SJeff Kirsher }
1474f7917c00SJeff Kirsher 
1475f7917c00SJeff Kirsher /**
1476f7917c00SJeff Kirsher  *	service_ofldq - restart a suspended offload queue
1477f7917c00SJeff Kirsher  *	@q: the offload queue
1478f7917c00SJeff Kirsher  *
1479f7917c00SJeff Kirsher  *	Services an offload Tx queue by moving packets from its packet queue
1480f7917c00SJeff Kirsher  *	to the HW Tx ring.  The function starts and ends with the queue locked.
1481f7917c00SJeff Kirsher  */
1482f7917c00SJeff Kirsher static void service_ofldq(struct sge_ofld_txq *q)
1483f7917c00SJeff Kirsher {
1484f7917c00SJeff Kirsher 	u64 *pos;
1485f7917c00SJeff Kirsher 	int credits;
1486f7917c00SJeff Kirsher 	struct sk_buff *skb;
1487f7917c00SJeff Kirsher 	unsigned int written = 0;
1488f7917c00SJeff Kirsher 	unsigned int flits, ndesc;
1489f7917c00SJeff Kirsher 
1490f7917c00SJeff Kirsher 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1491f7917c00SJeff Kirsher 		/*
1492f7917c00SJeff Kirsher 		 * We drop the lock but leave skb on sendq, thus retaining
1493f7917c00SJeff Kirsher 		 * exclusive access to the state of the queue.
1494f7917c00SJeff Kirsher 		 */
1495f7917c00SJeff Kirsher 		spin_unlock(&q->sendq.lock);
1496f7917c00SJeff Kirsher 
1497f7917c00SJeff Kirsher 		reclaim_completed_tx(q->adap, &q->q, false);
1498f7917c00SJeff Kirsher 
1499f7917c00SJeff Kirsher 		flits = skb->priority;                /* previously saved */
1500f7917c00SJeff Kirsher 		ndesc = flits_to_desc(flits);
1501f7917c00SJeff Kirsher 		credits = txq_avail(&q->q) - ndesc;
1502f7917c00SJeff Kirsher 		BUG_ON(credits < 0);
1503f7917c00SJeff Kirsher 		if (unlikely(credits < TXQ_STOP_THRES))
1504f7917c00SJeff Kirsher 			ofldtxq_stop(q, skb);
1505f7917c00SJeff Kirsher 
1506f7917c00SJeff Kirsher 		pos = (u64 *)&q->q.desc[q->q.pidx];
1507f7917c00SJeff Kirsher 		if (is_ofld_imm(skb))
1508f7917c00SJeff Kirsher 			inline_tx_skb(skb, &q->q, pos);
1509f7917c00SJeff Kirsher 		else if (map_skb(q->adap->pdev_dev, skb,
1510f7917c00SJeff Kirsher 				 (dma_addr_t *)skb->head)) {
1511f7917c00SJeff Kirsher 			txq_stop_maperr(q);
1512f7917c00SJeff Kirsher 			spin_lock(&q->sendq.lock);
1513f7917c00SJeff Kirsher 			break;
1514f7917c00SJeff Kirsher 		} else {
1515f7917c00SJeff Kirsher 			int last_desc, hdr_len = skb_transport_offset(skb);
1516f7917c00SJeff Kirsher 
1517f7917c00SJeff Kirsher 			memcpy(pos, skb->data, hdr_len);
1518f7917c00SJeff Kirsher 			write_sgl(skb, &q->q, (void *)pos + hdr_len,
1519f7917c00SJeff Kirsher 				  pos + flits, hdr_len,
1520f7917c00SJeff Kirsher 				  (dma_addr_t *)skb->head);
1521f7917c00SJeff Kirsher #ifdef CONFIG_NEED_DMA_MAP_STATE
1522f7917c00SJeff Kirsher 			skb->dev = q->adap->port[0];
1523f7917c00SJeff Kirsher 			skb->destructor = deferred_unmap_destructor;
1524f7917c00SJeff Kirsher #endif
1525f7917c00SJeff Kirsher 			last_desc = q->q.pidx + ndesc - 1;
1526f7917c00SJeff Kirsher 			if (last_desc >= q->q.size)
1527f7917c00SJeff Kirsher 				last_desc -= q->q.size;
1528f7917c00SJeff Kirsher 			q->q.sdesc[last_desc].skb = skb;
1529f7917c00SJeff Kirsher 		}
1530f7917c00SJeff Kirsher 
1531f7917c00SJeff Kirsher 		txq_advance(&q->q, ndesc);
1532f7917c00SJeff Kirsher 		written += ndesc;
1533f7917c00SJeff Kirsher 		if (unlikely(written > 32)) {
1534f7917c00SJeff Kirsher 			ring_tx_db(q->adap, &q->q, written);
1535f7917c00SJeff Kirsher 			written = 0;
1536f7917c00SJeff Kirsher 		}
1537f7917c00SJeff Kirsher 
1538f7917c00SJeff Kirsher 		spin_lock(&q->sendq.lock);
1539f7917c00SJeff Kirsher 		__skb_unlink(skb, &q->sendq);
1540f7917c00SJeff Kirsher 		if (is_ofld_imm(skb))
1541f7917c00SJeff Kirsher 			kfree_skb(skb);
1542f7917c00SJeff Kirsher 	}
1543f7917c00SJeff Kirsher 	if (likely(written))
1544f7917c00SJeff Kirsher 		ring_tx_db(q->adap, &q->q, written);
1545f7917c00SJeff Kirsher }
1546f7917c00SJeff Kirsher 
1547f7917c00SJeff Kirsher /**
1548f7917c00SJeff Kirsher  *	ofld_xmit - send a packet through an offload queue
1549f7917c00SJeff Kirsher  *	@q: the Tx offload queue
1550f7917c00SJeff Kirsher  *	@skb: the packet
1551f7917c00SJeff Kirsher  *
1552f7917c00SJeff Kirsher  *	Send an offload packet through an SGE offload queue.
1553f7917c00SJeff Kirsher  */
1554f7917c00SJeff Kirsher static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1555f7917c00SJeff Kirsher {
1556f7917c00SJeff Kirsher 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
1557f7917c00SJeff Kirsher 	spin_lock(&q->sendq.lock);
1558f7917c00SJeff Kirsher 	__skb_queue_tail(&q->sendq, skb);
1559f7917c00SJeff Kirsher 	if (q->sendq.qlen == 1)
1560f7917c00SJeff Kirsher 		service_ofldq(q);
1561f7917c00SJeff Kirsher 	spin_unlock(&q->sendq.lock);
1562f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
1563f7917c00SJeff Kirsher }
1564f7917c00SJeff Kirsher 
1565f7917c00SJeff Kirsher /**
1566f7917c00SJeff Kirsher  *	restart_ofldq - restart a suspended offload queue
1567f7917c00SJeff Kirsher  *	@data: the offload queue to restart
1568f7917c00SJeff Kirsher  *
1569f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx offload queue.
1570f7917c00SJeff Kirsher  */
1571f7917c00SJeff Kirsher static void restart_ofldq(unsigned long data)
1572f7917c00SJeff Kirsher {
1573f7917c00SJeff Kirsher 	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1574f7917c00SJeff Kirsher 
1575f7917c00SJeff Kirsher 	spin_lock(&q->sendq.lock);
1576f7917c00SJeff Kirsher 	q->full = 0;            /* the queue actually is completely empty now */
1577f7917c00SJeff Kirsher 	service_ofldq(q);
1578f7917c00SJeff Kirsher 	spin_unlock(&q->sendq.lock);
1579f7917c00SJeff Kirsher }
1580f7917c00SJeff Kirsher 
1581f7917c00SJeff Kirsher /**
1582f7917c00SJeff Kirsher  *	skb_txq - return the Tx queue an offload packet should use
1583f7917c00SJeff Kirsher  *	@skb: the packet
1584f7917c00SJeff Kirsher  *
1585f7917c00SJeff Kirsher  *	Returns the Tx queue an offload packet should use as indicated by bits
1586f7917c00SJeff Kirsher  *	1-15 in the packet's queue_mapping.
1587f7917c00SJeff Kirsher  */
1588f7917c00SJeff Kirsher static inline unsigned int skb_txq(const struct sk_buff *skb)
1589f7917c00SJeff Kirsher {
1590f7917c00SJeff Kirsher 	return skb->queue_mapping >> 1;
1591f7917c00SJeff Kirsher }
1592f7917c00SJeff Kirsher 
1593f7917c00SJeff Kirsher /**
1594f7917c00SJeff Kirsher  *	is_ctrl_pkt - return whether an offload packet is a control packet
1595f7917c00SJeff Kirsher  *	@skb: the packet
1596f7917c00SJeff Kirsher  *
1597f7917c00SJeff Kirsher  *	Returns whether an offload packet should use an OFLD or a CTRL
1598f7917c00SJeff Kirsher  *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
1599f7917c00SJeff Kirsher  */
1600f7917c00SJeff Kirsher static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1601f7917c00SJeff Kirsher {
1602f7917c00SJeff Kirsher 	return skb->queue_mapping & 1;
1603f7917c00SJeff Kirsher }
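
/* Encoding sketch: ULDs pack both fields into queue_mapping, roughly:
 *
 *	skb_set_queue_mapping(skb, (queue << 1) | is_ctrl);
 *
 * after which is_ctrl_pkt() reads bit 0 and skb_txq() reads bits 1-15.
 */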
1604f7917c00SJeff Kirsher 
1605f7917c00SJeff Kirsher static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1606f7917c00SJeff Kirsher {
1607f7917c00SJeff Kirsher 	unsigned int idx = skb_txq(skb);
1608f7917c00SJeff Kirsher 
16094fe44dd7SKumar Sanghvi 	if (unlikely(is_ctrl_pkt(skb))) {
16104fe44dd7SKumar Sanghvi 		/* Single ctrl queue is a requirement for LE workaround path */
16114fe44dd7SKumar Sanghvi 		if (adap->tids.nsftids)
16124fe44dd7SKumar Sanghvi 			idx = 0;
1613f7917c00SJeff Kirsher 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
16144fe44dd7SKumar Sanghvi 	}
1615f7917c00SJeff Kirsher 	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1616f7917c00SJeff Kirsher }
1617f7917c00SJeff Kirsher 
1618f7917c00SJeff Kirsher /**
1619f7917c00SJeff Kirsher  *	t4_ofld_send - send an offload packet
1620f7917c00SJeff Kirsher  *	@adap: the adapter
1621f7917c00SJeff Kirsher  *	@skb: the packet
1622f7917c00SJeff Kirsher  *
1623f7917c00SJeff Kirsher  *	Sends an offload packet.  We use the packet queue_mapping to select the
1624f7917c00SJeff Kirsher  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1625f7917c00SJeff Kirsher  *	should be sent as regular or control, bits 1-15 select the queue.
1626f7917c00SJeff Kirsher  */
1627f7917c00SJeff Kirsher int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1628f7917c00SJeff Kirsher {
1629f7917c00SJeff Kirsher 	int ret;
1630f7917c00SJeff Kirsher 
1631f7917c00SJeff Kirsher 	local_bh_disable();
1632f7917c00SJeff Kirsher 	ret = ofld_send(adap, skb);
1633f7917c00SJeff Kirsher 	local_bh_enable();
1634f7917c00SJeff Kirsher 	return ret;
1635f7917c00SJeff Kirsher }
1636f7917c00SJeff Kirsher 
1637f7917c00SJeff Kirsher /**
1638f7917c00SJeff Kirsher  *	cxgb4_ofld_send - send an offload packet
1639f7917c00SJeff Kirsher  *	@dev: the net device
1640f7917c00SJeff Kirsher  *	@skb: the packet
1641f7917c00SJeff Kirsher  *
1642f7917c00SJeff Kirsher  *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
1643f7917c00SJeff Kirsher  *	intended for ULDs.
1644f7917c00SJeff Kirsher  */
1645f7917c00SJeff Kirsher int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1646f7917c00SJeff Kirsher {
1647f7917c00SJeff Kirsher 	return t4_ofld_send(netdev2adap(dev), skb);
1648f7917c00SJeff Kirsher }
1649f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb4_ofld_send);
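
/* ULD usage sketch (illustrative): send a fully built offload WR on
 * offload queue 2 as a regular (non-control) packet:
 *
 *	skb_set_queue_mapping(skb, (2 << 1) | 0);
 *	if (cxgb4_ofld_send(netdev, skb) != NET_XMIT_SUCCESS)
 *		... the queue was congested or the WR was dropped ...
 */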
1650f7917c00SJeff Kirsher 
1651e91b0f24SIan Campbell static inline void copy_frags(struct sk_buff *skb,
1652f7917c00SJeff Kirsher 			      const struct pkt_gl *gl, unsigned int offset)
1653f7917c00SJeff Kirsher {
1654e91b0f24SIan Campbell 	int i;
1655f7917c00SJeff Kirsher 
1656f7917c00SJeff Kirsher 	/* usually there's just one frag */
1657e91b0f24SIan Campbell 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1658e91b0f24SIan Campbell 			     gl->frags[0].offset + offset,
1659e91b0f24SIan Campbell 			     gl->frags[0].size - offset);
1660e91b0f24SIan Campbell 	skb_shinfo(skb)->nr_frags = gl->nfrags;
1661e91b0f24SIan Campbell 	for (i = 1; i < gl->nfrags; i++)
1662e91b0f24SIan Campbell 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1663e91b0f24SIan Campbell 				     gl->frags[i].offset,
1664e91b0f24SIan Campbell 				     gl->frags[i].size);
1665f7917c00SJeff Kirsher 
1666f7917c00SJeff Kirsher 	/* get a reference to the last page; we don't own it */
1667e91b0f24SIan Campbell 	get_page(gl->frags[gl->nfrags - 1].page);
1668f7917c00SJeff Kirsher }
1669f7917c00SJeff Kirsher 
1670f7917c00SJeff Kirsher /**
1671f7917c00SJeff Kirsher  *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1672f7917c00SJeff Kirsher  *	@gl: the gather list
1673f7917c00SJeff Kirsher  *	@skb_len: size of sk_buff main body if it carries fragments
1674f7917c00SJeff Kirsher  *	@pull_len: amount of data to move to the sk_buff's main body
1675f7917c00SJeff Kirsher  *
1676f7917c00SJeff Kirsher  *	Builds an sk_buff from the given packet gather list.  Returns the
1677f7917c00SJeff Kirsher  *	sk_buff or %NULL if sk_buff allocation failed.
1678f7917c00SJeff Kirsher  */
1679f7917c00SJeff Kirsher struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1680f7917c00SJeff Kirsher 				   unsigned int skb_len, unsigned int pull_len)
1681f7917c00SJeff Kirsher {
1682f7917c00SJeff Kirsher 	struct sk_buff *skb;
1683f7917c00SJeff Kirsher 
1684f7917c00SJeff Kirsher 	/*
1685f7917c00SJeff Kirsher 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1686f7917c00SJeff Kirsher 	 * size, which is expected since buffers are at least PAGE_SIZE bytes.
1687f7917c00SJeff Kirsher 	 * In this case packets up to RX_COPY_THRES have only one fragment.
1688f7917c00SJeff Kirsher 	 */
1689f7917c00SJeff Kirsher 	if (gl->tot_len <= RX_COPY_THRES) {
1690f7917c00SJeff Kirsher 		skb = dev_alloc_skb(gl->tot_len);
1691f7917c00SJeff Kirsher 		if (unlikely(!skb))
1692f7917c00SJeff Kirsher 			goto out;
1693f7917c00SJeff Kirsher 		__skb_put(skb, gl->tot_len);
1694f7917c00SJeff Kirsher 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1695f7917c00SJeff Kirsher 	} else {
1696f7917c00SJeff Kirsher 		skb = dev_alloc_skb(skb_len);
1697f7917c00SJeff Kirsher 		if (unlikely(!skb))
1698f7917c00SJeff Kirsher 			goto out;
1699f7917c00SJeff Kirsher 		__skb_put(skb, pull_len);
1700f7917c00SJeff Kirsher 		skb_copy_to_linear_data(skb, gl->va, pull_len);
1701f7917c00SJeff Kirsher 
1702e91b0f24SIan Campbell 		copy_frags(skb, gl, pull_len);
1703f7917c00SJeff Kirsher 		skb->len = gl->tot_len;
1704f7917c00SJeff Kirsher 		skb->data_len = skb->len - pull_len;
1705f7917c00SJeff Kirsher 		skb->truesize += skb->data_len;
1706f7917c00SJeff Kirsher 	}
1707f7917c00SJeff Kirsher out:	return skb;
1708f7917c00SJeff Kirsher }
1709f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
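
/* Usage sketch, mirroring t4_ethrx_handler() below: small packets are
 * copied whole into the linear area, larger ones get an RX_PKT_SKB_LEN
 * byte buffer with RX_PULL_LEN bytes of headers pulled in and the rest
 * left in page fragments:
 *
 *	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
 *	if (unlikely(!skb)) {
 *		t4_pktgl_free(si);
 *		rxq->stats.rx_drops++;
 *		return 0;
 *	}
 */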
1710f7917c00SJeff Kirsher 
1711f7917c00SJeff Kirsher /**
1712f7917c00SJeff Kirsher  *	t4_pktgl_free - free a packet gather list
1713f7917c00SJeff Kirsher  *	@gl: the gather list
1714f7917c00SJeff Kirsher  *
1715f7917c00SJeff Kirsher  *	Releases the pages of a packet gather list.  We do not own the last
1716f7917c00SJeff Kirsher  *	page on the list and do not free it.
1717f7917c00SJeff Kirsher  */
1718f7917c00SJeff Kirsher static void t4_pktgl_free(const struct pkt_gl *gl)
1719f7917c00SJeff Kirsher {
1720f7917c00SJeff Kirsher 	int n;
1721e91b0f24SIan Campbell 	const struct page_frag *p;
1722f7917c00SJeff Kirsher 
1723f7917c00SJeff Kirsher 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1724f7917c00SJeff Kirsher 		put_page(p->page);
1725f7917c00SJeff Kirsher }
1726f7917c00SJeff Kirsher 
1727f7917c00SJeff Kirsher /*
1728f7917c00SJeff Kirsher  * Process an MPS trace packet.  Give it an unused protocol number so it won't
1729f7917c00SJeff Kirsher  * be delivered to anyone and send it to the stack for capture.
1730f7917c00SJeff Kirsher  */
1731f7917c00SJeff Kirsher static noinline int handle_trace_pkt(struct adapter *adap,
1732f7917c00SJeff Kirsher 				     const struct pkt_gl *gl)
1733f7917c00SJeff Kirsher {
1734f7917c00SJeff Kirsher 	struct sk_buff *skb;
1735f7917c00SJeff Kirsher 
1736f7917c00SJeff Kirsher 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1737f7917c00SJeff Kirsher 	if (unlikely(!skb)) {
1738f7917c00SJeff Kirsher 		t4_pktgl_free(gl);
1739f7917c00SJeff Kirsher 		return 0;
1740f7917c00SJeff Kirsher 	}
1741f7917c00SJeff Kirsher 
1742d14807ddSHariprasad Shenai 	if (is_t4(adap->params.chip))
17430a57a536SSantosh Rastapur 		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
17440a57a536SSantosh Rastapur 	else
17450a57a536SSantosh Rastapur 		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
17460a57a536SSantosh Rastapur 
1747f7917c00SJeff Kirsher 	skb_reset_mac_header(skb);
1748f7917c00SJeff Kirsher 	skb->protocol = htons(0xffff);
1749f7917c00SJeff Kirsher 	skb->dev = adap->port[0];
1750f7917c00SJeff Kirsher 	netif_receive_skb(skb);
1751f7917c00SJeff Kirsher 	return 0;
1752f7917c00SJeff Kirsher }
1753f7917c00SJeff Kirsher 
1754f7917c00SJeff Kirsher static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1755f7917c00SJeff Kirsher 		   const struct cpl_rx_pkt *pkt)
1756f7917c00SJeff Kirsher {
175752367a76SVipul Pandya 	struct adapter *adapter = rxq->rspq.adap;
175852367a76SVipul Pandya 	struct sge *s = &adapter->sge;
1759f7917c00SJeff Kirsher 	int ret;
1760f7917c00SJeff Kirsher 	struct sk_buff *skb;
1761f7917c00SJeff Kirsher 
1762f7917c00SJeff Kirsher 	skb = napi_get_frags(&rxq->rspq.napi);
1763f7917c00SJeff Kirsher 	if (unlikely(!skb)) {
1764f7917c00SJeff Kirsher 		t4_pktgl_free(gl);
1765f7917c00SJeff Kirsher 		rxq->stats.rx_drops++;
1766f7917c00SJeff Kirsher 		return;
1767f7917c00SJeff Kirsher 	}
1768f7917c00SJeff Kirsher 
176952367a76SVipul Pandya 	copy_frags(skb, gl, s->pktshift);
177052367a76SVipul Pandya 	skb->len = gl->tot_len - s->pktshift;
1771f7917c00SJeff Kirsher 	skb->data_len = skb->len;
1772f7917c00SJeff Kirsher 	skb->truesize += skb->data_len;
1773f7917c00SJeff Kirsher 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1774f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, rxq->rspq.idx);
17753a336cb1SHariprasad Shenai 	skb_mark_napi_id(skb, &rxq->rspq.napi);
1776f7917c00SJeff Kirsher 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
17778264989cSTom Herbert 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
17788264989cSTom Herbert 			     PKT_HASH_TYPE_L3);
1779f7917c00SJeff Kirsher 
1780f7917c00SJeff Kirsher 	if (unlikely(pkt->vlan_ex)) {
178186a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1782f7917c00SJeff Kirsher 		rxq->stats.vlan_ex++;
1783f7917c00SJeff Kirsher 	}
1784f7917c00SJeff Kirsher 	ret = napi_gro_frags(&rxq->rspq.napi);
1785f7917c00SJeff Kirsher 	if (ret == GRO_HELD)
1786f7917c00SJeff Kirsher 		rxq->stats.lro_pkts++;
1787f7917c00SJeff Kirsher 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1788f7917c00SJeff Kirsher 		rxq->stats.lro_merged++;
1789f7917c00SJeff Kirsher 	rxq->stats.pkts++;
1790f7917c00SJeff Kirsher 	rxq->stats.rx_cso++;
1791f7917c00SJeff Kirsher }
1792f7917c00SJeff Kirsher 
1793f7917c00SJeff Kirsher /**
1794f7917c00SJeff Kirsher  *	t4_ethrx_handler - process an ingress ethernet packet
1795f7917c00SJeff Kirsher  *	@q: the response queue that received the packet
1796f7917c00SJeff Kirsher  *	@rsp: the response queue descriptor holding the RX_PKT message
1797f7917c00SJeff Kirsher  *	@si: the gather list of packet fragments
1798f7917c00SJeff Kirsher  *
1799f7917c00SJeff Kirsher  *	Process an ingress ethernet packet and deliver it to the stack.
1800f7917c00SJeff Kirsher  */
1801f7917c00SJeff Kirsher int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1802f7917c00SJeff Kirsher 		     const struct pkt_gl *si)
1803f7917c00SJeff Kirsher {
1804f7917c00SJeff Kirsher 	bool csum_ok;
1805f7917c00SJeff Kirsher 	struct sk_buff *skb;
1806f7917c00SJeff Kirsher 	const struct cpl_rx_pkt *pkt;
1807f7917c00SJeff Kirsher 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
180852367a76SVipul Pandya 	struct sge *s = &q->adap->sge;
1809d14807ddSHariprasad Shenai 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
18100a57a536SSantosh Rastapur 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
181184a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
181284a200b3SVarun Prakash 	struct port_info *pi;
181384a200b3SVarun Prakash #endif
1814f7917c00SJeff Kirsher 
18150a57a536SSantosh Rastapur 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1816f7917c00SJeff Kirsher 		return handle_trace_pkt(q->adap, si);
1817f7917c00SJeff Kirsher 
1818f7917c00SJeff Kirsher 	pkt = (const struct cpl_rx_pkt *)rsp;
1819cca2822dSHariprasad Shenai 	csum_ok = pkt->csum_calc && !pkt->err_vec &&
1820cca2822dSHariprasad Shenai 		  (q->netdev->features & NETIF_F_RXCSUM);
1821bdc590b9SHariprasad Shenai 	if ((pkt->l2info & htonl(RXF_TCP_F)) &&
18223a336cb1SHariprasad Shenai 	    !(cxgb_poll_busy_polling(q)) &&
1823f7917c00SJeff Kirsher 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1824f7917c00SJeff Kirsher 		do_gro(rxq, si, pkt);
1825f7917c00SJeff Kirsher 		return 0;
1826f7917c00SJeff Kirsher 	}
1827f7917c00SJeff Kirsher 
1828f7917c00SJeff Kirsher 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1829f7917c00SJeff Kirsher 	if (unlikely(!skb)) {
1830f7917c00SJeff Kirsher 		t4_pktgl_free(si);
1831f7917c00SJeff Kirsher 		rxq->stats.rx_drops++;
1832f7917c00SJeff Kirsher 		return 0;
1833f7917c00SJeff Kirsher 	}
1834f7917c00SJeff Kirsher 
183552367a76SVipul Pandya 	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
1836f7917c00SJeff Kirsher 	skb->protocol = eth_type_trans(skb, q->netdev);
1837f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, q->idx);
1838f7917c00SJeff Kirsher 	if (skb->dev->features & NETIF_F_RXHASH)
18398264989cSTom Herbert 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
18408264989cSTom Herbert 			     PKT_HASH_TYPE_L3);
1841f7917c00SJeff Kirsher 
1842f7917c00SJeff Kirsher 	rxq->stats.pkts++;
1843f7917c00SJeff Kirsher 
1844bdc590b9SHariprasad Shenai 	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
1845f7917c00SJeff Kirsher 		if (!pkt->ip_frag) {
1846f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1847f7917c00SJeff Kirsher 			rxq->stats.rx_cso++;
1848bdc590b9SHariprasad Shenai 		} else if (pkt->l2info & htonl(RXF_IP_F)) {
1849f7917c00SJeff Kirsher 			__sum16 c = (__force __sum16)pkt->csum;
1850f7917c00SJeff Kirsher 			skb->csum = csum_unfold(c);
1851f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_COMPLETE;
1852f7917c00SJeff Kirsher 			rxq->stats.rx_cso++;
1853f7917c00SJeff Kirsher 		}
185484a200b3SVarun Prakash 	} else {
1855f7917c00SJeff Kirsher 		skb_checksum_none_assert(skb);
185684a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
185784a200b3SVarun Prakash #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
185884a200b3SVarun Prakash 			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
185984a200b3SVarun Prakash 
186084a200b3SVarun Prakash 		pi = netdev_priv(skb->dev);
186184a200b3SVarun Prakash 		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
186284a200b3SVarun Prakash 			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
186384a200b3SVarun Prakash 			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
186484a200b3SVarun Prakash 				if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
186584a200b3SVarun Prakash 					skb->ip_summed = CHECKSUM_UNNECESSARY;
186684a200b3SVarun Prakash 			}
186784a200b3SVarun Prakash 		}
186884a200b3SVarun Prakash 
186984a200b3SVarun Prakash #undef CPL_RX_PKT_FLAGS
187084a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
187184a200b3SVarun Prakash 	}
1872f7917c00SJeff Kirsher 
1873f7917c00SJeff Kirsher 	if (unlikely(pkt->vlan_ex)) {
187486a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1875f7917c00SJeff Kirsher 		rxq->stats.vlan_ex++;
1876f7917c00SJeff Kirsher 	}
18773a336cb1SHariprasad Shenai 	skb_mark_napi_id(skb, &q->napi);
1878f7917c00SJeff Kirsher 	netif_receive_skb(skb);
1879f7917c00SJeff Kirsher 	return 0;
1880f7917c00SJeff Kirsher }
1881f7917c00SJeff Kirsher 
1882f7917c00SJeff Kirsher /**
1883f7917c00SJeff Kirsher  *	restore_rx_bufs - put back a packet's Rx buffers
1884f7917c00SJeff Kirsher  *	@si: the packet gather list
1885f7917c00SJeff Kirsher  *	@q: the SGE free list
1886f7917c00SJeff Kirsher  *	@frags: number of FL buffers to restore
1887f7917c00SJeff Kirsher  *
1888f7917c00SJeff Kirsher  *	Puts back on an FL the Rx buffers associated with @si.  The buffers
1889f7917c00SJeff Kirsher  *	have already been unmapped and are left unmapped, we mark them so to
1890f7917c00SJeff Kirsher  *	prevent further unmapping attempts.
1891f7917c00SJeff Kirsher  *
1892f7917c00SJeff Kirsher  *	This function undoes a series of @unmap_rx_buf calls when we find out
1893f7917c00SJeff Kirsher  *	that the current packet can't be processed right away after all and we
1894f7917c00SJeff Kirsher  *	need to come back to it later.  This is a very rare event and there's
1895f7917c00SJeff Kirsher  *	no effort to make this particularly efficient.
1896f7917c00SJeff Kirsher  */
1897f7917c00SJeff Kirsher static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1898f7917c00SJeff Kirsher 			    int frags)
1899f7917c00SJeff Kirsher {
1900f7917c00SJeff Kirsher 	struct rx_sw_desc *d;
1901f7917c00SJeff Kirsher 
1902f7917c00SJeff Kirsher 	while (frags--) {
1903f7917c00SJeff Kirsher 		if (q->cidx == 0)
1904f7917c00SJeff Kirsher 			q->cidx = q->size - 1;
1905f7917c00SJeff Kirsher 		else
1906f7917c00SJeff Kirsher 			q->cidx--;
1907f7917c00SJeff Kirsher 		d = &q->sdesc[q->cidx];
1908f7917c00SJeff Kirsher 		d->page = si->frags[frags].page;
1909f7917c00SJeff Kirsher 		d->dma_addr |= RX_UNMAPPED_BUF;
1910f7917c00SJeff Kirsher 		q->avail++;
1911f7917c00SJeff Kirsher 	}
1912f7917c00SJeff Kirsher }
1913f7917c00SJeff Kirsher 
1914f7917c00SJeff Kirsher /**
1915f7917c00SJeff Kirsher  *	is_new_response - check if a response is newly written
1916f7917c00SJeff Kirsher  *	@r: the response descriptor
1917f7917c00SJeff Kirsher  *	@q: the response queue
1918f7917c00SJeff Kirsher  *
1919f7917c00SJeff Kirsher  *	Returns true if a response descriptor contains a yet unprocessed
1920f7917c00SJeff Kirsher  *	response.
1921f7917c00SJeff Kirsher  */
1922f7917c00SJeff Kirsher static inline bool is_new_response(const struct rsp_ctrl *r,
1923f7917c00SJeff Kirsher 				   const struct sge_rspq *q)
1924f7917c00SJeff Kirsher {
1925f7917c00SJeff Kirsher 	return RSPD_GEN(r->type_gen) == q->gen;
1926f7917c00SJeff Kirsher }
1927f7917c00SJeff Kirsher 
1928f7917c00SJeff Kirsher /**
1929f7917c00SJeff Kirsher  *	rspq_next - advance to the next entry in a response queue
1930f7917c00SJeff Kirsher  *	@q: the queue
1931f7917c00SJeff Kirsher  *
1932f7917c00SJeff Kirsher  *	Updates the state of a response queue to advance it to the next entry.
1933f7917c00SJeff Kirsher  */
1934f7917c00SJeff Kirsher static inline void rspq_next(struct sge_rspq *q)
1935f7917c00SJeff Kirsher {
1936f7917c00SJeff Kirsher 	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1937f7917c00SJeff Kirsher 	if (unlikely(++q->cidx == q->size)) {
1938f7917c00SJeff Kirsher 		q->cidx = 0;
1939f7917c00SJeff Kirsher 		q->gen ^= 1;
1940f7917c00SJeff Kirsher 		q->cur_desc = q->desc;
1941f7917c00SJeff Kirsher 	}
1942f7917c00SJeff Kirsher }
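
/* Consumer sketch, as used by process_responses() below: the generation
 * bit hardware writes into each response must match q->gen, which
 * rspq_next() flips on every ring wrap:
 *
 *	for (;;) {
 *		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
 *		if (!is_new_response(rc, q))
 *			break;
 *		dma_rmb();
 *		... handle the response ...
 *		rspq_next(q);
 *	}
 */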
1943f7917c00SJeff Kirsher 
1944f7917c00SJeff Kirsher /**
1945f7917c00SJeff Kirsher  *	process_responses - process responses from an SGE response queue
1946f7917c00SJeff Kirsher  *	@q: the ingress queue to process
1947f7917c00SJeff Kirsher  *	@budget: how many responses can be processed in this round
1948f7917c00SJeff Kirsher  *
1949f7917c00SJeff Kirsher  *	Process responses from an SGE response queue up to the supplied budget.
1950f7917c00SJeff Kirsher  *	Responses include received packets as well as control messages from FW
1951f7917c00SJeff Kirsher  *	or HW.
1952f7917c00SJeff Kirsher  *
1953f7917c00SJeff Kirsher  *	Additionally choose the interrupt holdoff time for the next interrupt
1954f7917c00SJeff Kirsher  *	on this queue.  If the system is under memory shortage use a fairly
1955f7917c00SJeff Kirsher  *	long delay to help recovery.
1956f7917c00SJeff Kirsher  */
1957f7917c00SJeff Kirsher static int process_responses(struct sge_rspq *q, int budget)
1958f7917c00SJeff Kirsher {
1959f7917c00SJeff Kirsher 	int ret, rsp_type;
1960f7917c00SJeff Kirsher 	int budget_left = budget;
1961f7917c00SJeff Kirsher 	const struct rsp_ctrl *rc;
1962f7917c00SJeff Kirsher 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
196352367a76SVipul Pandya 	struct adapter *adapter = q->adap;
196452367a76SVipul Pandya 	struct sge *s = &adapter->sge;
1965f7917c00SJeff Kirsher 
1966f7917c00SJeff Kirsher 	while (likely(budget_left)) {
1967f7917c00SJeff Kirsher 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1968f7917c00SJeff Kirsher 		if (!is_new_response(rc, q))
1969f7917c00SJeff Kirsher 			break;
1970f7917c00SJeff Kirsher 
1971019be1cfSAlexander Duyck 		dma_rmb();
1972f7917c00SJeff Kirsher 		rsp_type = RSPD_TYPE(rc->type_gen);
1973f7917c00SJeff Kirsher 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1974e91b0f24SIan Campbell 			struct page_frag *fp;
1975f7917c00SJeff Kirsher 			struct pkt_gl si;
1976f7917c00SJeff Kirsher 			const struct rx_sw_desc *rsd;
1977f7917c00SJeff Kirsher 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1978f7917c00SJeff Kirsher 
1979f7917c00SJeff Kirsher 			if (len & RSPD_NEWBUF) {
1980f7917c00SJeff Kirsher 				if (likely(q->offset > 0)) {
1981f7917c00SJeff Kirsher 					free_rx_bufs(q->adap, &rxq->fl, 1);
1982f7917c00SJeff Kirsher 					q->offset = 0;
1983f7917c00SJeff Kirsher 				}
1984f7917c00SJeff Kirsher 				len = RSPD_LEN(len);
1985f7917c00SJeff Kirsher 			}
1986f7917c00SJeff Kirsher 			si.tot_len = len;
1987f7917c00SJeff Kirsher 
1988f7917c00SJeff Kirsher 			/* gather packet fragments */
1989f7917c00SJeff Kirsher 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
1990f7917c00SJeff Kirsher 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
199152367a76SVipul Pandya 				bufsz = get_buf_size(adapter, rsd);
1992f7917c00SJeff Kirsher 				fp->page = rsd->page;
1993e91b0f24SIan Campbell 				fp->offset = q->offset;
1994e91b0f24SIan Campbell 				fp->size = min(bufsz, len);
1995e91b0f24SIan Campbell 				len -= fp->size;
1996f7917c00SJeff Kirsher 				if (!len)
1997f7917c00SJeff Kirsher 					break;
1998f7917c00SJeff Kirsher 				unmap_rx_buf(q->adap, &rxq->fl);
1999f7917c00SJeff Kirsher 			}
2000f7917c00SJeff Kirsher 
2001f7917c00SJeff Kirsher 			/*
2002f7917c00SJeff Kirsher 			 * Last buffer remains mapped so explicitly make it
2003f7917c00SJeff Kirsher 			 * coherent for CPU access.
2004f7917c00SJeff Kirsher 			 */
2005f7917c00SJeff Kirsher 			dma_sync_single_for_cpu(q->adap->pdev_dev,
2006f7917c00SJeff Kirsher 						get_buf_addr(rsd),
2007e91b0f24SIan Campbell 						fp->size, DMA_FROM_DEVICE);
2008f7917c00SJeff Kirsher 
2009f7917c00SJeff Kirsher 			si.va = page_address(si.frags[0].page) +
2010e91b0f24SIan Campbell 				si.frags[0].offset;
2011f7917c00SJeff Kirsher 			prefetch(si.va);
2012f7917c00SJeff Kirsher 
2013f7917c00SJeff Kirsher 			si.nfrags = frags + 1;
2014f7917c00SJeff Kirsher 			ret = q->handler(q, q->cur_desc, &si);
2015f7917c00SJeff Kirsher 			if (likely(ret == 0))
201652367a76SVipul Pandya 				q->offset += ALIGN(fp->size, s->fl_align);
2017f7917c00SJeff Kirsher 			else
2018f7917c00SJeff Kirsher 				restore_rx_bufs(&si, &rxq->fl, frags);
2019f7917c00SJeff Kirsher 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
2020f7917c00SJeff Kirsher 			ret = q->handler(q, q->cur_desc, NULL);
2021f7917c00SJeff Kirsher 		} else {
2022f7917c00SJeff Kirsher 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
2023f7917c00SJeff Kirsher 		}
2024f7917c00SJeff Kirsher 
2025f7917c00SJeff Kirsher 		if (unlikely(ret)) {
2026f7917c00SJeff Kirsher 			/* couldn't process descriptor, back off for recovery */
2027f7917c00SJeff Kirsher 			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
2028f7917c00SJeff Kirsher 			break;
2029f7917c00SJeff Kirsher 		}
2030f7917c00SJeff Kirsher 
2031f7917c00SJeff Kirsher 		rspq_next(q);
2032f7917c00SJeff Kirsher 		budget_left--;
2033f7917c00SJeff Kirsher 	}
2034f7917c00SJeff Kirsher 
2035f7917c00SJeff Kirsher 	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
2036f7917c00SJeff Kirsher 		__refill_fl(q->adap, &rxq->fl);
2037f7917c00SJeff Kirsher 	return budget - budget_left;
2038f7917c00SJeff Kirsher }
2039f7917c00SJeff Kirsher 
20403a336cb1SHariprasad Shenai #ifdef CONFIG_NET_RX_BUSY_POLL
20413a336cb1SHariprasad Shenai int cxgb_busy_poll(struct napi_struct *napi)
20423a336cb1SHariprasad Shenai {
20433a336cb1SHariprasad Shenai 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
20443a336cb1SHariprasad Shenai 	unsigned int params, work_done;
20453a336cb1SHariprasad Shenai 	u32 val;
20463a336cb1SHariprasad Shenai 
20473a336cb1SHariprasad Shenai 	if (!cxgb_poll_lock_poll(q))
20483a336cb1SHariprasad Shenai 		return LL_FLUSH_BUSY;
20493a336cb1SHariprasad Shenai 
20503a336cb1SHariprasad Shenai 	work_done = process_responses(q, 4);
20513a336cb1SHariprasad Shenai 	params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
20523a336cb1SHariprasad Shenai 	q->next_intr_params = params;
20533a336cb1SHariprasad Shenai 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
20543a336cb1SHariprasad Shenai 
20553a336cb1SHariprasad Shenai 	/* If we don't have access to the new User GTS (T5+), use the old
20563a336cb1SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
20573a336cb1SHariprasad Shenai 	 */
20583a336cb1SHariprasad Shenai 	if (unlikely(!q->bar2_addr))
20593a336cb1SHariprasad Shenai 		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
20603a336cb1SHariprasad Shenai 			     val | INGRESSQID_V((u32)q->cntxt_id));
20613a336cb1SHariprasad Shenai 	else {
20623a336cb1SHariprasad Shenai 		writel(val | INGRESSQID_V(q->bar2_qid),
20633a336cb1SHariprasad Shenai 		       q->bar2_addr + SGE_UDB_GTS);
20643a336cb1SHariprasad Shenai 		wmb();
20653a336cb1SHariprasad Shenai 	}
20663a336cb1SHariprasad Shenai 
20673a336cb1SHariprasad Shenai 	cxgb_poll_unlock_poll(q);
20683a336cb1SHariprasad Shenai 	return work_done;
20693a336cb1SHariprasad Shenai }
20703a336cb1SHariprasad Shenai #endif /* CONFIG_NET_RX_BUSY_POLL */
20713a336cb1SHariprasad Shenai 
2072f7917c00SJeff Kirsher /**
2073f7917c00SJeff Kirsher  *	napi_rx_handler - the NAPI handler for Rx processing
2074f7917c00SJeff Kirsher  *	@napi: the napi instance
2075f7917c00SJeff Kirsher  *	@budget: how many packets we can process in this round
2076f7917c00SJeff Kirsher  *
2077f7917c00SJeff Kirsher  *	Handler for new data events when using NAPI.  This does not need any
2078f7917c00SJeff Kirsher  *	locking or protection from interrupts as data interrupts are off at
2079f7917c00SJeff Kirsher  *	this point and other adapter interrupts do not interfere (the latter
2080f7917c00SJeff Kirsher  *	is not a concern at all with MSI-X, as non-data interrupts then have
2081f7917c00SJeff Kirsher  *	a separate handler).
2082f7917c00SJeff Kirsher  */
2083f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget)
2084f7917c00SJeff Kirsher {
2085f7917c00SJeff Kirsher 	unsigned int params;
2086f7917c00SJeff Kirsher 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
20873a336cb1SHariprasad Shenai 	int work_done;
2088d63a6dcfSHariprasad Shenai 	u32 val;
2089f7917c00SJeff Kirsher 
20903a336cb1SHariprasad Shenai 	if (!cxgb_poll_lock_napi(q))
20913a336cb1SHariprasad Shenai 		return budget;
20923a336cb1SHariprasad Shenai 
20933a336cb1SHariprasad Shenai 	work_done = process_responses(q, budget);
2094f7917c00SJeff Kirsher 	if (likely(work_done < budget)) {
2095e553ec3fSHariprasad Shenai 		int timer_index;
2096e553ec3fSHariprasad Shenai 
2097f7917c00SJeff Kirsher 		napi_complete(napi);
2098e553ec3fSHariprasad Shenai 		timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
2099e553ec3fSHariprasad Shenai 
2100e553ec3fSHariprasad Shenai 		if (q->adaptive_rx) {
2101e553ec3fSHariprasad Shenai 			if (work_done > max(timer_pkt_quota[timer_index],
2102e553ec3fSHariprasad Shenai 					    MIN_NAPI_WORK))
2103e553ec3fSHariprasad Shenai 				timer_index = timer_index + 1;
2104e553ec3fSHariprasad Shenai 			else
2105e553ec3fSHariprasad Shenai 				timer_index = timer_index - 1;
2106e553ec3fSHariprasad Shenai 
2107e553ec3fSHariprasad Shenai 			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2108e553ec3fSHariprasad Shenai 			q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
2109e553ec3fSHariprasad Shenai 							      V_QINTR_CNT_EN;
2110e553ec3fSHariprasad Shenai 			params = q->next_intr_params;
2111e553ec3fSHariprasad Shenai 		} else {
2112f7917c00SJeff Kirsher 			params = q->next_intr_params;
2113f7917c00SJeff Kirsher 			q->next_intr_params = q->intr_params;
2114e553ec3fSHariprasad Shenai 		}
2115f7917c00SJeff Kirsher 	} else {
2116f7917c00SJeff Kirsher 		params = QINTR_TIMER_IDX(7);
2116f7917c00SJeff Kirsher 	}
2117f7917c00SJeff Kirsher 
2118f612b815SHariprasad Shenai 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
2119df64e4d3SHariprasad Shenai 
2120df64e4d3SHariprasad Shenai 	/* If we don't have access to the new User GTS (T5+), use the old
2121df64e4d3SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2122df64e4d3SHariprasad Shenai 	 */
2123df64e4d3SHariprasad Shenai 	if (unlikely(q->bar2_addr == NULL)) {
2124f612b815SHariprasad Shenai 		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2125f612b815SHariprasad Shenai 			     val | INGRESSQID_V((u32)q->cntxt_id));
2126d63a6dcfSHariprasad Shenai 	} else {
2127f612b815SHariprasad Shenai 		writel(val | INGRESSQID_V(q->bar2_qid),
2128df64e4d3SHariprasad Shenai 		       q->bar2_addr + SGE_UDB_GTS);
2129d63a6dcfSHariprasad Shenai 		wmb();
2130d63a6dcfSHariprasad Shenai 	}
21313a336cb1SHariprasad Shenai 	cxgb_poll_unlock_napi(q);
2132f7917c00SJeff Kirsher 	return work_done;
2133f7917c00SJeff Kirsher }
2134f7917c00SJeff Kirsher 
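/* The GTS doorbell arming sequence above (also used in the busy-poll path)
 * recurs in process_intrq() below.  A hypothetical helper that factors out
 * the repeated pattern might look like this (illustrative sketch only; the
 * driver keeps these writes open-coded at each call site):
 */
static inline void example_arm_gts(struct sge_rspq *q, u32 val)
{
	if (unlikely(!q->bar2_addr)) {
		/* old doorbell mechanism for chips without BAR2 access */
		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V((u32)q->cntxt_id));
	} else {
		/* new T5+ BAR2 User Doorbell mechanism */
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
}
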
2135f7917c00SJeff Kirsher /*
2136f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue.
2137f7917c00SJeff Kirsher  */
2138f7917c00SJeff Kirsher irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2139f7917c00SJeff Kirsher {
2140f7917c00SJeff Kirsher 	struct sge_rspq *q = cookie;
2141f7917c00SJeff Kirsher 
2142f7917c00SJeff Kirsher 	napi_schedule(&q->napi);
2143f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2144f7917c00SJeff Kirsher }
2145f7917c00SJeff Kirsher 
2146f7917c00SJeff Kirsher /*
2147f7917c00SJeff Kirsher  * Process the indirect interrupt entries in the interrupt queue and kick off
2148f7917c00SJeff Kirsher  * NAPI for each queue that has generated an entry.
2149f7917c00SJeff Kirsher  */
2150f7917c00SJeff Kirsher static unsigned int process_intrq(struct adapter *adap)
2151f7917c00SJeff Kirsher {
2152f7917c00SJeff Kirsher 	unsigned int credits;
2153f7917c00SJeff Kirsher 	const struct rsp_ctrl *rc;
2154f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.intrq;
2155d63a6dcfSHariprasad Shenai 	u32 val;
2156f7917c00SJeff Kirsher 
2157f7917c00SJeff Kirsher 	spin_lock(&adap->sge.intrq_lock);
2158f7917c00SJeff Kirsher 	for (credits = 0; ; credits++) {
2159f7917c00SJeff Kirsher 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2160f7917c00SJeff Kirsher 		if (!is_new_response(rc, q))
2161f7917c00SJeff Kirsher 			break;
2162f7917c00SJeff Kirsher 
2163019be1cfSAlexander Duyck 		dma_rmb();
2164f7917c00SJeff Kirsher 		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
2165f7917c00SJeff Kirsher 			unsigned int qid = ntohl(rc->pldbuflen_qid);
2166f7917c00SJeff Kirsher 
2167f7917c00SJeff Kirsher 			qid -= adap->sge.ingr_start;
2168f7917c00SJeff Kirsher 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
2169f7917c00SJeff Kirsher 		}
2170f7917c00SJeff Kirsher 
2171f7917c00SJeff Kirsher 		rspq_next(q);
2172f7917c00SJeff Kirsher 	}
2173f7917c00SJeff Kirsher 
2174f612b815SHariprasad Shenai 	val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2175df64e4d3SHariprasad Shenai 
2176df64e4d3SHariprasad Shenai 	/* If we don't have access to the new User GTS (T5+), use the old
2177df64e4d3SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2178df64e4d3SHariprasad Shenai 	 */
2179df64e4d3SHariprasad Shenai 	if (unlikely(q->bar2_addr == NULL)) {
2180f612b815SHariprasad Shenai 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2181f612b815SHariprasad Shenai 			     val | INGRESSQID_V(q->cntxt_id));
2182d63a6dcfSHariprasad Shenai 	} else {
2183f612b815SHariprasad Shenai 		writel(val | INGRESSQID_V(q->bar2_qid),
2184df64e4d3SHariprasad Shenai 		       q->bar2_addr + SGE_UDB_GTS);
2185d63a6dcfSHariprasad Shenai 		wmb();
2186d63a6dcfSHariprasad Shenai 	}
2187f7917c00SJeff Kirsher 	spin_unlock(&adap->sge.intrq_lock);
2188f7917c00SJeff Kirsher 	return credits;
2189f7917c00SJeff Kirsher }
2190f7917c00SJeff Kirsher 
2191f7917c00SJeff Kirsher /*
2192f7917c00SJeff Kirsher  * The MSI interrupt handler handles data events from SGE response queues as
2193f7917c00SJeff Kirsher  * well as error and other async events, since they all use the same MSI
2193f7917c00SJeff Kirsher  * vector.
2194f7917c00SJeff Kirsher  */
2195f7917c00SJeff Kirsher static irqreturn_t t4_intr_msi(int irq, void *cookie)
2196f7917c00SJeff Kirsher {
2197f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2198f7917c00SJeff Kirsher 
2199f7917c00SJeff Kirsher 	t4_slow_intr_handler(adap);
2200f7917c00SJeff Kirsher 	process_intrq(adap);
2201f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2202f7917c00SJeff Kirsher }
2203f7917c00SJeff Kirsher 
2204f7917c00SJeff Kirsher /*
2205f7917c00SJeff Kirsher  * Interrupt handler for legacy INTx interrupts.
2206f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
2207f7917c00SJeff Kirsher  * async events as they all use the same interrupt line.
2208f7917c00SJeff Kirsher  */
2209f7917c00SJeff Kirsher static irqreturn_t t4_intr_intx(int irq, void *cookie)
2210f7917c00SJeff Kirsher {
2211f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
2212f7917c00SJeff Kirsher 
2213f061de42SHariprasad Shenai 	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2214f7917c00SJeff Kirsher 	if (t4_slow_intr_handler(adap) | process_intrq(adap))
2215f7917c00SJeff Kirsher 		return IRQ_HANDLED;
2216f7917c00SJeff Kirsher 	return IRQ_NONE;             /* probably shared interrupt */
2217f7917c00SJeff Kirsher }
2218f7917c00SJeff Kirsher 
2219f7917c00SJeff Kirsher /**
2220f7917c00SJeff Kirsher  *	t4_intr_handler - select the top-level interrupt handler
2221f7917c00SJeff Kirsher  *	@adap: the adapter
2222f7917c00SJeff Kirsher  *
2223f7917c00SJeff Kirsher  *	Selects the top-level interrupt handler based on the type of interrupts
2224f7917c00SJeff Kirsher  *	(MSI-X, MSI, or INTx).
2225f7917c00SJeff Kirsher  */
2226f7917c00SJeff Kirsher irq_handler_t t4_intr_handler(struct adapter *adap)
2227f7917c00SJeff Kirsher {
2228f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX)
2229f7917c00SJeff Kirsher 		return t4_sge_intr_msix;
2230f7917c00SJeff Kirsher 	if (adap->flags & USING_MSI)
2231f7917c00SJeff Kirsher 		return t4_intr_msi;
2232f7917c00SJeff Kirsher 	return t4_intr_intx;
2233f7917c00SJeff Kirsher }
2234f7917c00SJeff Kirsher 
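/* Hypothetical usage sketch (not the driver's actual probe path): in the
 * MSI and INTx cases a single vector services the whole adapter, so the
 * handler selected above might be wired up roughly as follows:
 */
static inline int example_request_adapter_irq(struct adapter *adap)
{
	return request_irq(adap->pdev->irq, t4_intr_handler(adap),
			   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
			   "cxgb4-example", adap);
}
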
2235f7917c00SJeff Kirsher static void sge_rx_timer_cb(unsigned long data)
2236f7917c00SJeff Kirsher {
2237f7917c00SJeff Kirsher 	unsigned long m;
22380f4d201fSKumar Sanghvi 	unsigned int i, idma_same_state_cnt[2];
2239f7917c00SJeff Kirsher 	struct adapter *adap = (struct adapter *)data;
2240f7917c00SJeff Kirsher 	struct sge *s = &adap->sge;
2241f7917c00SJeff Kirsher 
22424b8e27a8SHariprasad Shenai 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2243f7917c00SJeff Kirsher 		for (m = s->starving_fl[i]; m; m &= m - 1) {
2244f7917c00SJeff Kirsher 			struct sge_eth_rxq *rxq;
2245f7917c00SJeff Kirsher 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2246f7917c00SJeff Kirsher 			struct sge_fl *fl = s->egr_map[id];
2247f7917c00SJeff Kirsher 
2248f7917c00SJeff Kirsher 			clear_bit(id, s->starving_fl);
22494e857c58SPeter Zijlstra 			smp_mb__after_atomic();
2250f7917c00SJeff Kirsher 
2251f7917c00SJeff Kirsher 			if (fl_starving(fl)) {
2252f7917c00SJeff Kirsher 				rxq = container_of(fl, struct sge_eth_rxq, fl);
2253f7917c00SJeff Kirsher 				if (napi_reschedule(&rxq->rspq.napi))
2254f7917c00SJeff Kirsher 					fl->starving++;
2255f7917c00SJeff Kirsher 				else
2256f7917c00SJeff Kirsher 					set_bit(id, s->starving_fl);
2257f7917c00SJeff Kirsher 			}
2258f7917c00SJeff Kirsher 		}
2259f7917c00SJeff Kirsher 
2260f061de42SHariprasad Shenai 	t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
2261f061de42SHariprasad Shenai 	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
2262f061de42SHariprasad Shenai 	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2263f7917c00SJeff Kirsher 
22640f4d201fSKumar Sanghvi 	for (i = 0; i < 2; i++) {
22650f4d201fSKumar Sanghvi 		u32 debug0, debug11;
22660f4d201fSKumar Sanghvi 
22670f4d201fSKumar Sanghvi 		/* If the Ingress DMA Same State Counter ("timer") is less
22680f4d201fSKumar Sanghvi 		 * than 1s, then we can reset our synthesized Stall Timer and
22690f4d201fSKumar Sanghvi 		 * continue.  If we have previously emitted warnings about a
22700f4d201fSKumar Sanghvi 		 * potential stalled Ingress Queue, issue a note indicating
22710f4d201fSKumar Sanghvi 		 * that the Ingress Queue has resumed forward progress.
22720f4d201fSKumar Sanghvi 		 */
22730f4d201fSKumar Sanghvi 		if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
22740f4d201fSKumar Sanghvi 			if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
22750f4d201fSKumar Sanghvi 				CH_WARN(adap, "SGE idma%d, queue%u, resumed after %d sec\n",
22760f4d201fSKumar Sanghvi 					i, s->idma_qid[i],
22770f4d201fSKumar Sanghvi 					s->idma_stalled[i]/HZ);
22780f4d201fSKumar Sanghvi 			s->idma_stalled[i] = 0;
2279f7917c00SJeff Kirsher 			continue;
22800f4d201fSKumar Sanghvi 		}
22810f4d201fSKumar Sanghvi 
22820f4d201fSKumar Sanghvi 		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
22830f4d201fSKumar Sanghvi 		 * domain.  The first time we get here it'll be because we
22840f4d201fSKumar Sanghvi 		 * passed the 1s Threshold; each additional time it'll be
22850f4d201fSKumar Sanghvi 		 * because the RX Timer Callback is being fired on its regular
22860f4d201fSKumar Sanghvi 		 * schedule.
22870f4d201fSKumar Sanghvi 		 *
22880f4d201fSKumar Sanghvi 		 * If the stall is below our Potential Hung Ingress Queue
22890f4d201fSKumar Sanghvi 		 * Warning Threshold, continue.
22900f4d201fSKumar Sanghvi 		 */
22910f4d201fSKumar Sanghvi 		if (s->idma_stalled[i] == 0)
22920f4d201fSKumar Sanghvi 			s->idma_stalled[i] = HZ;
22930f4d201fSKumar Sanghvi 		else
22940f4d201fSKumar Sanghvi 			s->idma_stalled[i] += RX_QCHECK_PERIOD;
22950f4d201fSKumar Sanghvi 
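		/* Worked example of the synthesized timer, assuming (purely
		 * for illustration) RX_QCHECK_PERIOD == HZ/2 and
		 * SGE_IDMA_WARN_THRESH == 5*HZ: the first over-threshold
		 * sample sets idma_stalled[i] to HZ, and eight callbacks
		 * later it reaches 5*HZ, crossing the warning threshold.
		 */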
22960f4d201fSKumar Sanghvi 		if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
22970f4d201fSKumar Sanghvi 			continue;
22980f4d201fSKumar Sanghvi 
22990f4d201fSKumar Sanghvi 		/* We'll only issue a warning every SGE_IDMA_WARN_REPEAT jiffies */
23000f4d201fSKumar Sanghvi 		if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
23010f4d201fSKumar Sanghvi 			continue;
23020f4d201fSKumar Sanghvi 
23030f4d201fSKumar Sanghvi 		/* Read and save the SGE IDMA State and Queue ID information.
23040f4d201fSKumar Sanghvi 		 * We do this every time in case it changes across time ...
23050f4d201fSKumar Sanghvi 		 */
2306f061de42SHariprasad Shenai 		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
2307f061de42SHariprasad Shenai 		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
23080f4d201fSKumar Sanghvi 		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
23090f4d201fSKumar Sanghvi 
2310f061de42SHariprasad Shenai 		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
2311f061de42SHariprasad Shenai 		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
23120f4d201fSKumar Sanghvi 		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
23130f4d201fSKumar Sanghvi 
23140f4d201fSKumar Sanghvi 		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
23150f4d201fSKumar Sanghvi 			i, s->idma_qid[i], s->idma_state[i],
23160f4d201fSKumar Sanghvi 			s->idma_stalled[i]/HZ, debug0, debug11);
23170f4d201fSKumar Sanghvi 		t4_sge_decode_idma_state(adap, s->idma_state[i]);
23180f4d201fSKumar Sanghvi 	}
2319f7917c00SJeff Kirsher 
2320f7917c00SJeff Kirsher 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2321f7917c00SJeff Kirsher }
2322f7917c00SJeff Kirsher 
2323f7917c00SJeff Kirsher static void sge_tx_timer_cb(unsigned long data)
2324f7917c00SJeff Kirsher {
2325f7917c00SJeff Kirsher 	unsigned long m;
2326f7917c00SJeff Kirsher 	unsigned int i, budget;
2327f7917c00SJeff Kirsher 	struct adapter *adap = (struct adapter *)data;
2328f7917c00SJeff Kirsher 	struct sge *s = &adap->sge;
2329f7917c00SJeff Kirsher 
23304b8e27a8SHariprasad Shenai 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2331f7917c00SJeff Kirsher 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
2332f7917c00SJeff Kirsher 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2333f7917c00SJeff Kirsher 			struct sge_ofld_txq *txq = s->egr_map[id];
2334f7917c00SJeff Kirsher 
2335f7917c00SJeff Kirsher 			clear_bit(id, s->txq_maperr);
2336f7917c00SJeff Kirsher 			tasklet_schedule(&txq->qresume_tsk);
2337f7917c00SJeff Kirsher 		}
2338f7917c00SJeff Kirsher 
2339f7917c00SJeff Kirsher 	budget = MAX_TIMER_TX_RECLAIM;
2340f7917c00SJeff Kirsher 	i = s->ethtxq_rover;
2341f7917c00SJeff Kirsher 	do {
2342f7917c00SJeff Kirsher 		struct sge_eth_txq *q = &s->ethtxq[i];
2343f7917c00SJeff Kirsher 
2344f7917c00SJeff Kirsher 		if (q->q.in_use &&
2345f7917c00SJeff Kirsher 		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2346f7917c00SJeff Kirsher 		    __netif_tx_trylock(q->txq)) {
2347f7917c00SJeff Kirsher 			int avail = reclaimable(&q->q);
2348f7917c00SJeff Kirsher 
2349f7917c00SJeff Kirsher 			if (avail) {
2350f7917c00SJeff Kirsher 				if (avail > budget)
2351f7917c00SJeff Kirsher 					avail = budget;
2352f7917c00SJeff Kirsher 
2353f7917c00SJeff Kirsher 				free_tx_desc(adap, &q->q, avail, true);
2354f7917c00SJeff Kirsher 				q->q.in_use -= avail;
2355f7917c00SJeff Kirsher 				budget -= avail;
2356f7917c00SJeff Kirsher 			}
2357f7917c00SJeff Kirsher 			__netif_tx_unlock(q->txq);
2358f7917c00SJeff Kirsher 		}
2359f7917c00SJeff Kirsher 
2360f7917c00SJeff Kirsher 		if (++i >= s->ethqsets)
2361f7917c00SJeff Kirsher 			i = 0;
2362f7917c00SJeff Kirsher 	} while (budget && i != s->ethtxq_rover);
2363f7917c00SJeff Kirsher 	s->ethtxq_rover = i;
2364f7917c00SJeff Kirsher 	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2365f7917c00SJeff Kirsher }
2366f7917c00SJeff Kirsher 
2367d63a6dcfSHariprasad Shenai /**
2368df64e4d3SHariprasad Shenai  *	bar2_address - return the BAR2 address for an SGE Queue's Registers
2369df64e4d3SHariprasad Shenai  *	@adapter: the adapter
2370df64e4d3SHariprasad Shenai  *	@qid: the SGE Queue ID
2371df64e4d3SHariprasad Shenai  *	@qtype: the SGE Queue Type (Egress or Ingress)
2372df64e4d3SHariprasad Shenai  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2373d63a6dcfSHariprasad Shenai  *
2374df64e4d3SHariprasad Shenai  *	Returns the BAR2 address for the SGE Queue Registers associated with
2375df64e4d3SHariprasad Shenai  *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
2376df64e4d3SHariprasad Shenai  *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2377df64e4d3SHariprasad Shenai  *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2378df64e4d3SHariprasad Shenai  *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
2379d63a6dcfSHariprasad Shenai  */
2380df64e4d3SHariprasad Shenai static void __iomem *bar2_address(struct adapter *adapter,
2381df64e4d3SHariprasad Shenai 				  unsigned int qid,
2382df64e4d3SHariprasad Shenai 				  enum t4_bar2_qtype qtype,
2383df64e4d3SHariprasad Shenai 				  unsigned int *pbar2_qid)
2384d63a6dcfSHariprasad Shenai {
2385df64e4d3SHariprasad Shenai 	u64 bar2_qoffset;
2386df64e4d3SHariprasad Shenai 	int ret;
2387d63a6dcfSHariprasad Shenai 
2388dd0bcc0bSStephen Rothwell 	ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
2389df64e4d3SHariprasad Shenai 				&bar2_qoffset, pbar2_qid);
2390df64e4d3SHariprasad Shenai 	if (ret)
2391df64e4d3SHariprasad Shenai 		return NULL;
2392d63a6dcfSHariprasad Shenai 
2393df64e4d3SHariprasad Shenai 	return adapter->bar2 + bar2_qoffset;
2394d63a6dcfSHariprasad Shenai }
2395d63a6dcfSHariprasad Shenai 
2396f7917c00SJeff Kirsher int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2397f7917c00SJeff Kirsher 		     struct net_device *dev, int intr_idx,
2398f7917c00SJeff Kirsher 		     struct sge_fl *fl, rspq_handler_t hnd)
2399f7917c00SJeff Kirsher {
2400f7917c00SJeff Kirsher 	int ret, flsz = 0;
2401f7917c00SJeff Kirsher 	struct fw_iq_cmd c;
240252367a76SVipul Pandya 	struct sge *s = &adap->sge;
2403f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2404f7917c00SJeff Kirsher 
2405f7917c00SJeff Kirsher 	/* Size needs to be a multiple of 16, including the status entry. */
2406f7917c00SJeff Kirsher 	iq->size = roundup(iq->size, 16);
2407f7917c00SJeff Kirsher 
2408f7917c00SJeff Kirsher 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2409f7917c00SJeff Kirsher 			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
2410f7917c00SJeff Kirsher 	if (!iq->desc)
2411f7917c00SJeff Kirsher 		return -ENOMEM;
2412f7917c00SJeff Kirsher 
2413f7917c00SJeff Kirsher 	memset(&c, 0, sizeof(c));
2414e2ac9628SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
2415e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
24166e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
24176e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
2418f7917c00SJeff Kirsher 				 FW_LEN16(c));
24196e4b51a6SHariprasad Shenai 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
24206e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
24216e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
24226e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2423f7917c00SJeff Kirsher 							-intr_idx - 1));
24246e4b51a6SHariprasad Shenai 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
24256e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQGTSMODE_F |
24266e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
24276e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
2428f7917c00SJeff Kirsher 	c.iqsize = htons(iq->size);
2429f7917c00SJeff Kirsher 	c.iqaddr = cpu_to_be64(iq->phys_addr);
2430f7917c00SJeff Kirsher 
2431f7917c00SJeff Kirsher 	if (fl) {
2432f7917c00SJeff Kirsher 		fl->size = roundup(fl->size, 8);
2433f7917c00SJeff Kirsher 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2434f7917c00SJeff Kirsher 				      sizeof(struct rx_sw_desc), &fl->addr,
243552367a76SVipul Pandya 				      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2436f7917c00SJeff Kirsher 		if (!fl->desc)
2437f7917c00SJeff Kirsher 			goto fl_nomem;
2438f7917c00SJeff Kirsher 
243952367a76SVipul Pandya 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
24406e4b51a6SHariprasad Shenai 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
24416e4b51a6SHariprasad Shenai 					    FW_IQ_CMD_FL0FETCHRO_F |
24426e4b51a6SHariprasad Shenai 					    FW_IQ_CMD_FL0DATARO_F |
24436e4b51a6SHariprasad Shenai 					    FW_IQ_CMD_FL0PADEN_F);
24446e4b51a6SHariprasad Shenai 		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
24456e4b51a6SHariprasad Shenai 				FW_IQ_CMD_FL0FBMAX_V(3));
2446f7917c00SJeff Kirsher 		c.fl0size = htons(flsz);
2447f7917c00SJeff Kirsher 		c.fl0addr = cpu_to_be64(fl->addr);
2448f7917c00SJeff Kirsher 	}
2449f7917c00SJeff Kirsher 
2450f7917c00SJeff Kirsher 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2451f7917c00SJeff Kirsher 	if (ret)
2452f7917c00SJeff Kirsher 		goto err;
2453f7917c00SJeff Kirsher 
2454f7917c00SJeff Kirsher 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
24553a336cb1SHariprasad Shenai 	napi_hash_add(&iq->napi);
2456f7917c00SJeff Kirsher 	iq->cur_desc = iq->desc;
2457f7917c00SJeff Kirsher 	iq->cidx = 0;
2458f7917c00SJeff Kirsher 	iq->gen = 1;
2459f7917c00SJeff Kirsher 	iq->next_intr_params = iq->intr_params;
2460f7917c00SJeff Kirsher 	iq->cntxt_id = ntohs(c.iqid);
2461f7917c00SJeff Kirsher 	iq->abs_id = ntohs(c.physiqid);
2462df64e4d3SHariprasad Shenai 	iq->bar2_addr = bar2_address(adap,
2463df64e4d3SHariprasad Shenai 				     iq->cntxt_id,
2464df64e4d3SHariprasad Shenai 				     T4_BAR2_QTYPE_INGRESS,
2465df64e4d3SHariprasad Shenai 				     &iq->bar2_qid);
2466f7917c00SJeff Kirsher 	iq->size--;                           /* subtract status entry */
2467f7917c00SJeff Kirsher 	iq->netdev = dev;
2468f7917c00SJeff Kirsher 	iq->handler = hnd;
2469f7917c00SJeff Kirsher 
2470f7917c00SJeff Kirsher 	/* set offset to -1 to distinguish ingress queues without FL */
2471f7917c00SJeff Kirsher 	iq->offset = fl ? 0 : -1;
2472f7917c00SJeff Kirsher 
2473f7917c00SJeff Kirsher 	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2474f7917c00SJeff Kirsher 
2475f7917c00SJeff Kirsher 	if (fl) {
2476f7917c00SJeff Kirsher 		fl->cntxt_id = ntohs(c.fl0id);
2477f7917c00SJeff Kirsher 		fl->avail = fl->pend_cred = 0;
2478f7917c00SJeff Kirsher 		fl->pidx = fl->cidx = 0;
2479f7917c00SJeff Kirsher 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2480f7917c00SJeff Kirsher 		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2481d63a6dcfSHariprasad Shenai 
2482df64e4d3SHariprasad Shenai 		/* Note, we must initialize the BAR2 Free List User Doorbell
2483df64e4d3SHariprasad Shenai 		 * information before refilling the Free List!
2484d63a6dcfSHariprasad Shenai 		 */
2485df64e4d3SHariprasad Shenai 		fl->bar2_addr = bar2_address(adap,
2486df64e4d3SHariprasad Shenai 					     fl->cntxt_id,
2487df64e4d3SHariprasad Shenai 					     T4_BAR2_QTYPE_EGRESS,
2488df64e4d3SHariprasad Shenai 					     &fl->bar2_qid);
2489f7917c00SJeff Kirsher 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2490f7917c00SJeff Kirsher 	}
2491f7917c00SJeff Kirsher 	return 0;
2492f7917c00SJeff Kirsher 
2493f7917c00SJeff Kirsher fl_nomem:
2494f7917c00SJeff Kirsher 	ret = -ENOMEM;
2495f7917c00SJeff Kirsher err:
2496f7917c00SJeff Kirsher 	if (iq->desc) {
2497f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2498f7917c00SJeff Kirsher 				  iq->desc, iq->phys_addr);
2499f7917c00SJeff Kirsher 		iq->desc = NULL;
2500f7917c00SJeff Kirsher 	}
2501f7917c00SJeff Kirsher 	if (fl && fl->desc) {
2502f7917c00SJeff Kirsher 		kfree(fl->sdesc);
2503f7917c00SJeff Kirsher 		fl->sdesc = NULL;
2504f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2505f7917c00SJeff Kirsher 				  fl->desc, fl->addr);
2506f7917c00SJeff Kirsher 		fl->desc = NULL;
2507f7917c00SJeff Kirsher 	}
2508f7917c00SJeff Kirsher 	return ret;
2509f7917c00SJeff Kirsher }
2510f7917c00SJeff Kirsher 
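/* Hypothetical usage sketch (the queue sizing values are illustrative, not
 * the driver's defaults): allocating an Ethernet Rx queue set with a Free
 * List might look roughly like this:
 */
static inline int example_alloc_eth_rxq(struct adapter *adap,
					struct sge_eth_rxq *q,
					struct net_device *dev, int msix_idx)
{
	q->rspq.size = 1024;	/* response queue entries */
	q->rspq.iqe_len = 64;	/* bytes per response entry */
	q->fl.size = 72;	/* Free List entries */
	return t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msix_idx,
				&q->fl, t4_ethrx_handler);
}
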
2511f7917c00SJeff Kirsher static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2512f7917c00SJeff Kirsher {
251322adfe0aSSantosh Rastapur 	q->cntxt_id = id;
2514df64e4d3SHariprasad Shenai 	q->bar2_addr = bar2_address(adap,
2515df64e4d3SHariprasad Shenai 				    q->cntxt_id,
2516df64e4d3SHariprasad Shenai 				    T4_BAR2_QTYPE_EGRESS,
2517df64e4d3SHariprasad Shenai 				    &q->bar2_qid);
2518f7917c00SJeff Kirsher 	q->in_use = 0;
2519f7917c00SJeff Kirsher 	q->cidx = q->pidx = 0;
2520f7917c00SJeff Kirsher 	q->stops = q->restarts = 0;
2521f7917c00SJeff Kirsher 	q->stat = (void *)&q->desc[q->size];
25223069ee9bSVipul Pandya 	spin_lock_init(&q->db_lock);
2523f7917c00SJeff Kirsher 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
2524f7917c00SJeff Kirsher }
2525f7917c00SJeff Kirsher 
2526f7917c00SJeff Kirsher int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2527f7917c00SJeff Kirsher 			 struct net_device *dev, struct netdev_queue *netdevq,
2528f7917c00SJeff Kirsher 			 unsigned int iqid)
2529f7917c00SJeff Kirsher {
2530f7917c00SJeff Kirsher 	int ret, nentries;
2531f7917c00SJeff Kirsher 	struct fw_eq_eth_cmd c;
253252367a76SVipul Pandya 	struct sge *s = &adap->sge;
2533f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2534f7917c00SJeff Kirsher 
2535f7917c00SJeff Kirsher 	/* Add status entries */
253652367a76SVipul Pandya 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2537f7917c00SJeff Kirsher 
2538f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2539f7917c00SJeff Kirsher 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
254052367a76SVipul Pandya 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2541f7917c00SJeff Kirsher 			netdev_queue_numa_node_read(netdevq));
2542f7917c00SJeff Kirsher 	if (!txq->q.desc)
2543f7917c00SJeff Kirsher 		return -ENOMEM;
2544f7917c00SJeff Kirsher 
2545f7917c00SJeff Kirsher 	memset(&c, 0, sizeof(c));
2546e2ac9628SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
2547e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
25486e4b51a6SHariprasad Shenai 			    FW_EQ_ETH_CMD_PFN_V(adap->fn) |
25496e4b51a6SHariprasad Shenai 			    FW_EQ_ETH_CMD_VFN_V(0));
25506e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
25516e4b51a6SHariprasad Shenai 				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
25526e4b51a6SHariprasad Shenai 	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
25536e4b51a6SHariprasad Shenai 			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
25546e4b51a6SHariprasad Shenai 	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
25556e4b51a6SHariprasad Shenai 				   FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
25566e4b51a6SHariprasad Shenai 				   FW_EQ_ETH_CMD_FETCHRO_V(1) |
25576e4b51a6SHariprasad Shenai 				   FW_EQ_ETH_CMD_IQID_V(iqid));
25586e4b51a6SHariprasad Shenai 	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
25596e4b51a6SHariprasad Shenai 				  FW_EQ_ETH_CMD_FBMAX_V(3) |
25606e4b51a6SHariprasad Shenai 				  FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
25616e4b51a6SHariprasad Shenai 				  FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2562f7917c00SJeff Kirsher 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2563f7917c00SJeff Kirsher 
2564f7917c00SJeff Kirsher 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2565f7917c00SJeff Kirsher 	if (ret) {
2566f7917c00SJeff Kirsher 		kfree(txq->q.sdesc);
2567f7917c00SJeff Kirsher 		txq->q.sdesc = NULL;
2568f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev,
2569f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
2570f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
2571f7917c00SJeff Kirsher 		txq->q.desc = NULL;
2572f7917c00SJeff Kirsher 		return ret;
2573f7917c00SJeff Kirsher 	}
2574f7917c00SJeff Kirsher 
25756e4b51a6SHariprasad Shenai 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
2576f7917c00SJeff Kirsher 	txq->txq = netdevq;
2577f7917c00SJeff Kirsher 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2578f7917c00SJeff Kirsher 	txq->mapping_err = 0;
2579f7917c00SJeff Kirsher 	return 0;
2580f7917c00SJeff Kirsher }
2581f7917c00SJeff Kirsher 
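/* Hypothetical usage sketch (sizing value illustrative): the iqid passed
 * below is the context ID of the ingress queue that should receive this Tx
 * queue's CIDX updates, e.g. the firmware event queue's.
 */
static inline int example_alloc_eth_txq(struct adapter *adap,
					struct sge_eth_txq *t,
					struct net_device *dev, int qidx,
					unsigned int iqid)
{
	t->q.size = 1024;	/* Tx descriptors */
	return t4_sge_alloc_eth_txq(adap, t, dev,
				    netdev_get_tx_queue(dev, qidx), iqid);
}
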
2582f7917c00SJeff Kirsher int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2583f7917c00SJeff Kirsher 			  struct net_device *dev, unsigned int iqid,
2584f7917c00SJeff Kirsher 			  unsigned int cmplqid)
2585f7917c00SJeff Kirsher {
2586f7917c00SJeff Kirsher 	int ret, nentries;
2587f7917c00SJeff Kirsher 	struct fw_eq_ctrl_cmd c;
258852367a76SVipul Pandya 	struct sge *s = &adap->sge;
2589f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2590f7917c00SJeff Kirsher 
2591f7917c00SJeff Kirsher 	/* Add status entries */
259252367a76SVipul Pandya 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2593f7917c00SJeff Kirsher 
2594f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2595f7917c00SJeff Kirsher 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2596f7917c00SJeff Kirsher 				 NULL, 0, NUMA_NO_NODE);
2597f7917c00SJeff Kirsher 	if (!txq->q.desc)
2598f7917c00SJeff Kirsher 		return -ENOMEM;
2599f7917c00SJeff Kirsher 
2600e2ac9628SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
2601e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
26026e4b51a6SHariprasad Shenai 			    FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
26036e4b51a6SHariprasad Shenai 			    FW_EQ_CTRL_CMD_VFN_V(0));
26046e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
26056e4b51a6SHariprasad Shenai 				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
26066e4b51a6SHariprasad Shenai 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
2607f7917c00SJeff Kirsher 	c.physeqid_pkd = htonl(0);
26086e4b51a6SHariprasad Shenai 	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
26096e4b51a6SHariprasad Shenai 				   FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
26106e4b51a6SHariprasad Shenai 				   FW_EQ_CTRL_CMD_FETCHRO_F |
26116e4b51a6SHariprasad Shenai 				   FW_EQ_CTRL_CMD_IQID_V(iqid));
26126e4b51a6SHariprasad Shenai 	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
26136e4b51a6SHariprasad Shenai 				  FW_EQ_CTRL_CMD_FBMAX_V(3) |
26146e4b51a6SHariprasad Shenai 				  FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
26156e4b51a6SHariprasad Shenai 				  FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
2616f7917c00SJeff Kirsher 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2617f7917c00SJeff Kirsher 
2618f7917c00SJeff Kirsher 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2619f7917c00SJeff Kirsher 	if (ret) {
2620f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev,
2621f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
2622f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
2623f7917c00SJeff Kirsher 		txq->q.desc = NULL;
2624f7917c00SJeff Kirsher 		return ret;
2625f7917c00SJeff Kirsher 	}
2626f7917c00SJeff Kirsher 
26276e4b51a6SHariprasad Shenai 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
2628f7917c00SJeff Kirsher 	txq->adap = adap;
2629f7917c00SJeff Kirsher 	skb_queue_head_init(&txq->sendq);
2630f7917c00SJeff Kirsher 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2631f7917c00SJeff Kirsher 	txq->full = 0;
2632f7917c00SJeff Kirsher 	return 0;
2633f7917c00SJeff Kirsher }
2634f7917c00SJeff Kirsher 
2635f7917c00SJeff Kirsher int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2636f7917c00SJeff Kirsher 			  struct net_device *dev, unsigned int iqid)
2637f7917c00SJeff Kirsher {
2638f7917c00SJeff Kirsher 	int ret, nentries;
2639f7917c00SJeff Kirsher 	struct fw_eq_ofld_cmd c;
264052367a76SVipul Pandya 	struct sge *s = &adap->sge;
2641f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2642f7917c00SJeff Kirsher 
2643f7917c00SJeff Kirsher 	/* Add status entries */
264452367a76SVipul Pandya 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2645f7917c00SJeff Kirsher 
2646f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2647f7917c00SJeff Kirsher 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
264852367a76SVipul Pandya 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2649f7917c00SJeff Kirsher 			NUMA_NO_NODE);
2650f7917c00SJeff Kirsher 	if (!txq->q.desc)
2651f7917c00SJeff Kirsher 		return -ENOMEM;
2652f7917c00SJeff Kirsher 
2653f7917c00SJeff Kirsher 	memset(&c, 0, sizeof(c));
2654e2ac9628SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
2655e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
26566e4b51a6SHariprasad Shenai 			    FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
26576e4b51a6SHariprasad Shenai 			    FW_EQ_OFLD_CMD_VFN_V(0));
26586e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
26596e4b51a6SHariprasad Shenai 				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
26606e4b51a6SHariprasad Shenai 	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
26616e4b51a6SHariprasad Shenai 				   FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
26626e4b51a6SHariprasad Shenai 				   FW_EQ_OFLD_CMD_FETCHRO_F |
26636e4b51a6SHariprasad Shenai 				   FW_EQ_OFLD_CMD_IQID_V(iqid));
26646e4b51a6SHariprasad Shenai 	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
26656e4b51a6SHariprasad Shenai 				  FW_EQ_OFLD_CMD_FBMAX_V(3) |
26666e4b51a6SHariprasad Shenai 				  FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
26676e4b51a6SHariprasad Shenai 				  FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
2668f7917c00SJeff Kirsher 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2669f7917c00SJeff Kirsher 
2670f7917c00SJeff Kirsher 	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2671f7917c00SJeff Kirsher 	if (ret) {
2672f7917c00SJeff Kirsher 		kfree(txq->q.sdesc);
2673f7917c00SJeff Kirsher 		txq->q.sdesc = NULL;
2674f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev,
2675f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
2676f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
2677f7917c00SJeff Kirsher 		txq->q.desc = NULL;
2678f7917c00SJeff Kirsher 		return ret;
2679f7917c00SJeff Kirsher 	}
2680f7917c00SJeff Kirsher 
26816e4b51a6SHariprasad Shenai 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
2682f7917c00SJeff Kirsher 	txq->adap = adap;
2683f7917c00SJeff Kirsher 	skb_queue_head_init(&txq->sendq);
2684f7917c00SJeff Kirsher 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2685f7917c00SJeff Kirsher 	txq->full = 0;
2686f7917c00SJeff Kirsher 	txq->mapping_err = 0;
2687f7917c00SJeff Kirsher 	return 0;
2688f7917c00SJeff Kirsher }
2689f7917c00SJeff Kirsher 
2690f7917c00SJeff Kirsher static void free_txq(struct adapter *adap, struct sge_txq *q)
2691f7917c00SJeff Kirsher {
269252367a76SVipul Pandya 	struct sge *s = &adap->sge;
269352367a76SVipul Pandya 
2694f7917c00SJeff Kirsher 	dma_free_coherent(adap->pdev_dev,
269552367a76SVipul Pandya 			  q->size * sizeof(struct tx_desc) + s->stat_len,
2696f7917c00SJeff Kirsher 			  q->desc, q->phys_addr);
2697f7917c00SJeff Kirsher 	q->cntxt_id = 0;
2698f7917c00SJeff Kirsher 	q->sdesc = NULL;
2699f7917c00SJeff Kirsher 	q->desc = NULL;
2700f7917c00SJeff Kirsher }
2701f7917c00SJeff Kirsher 
2702f7917c00SJeff Kirsher static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2703f7917c00SJeff Kirsher 			 struct sge_fl *fl)
2704f7917c00SJeff Kirsher {
270552367a76SVipul Pandya 	struct sge *s = &adap->sge;
2706f7917c00SJeff Kirsher 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2707f7917c00SJeff Kirsher 
2708f7917c00SJeff Kirsher 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2709f7917c00SJeff Kirsher 	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2710f7917c00SJeff Kirsher 		   rq->cntxt_id, fl_id, 0xffff);
2711f7917c00SJeff Kirsher 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2712f7917c00SJeff Kirsher 			  rq->desc, rq->phys_addr);
27133a336cb1SHariprasad Shenai 	napi_hash_del(&rq->napi);
2714f7917c00SJeff Kirsher 	netif_napi_del(&rq->napi);
2715f7917c00SJeff Kirsher 	rq->netdev = NULL;
2716f7917c00SJeff Kirsher 	rq->cntxt_id = rq->abs_id = 0;
2717f7917c00SJeff Kirsher 	rq->desc = NULL;
2718f7917c00SJeff Kirsher 
2719f7917c00SJeff Kirsher 	if (fl) {
2720f7917c00SJeff Kirsher 		free_rx_bufs(adap, fl, fl->avail);
272152367a76SVipul Pandya 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2722f7917c00SJeff Kirsher 				  fl->desc, fl->addr);
2723f7917c00SJeff Kirsher 		kfree(fl->sdesc);
2724f7917c00SJeff Kirsher 		fl->sdesc = NULL;
2725f7917c00SJeff Kirsher 		fl->cntxt_id = 0;
2726f7917c00SJeff Kirsher 		fl->desc = NULL;
2727f7917c00SJeff Kirsher 	}
2728f7917c00SJeff Kirsher }
2729f7917c00SJeff Kirsher 
2730f7917c00SJeff Kirsher /**
27315fa76694SHariprasad Shenai  *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
27325fa76694SHariprasad Shenai  *      @adap: the adapter
27335fa76694SHariprasad Shenai  *      @n: number of queues
27345fa76694SHariprasad Shenai  *      @q: pointer to first queue
27355fa76694SHariprasad Shenai  *
27365fa76694SHariprasad Shenai  *      Release the resources of a consecutive block of offload Rx queues.
27375fa76694SHariprasad Shenai  */
27385fa76694SHariprasad Shenai void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
27395fa76694SHariprasad Shenai {
27405fa76694SHariprasad Shenai 	for ( ; n; n--, q++)
27415fa76694SHariprasad Shenai 		if (q->rspq.desc)
27425fa76694SHariprasad Shenai 			free_rspq_fl(adap, &q->rspq,
27435fa76694SHariprasad Shenai 				     q->fl.size ? &q->fl : NULL);
27445fa76694SHariprasad Shenai }
27455fa76694SHariprasad Shenai 
27465fa76694SHariprasad Shenai /**
2747f7917c00SJeff Kirsher  *	t4_free_sge_resources - free SGE resources
2748f7917c00SJeff Kirsher  *	@adap: the adapter
2749f7917c00SJeff Kirsher  *
2750f7917c00SJeff Kirsher  *	Frees resources used by the SGE queue sets.
2751f7917c00SJeff Kirsher  */
2752f7917c00SJeff Kirsher void t4_free_sge_resources(struct adapter *adap)
2753f7917c00SJeff Kirsher {
2754f7917c00SJeff Kirsher 	int i;
2755f7917c00SJeff Kirsher 	struct sge_eth_rxq *eq = adap->sge.ethrxq;
2756f7917c00SJeff Kirsher 	struct sge_eth_txq *etq = adap->sge.ethtxq;
2757f7917c00SJeff Kirsher 
2758f7917c00SJeff Kirsher 	/* clean up Ethernet Tx/Rx queues */
2759f7917c00SJeff Kirsher 	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2760f7917c00SJeff Kirsher 		if (eq->rspq.desc)
27615fa76694SHariprasad Shenai 			free_rspq_fl(adap, &eq->rspq,
27625fa76694SHariprasad Shenai 				     eq->fl.size ? &eq->fl : NULL);
2763f7917c00SJeff Kirsher 		if (etq->q.desc) {
2764f7917c00SJeff Kirsher 			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2765f7917c00SJeff Kirsher 				       etq->q.cntxt_id);
2766f7917c00SJeff Kirsher 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2767f7917c00SJeff Kirsher 			kfree(etq->q.sdesc);
2768f7917c00SJeff Kirsher 			free_txq(adap, &etq->q);
2769f7917c00SJeff Kirsher 		}
2770f7917c00SJeff Kirsher 	}
2771f7917c00SJeff Kirsher 
2772f7917c00SJeff Kirsher 	/* clean up RDMA and iSCSI Rx queues */
27735fa76694SHariprasad Shenai 	t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
27745fa76694SHariprasad Shenai 	t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
27755fa76694SHariprasad Shenai 	t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
2776f7917c00SJeff Kirsher 
2777f7917c00SJeff Kirsher 	/* clean up offload Tx queues */
2778f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2779f7917c00SJeff Kirsher 		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2780f7917c00SJeff Kirsher 
2781f7917c00SJeff Kirsher 		if (q->q.desc) {
2782f7917c00SJeff Kirsher 			tasklet_kill(&q->qresume_tsk);
2783f7917c00SJeff Kirsher 			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2784f7917c00SJeff Kirsher 					q->q.cntxt_id);
2785f7917c00SJeff Kirsher 			free_tx_desc(adap, &q->q, q->q.in_use, false);
2786f7917c00SJeff Kirsher 			kfree(q->q.sdesc);
2787f7917c00SJeff Kirsher 			__skb_queue_purge(&q->sendq);
2788f7917c00SJeff Kirsher 			free_txq(adap, &q->q);
2789f7917c00SJeff Kirsher 		}
2790f7917c00SJeff Kirsher 	}
2791f7917c00SJeff Kirsher 
2792f7917c00SJeff Kirsher 	/* clean up control Tx queues */
2793f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2794f7917c00SJeff Kirsher 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2795f7917c00SJeff Kirsher 
2796f7917c00SJeff Kirsher 		if (cq->q.desc) {
2797f7917c00SJeff Kirsher 			tasklet_kill(&cq->qresume_tsk);
2798f7917c00SJeff Kirsher 			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2799f7917c00SJeff Kirsher 					cq->q.cntxt_id);
2800f7917c00SJeff Kirsher 			__skb_queue_purge(&cq->sendq);
2801f7917c00SJeff Kirsher 			free_txq(adap, &cq->q);
2802f7917c00SJeff Kirsher 		}
2803f7917c00SJeff Kirsher 	}
2804f7917c00SJeff Kirsher 
2805f7917c00SJeff Kirsher 	if (adap->sge.fw_evtq.desc)
2806f7917c00SJeff Kirsher 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2807f7917c00SJeff Kirsher 
2808f7917c00SJeff Kirsher 	if (adap->sge.intrq.desc)
2809f7917c00SJeff Kirsher 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
2810f7917c00SJeff Kirsher 
2811f7917c00SJeff Kirsher 	/* clear the reverse egress queue map */
28124b8e27a8SHariprasad Shenai 	memset(adap->sge.egr_map, 0,
28134b8e27a8SHariprasad Shenai 	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
2814f7917c00SJeff Kirsher }
2815f7917c00SJeff Kirsher 
2816f7917c00SJeff Kirsher void t4_sge_start(struct adapter *adap)
2817f7917c00SJeff Kirsher {
2818f7917c00SJeff Kirsher 	adap->sge.ethtxq_rover = 0;
2819f7917c00SJeff Kirsher 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2820f7917c00SJeff Kirsher 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2821f7917c00SJeff Kirsher }
2822f7917c00SJeff Kirsher 
2823f7917c00SJeff Kirsher /**
2824f7917c00SJeff Kirsher  *	t4_sge_stop - disable SGE operation
2825f7917c00SJeff Kirsher  *	@adap: the adapter
2826f7917c00SJeff Kirsher  *
2827f7917c00SJeff Kirsher  *	Stop tasklets and timers associated with the DMA engine.  Note that
2828f7917c00SJeff Kirsher  *	this is effective only if measures have been taken to disable any HW
2829f7917c00SJeff Kirsher  *	events that may restart them.
2830f7917c00SJeff Kirsher  */
2831f7917c00SJeff Kirsher void t4_sge_stop(struct adapter *adap)
2832f7917c00SJeff Kirsher {
2833f7917c00SJeff Kirsher 	int i;
2834f7917c00SJeff Kirsher 	struct sge *s = &adap->sge;
2835f7917c00SJeff Kirsher 
2836f7917c00SJeff Kirsher 	if (in_interrupt())  /* actions below require waiting */
2837f7917c00SJeff Kirsher 		return;
2838f7917c00SJeff Kirsher 
2839f7917c00SJeff Kirsher 	if (s->rx_timer.function)
2840f7917c00SJeff Kirsher 		del_timer_sync(&s->rx_timer);
2841f7917c00SJeff Kirsher 	if (s->tx_timer.function)
2842f7917c00SJeff Kirsher 		del_timer_sync(&s->tx_timer);
2843f7917c00SJeff Kirsher 
2844f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2845f7917c00SJeff Kirsher 		struct sge_ofld_txq *q = &s->ofldtxq[i];
2846f7917c00SJeff Kirsher 
2847f7917c00SJeff Kirsher 		if (q->q.desc)
2848f7917c00SJeff Kirsher 			tasklet_kill(&q->qresume_tsk);
2849f7917c00SJeff Kirsher 	}
2850f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2851f7917c00SJeff Kirsher 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
2852f7917c00SJeff Kirsher 
2853f7917c00SJeff Kirsher 		if (cq->q.desc)
2854f7917c00SJeff Kirsher 			tasklet_kill(&cq->qresume_tsk);
2855f7917c00SJeff Kirsher 	}
2856f7917c00SJeff Kirsher }
2857f7917c00SJeff Kirsher 
2858f7917c00SJeff Kirsher /**
285906640310SHariprasad Shenai  *	t4_sge_init_soft - grab core SGE values needed by SGE code
2860f7917c00SJeff Kirsher  *	@adap: the adapter
2861f7917c00SJeff Kirsher  *
286206640310SHariprasad Shenai  *	Read the SGE operating parameters that we need to do our job and
286306640310SHariprasad Shenai  *	make sure we can live with them.
2864f7917c00SJeff Kirsher  */
286652367a76SVipul Pandya static int t4_sge_init_soft(struct adapter *adap)
286752367a76SVipul Pandya {
286852367a76SVipul Pandya 	struct sge *s = &adap->sge;
286952367a76SVipul Pandya 	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
287052367a76SVipul Pandya 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
287152367a76SVipul Pandya 	u32 ingress_rx_threshold;
287252367a76SVipul Pandya 
287352367a76SVipul Pandya 	/*
287452367a76SVipul Pandya 	 * Verify that CPL messages are going to the Ingress Queue for
287552367a76SVipul Pandya 	 * process_responses() and that only packet data is going to the
287652367a76SVipul Pandya 	 * Free Lists.
287752367a76SVipul Pandya 	 */
2878f612b815SHariprasad Shenai 	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
2879f612b815SHariprasad Shenai 	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
288052367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
288152367a76SVipul Pandya 		return -EINVAL;
288252367a76SVipul Pandya 	}
288352367a76SVipul Pandya 
288452367a76SVipul Pandya 	/*
288552367a76SVipul Pandya 	 * Validate the Host Buffer Register Array indices that we want to
288652367a76SVipul Pandya 	 * use ...
288752367a76SVipul Pandya 	 *
288852367a76SVipul Pandya 	 * XXX Note that we should really read through the Host Buffer Size
288952367a76SVipul Pandya 	 * XXX register array and find the indices of the Buffer Sizes which
289052367a76SVipul Pandya 	 * XXX meet our needs!
289152367a76SVipul Pandya 	 */
289252367a76SVipul Pandya 	#define READ_FL_BUF(x) \
2893f612b815SHariprasad Shenai 		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
289452367a76SVipul Pandya 
289552367a76SVipul Pandya 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
289652367a76SVipul Pandya 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
289752367a76SVipul Pandya 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
289852367a76SVipul Pandya 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
289952367a76SVipul Pandya 
290092ddcc7bSKumar Sanghvi 	/* We only bother using the Large Page logic if the Large Page Buffer
290192ddcc7bSKumar Sanghvi 	 * is larger than our Page Size Buffer.
290292ddcc7bSKumar Sanghvi 	 */
290392ddcc7bSKumar Sanghvi 	if (fl_large_pg <= fl_small_pg)
290492ddcc7bSKumar Sanghvi 		fl_large_pg = 0;
290592ddcc7bSKumar Sanghvi 
290652367a76SVipul Pandya 	#undef READ_FL_BUF
290752367a76SVipul Pandya 
290892ddcc7bSKumar Sanghvi 	/* The Page Size Buffer must be exactly equal to our Page Size and the
290992ddcc7bSKumar Sanghvi 	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
291092ddcc7bSKumar Sanghvi 	 */
291152367a76SVipul Pandya 	if (fl_small_pg != PAGE_SIZE ||
291292ddcc7bSKumar Sanghvi 	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
291352367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
291452367a76SVipul Pandya 			fl_small_pg, fl_large_pg);
291552367a76SVipul Pandya 		return -EINVAL;
291652367a76SVipul Pandya 	}
291752367a76SVipul Pandya 	if (fl_large_pg)
291852367a76SVipul Pandya 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
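	/* E.g. with 4KB pages (PAGE_SHIFT == 12) and a 64KB Large Page
	 * Buffer, fl_pg_order = ilog2(65536) - 12 = 4.
	 */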
291952367a76SVipul Pandya 
292052367a76SVipul Pandya 	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
292152367a76SVipul Pandya 	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
292252367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
292352367a76SVipul Pandya 			fl_small_mtu, fl_large_mtu);
292452367a76SVipul Pandya 		return -EINVAL;
292552367a76SVipul Pandya 	}
292652367a76SVipul Pandya 
292752367a76SVipul Pandya 	/*
292852367a76SVipul Pandya 	 * Retrieve our RX interrupt holdoff timer values and counter
292952367a76SVipul Pandya 	 * threshold values from the SGE parameters.
293052367a76SVipul Pandya 	 */
2931f061de42SHariprasad Shenai 	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
2932f061de42SHariprasad Shenai 	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
2933f061de42SHariprasad Shenai 	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
293452367a76SVipul Pandya 	s->timer_val[0] = core_ticks_to_us(adap,
2935f061de42SHariprasad Shenai 		TIMERVALUE0_G(timer_value_0_and_1));
293652367a76SVipul Pandya 	s->timer_val[1] = core_ticks_to_us(adap,
2937f061de42SHariprasad Shenai 		TIMERVALUE1_G(timer_value_0_and_1));
293852367a76SVipul Pandya 	s->timer_val[2] = core_ticks_to_us(adap,
2939f061de42SHariprasad Shenai 		TIMERVALUE2_G(timer_value_2_and_3));
294052367a76SVipul Pandya 	s->timer_val[3] = core_ticks_to_us(adap,
2941f061de42SHariprasad Shenai 		TIMERVALUE3_G(timer_value_2_and_3));
294252367a76SVipul Pandya 	s->timer_val[4] = core_ticks_to_us(adap,
2943f061de42SHariprasad Shenai 		TIMERVALUE4_G(timer_value_4_and_5));
294452367a76SVipul Pandya 	s->timer_val[5] = core_ticks_to_us(adap,
2945f061de42SHariprasad Shenai 		TIMERVALUE5_G(timer_value_4_and_5));
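	/* E.g. with a (purely illustrative) 250 MHz core clock, a raw
	 * TIMERVALUE of 1250 core ticks converts to a 5 usec interrupt
	 * holdoff.
	 */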
294652367a76SVipul Pandya 
2947f612b815SHariprasad Shenai 	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
2948f612b815SHariprasad Shenai 	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
2949f612b815SHariprasad Shenai 	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
2950f612b815SHariprasad Shenai 	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
2951f612b815SHariprasad Shenai 	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
295252367a76SVipul Pandya 
295352367a76SVipul Pandya 	return 0;
295452367a76SVipul Pandya }
295552367a76SVipul Pandya 
295606640310SHariprasad Shenai /**
295706640310SHariprasad Shenai  *     t4_sge_init - initialize SGE
295806640310SHariprasad Shenai  *     @adap: the adapter
295906640310SHariprasad Shenai  *
296006640310SHariprasad Shenai  *     Perform low-level SGE code initialization needed every time after a
296106640310SHariprasad Shenai  *     chip reset.
296252367a76SVipul Pandya  */
296352367a76SVipul Pandya int t4_sge_init(struct adapter *adap)
296452367a76SVipul Pandya {
296552367a76SVipul Pandya 	struct sge *s = &adap->sge;
2966ce8f407aSHariprasad Shenai 	u32 sge_control, sge_control2, sge_conm_ctrl;
2967ce8f407aSHariprasad Shenai 	unsigned int ingpadboundary, ingpackboundary;
2968c2b955e0SKumar Sanghvi 	int ret, egress_threshold;
296952367a76SVipul Pandya 
297052367a76SVipul Pandya 	/*
297152367a76SVipul Pandya 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
297252367a76SVipul Pandya 	 * t4_fixup_host_params().
297352367a76SVipul Pandya 	 */
2974f612b815SHariprasad Shenai 	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
2975f612b815SHariprasad Shenai 	s->pktshift = PKTSHIFT_G(sge_control);
2976f612b815SHariprasad Shenai 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
2977ce8f407aSHariprasad Shenai 
2978ce8f407aSHariprasad Shenai 	/* T4 uses a single control field to specify both the PCIe Padding and
2979ce8f407aSHariprasad Shenai 	 * Packing Boundary.  T5 introduced the ability to specify these
2980ce8f407aSHariprasad Shenai 	 * separately.  The actual Ingress Packet Data alignment boundary
2981ce8f407aSHariprasad Shenai 	 * within Packed Buffer Mode is the maximum of these two
2982ce8f407aSHariprasad Shenai 	 * specifications.
2983ce8f407aSHariprasad Shenai 	 */
2984f612b815SHariprasad Shenai 	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
2985f612b815SHariprasad Shenai 			       INGPADBOUNDARY_SHIFT_X);
2986ce8f407aSHariprasad Shenai 	if (is_t4(adap->params.chip)) {
2987ce8f407aSHariprasad Shenai 		s->fl_align = ingpadboundary;
2988ce8f407aSHariprasad Shenai 	} else {
2989ce8f407aSHariprasad Shenai 		/* T5 has a different interpretation of one of the PCIe Packing
2990ce8f407aSHariprasad Shenai 		 * Boundary values.
2991ce8f407aSHariprasad Shenai 		 */
2992ce8f407aSHariprasad Shenai 		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
2993ce8f407aSHariprasad Shenai 		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
2994ce8f407aSHariprasad Shenai 		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
2995ce8f407aSHariprasad Shenai 			ingpackboundary = 16;
2996ce8f407aSHariprasad Shenai 		else
2997ce8f407aSHariprasad Shenai 			ingpackboundary = 1 << (ingpackboundary +
2998ce8f407aSHariprasad Shenai 						INGPACKBOUNDARY_SHIFT_X);
2999ce8f407aSHariprasad Shenai 
3000ce8f407aSHariprasad Shenai 		s->fl_align = max(ingpadboundary, ingpackboundary);
3001ce8f407aSHariprasad Shenai 	}
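
	/* Worked example for T5, assuming the usual shift constants of 5:
	 * an INGPADBOUNDARY field of 0 gives a 32-byte padding boundary,
	 * an INGPACKBOUNDARY field of 1 gives a 64-byte packing boundary,
	 * so fl_align would be max(32, 64) = 64.
	 */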
300252367a76SVipul Pandya 
300352367a76SVipul Pandya 	ret = t4_sge_init_soft(adap);
300452367a76SVipul Pandya 	if (ret < 0)
300552367a76SVipul Pandya 		return ret;
300652367a76SVipul Pandya 
300752367a76SVipul Pandya 	/*
300852367a76SVipul Pandya 	 * A FL with <= fl_starve_thres buffers is starving and a periodic
300952367a76SVipul Pandya 	 * timer will attempt to refill it.  This needs to be larger than the
301052367a76SVipul Pandya 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
301152367a76SVipul Pandya 	 * stuck waiting for new packets while the SGE is waiting for us to
301252367a76SVipul Pandya 	 * give it more Free List entries.  (Note that the SGE's Egress
3013c2b955e0SKumar Sanghvi 	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
3014c2b955e0SKumar Sanghvi 	 * there was only a single field to control this.  For T5 there's the
3015c2b955e0SKumar Sanghvi 	 * original field which now only applies to Unpacked Mode Free List
3016c2b955e0SKumar Sanghvi 	 * buffers and a new field which only applies to Packed Mode Free List
3017c2b955e0SKumar Sanghvi 	 * buffers.
301852367a76SVipul Pandya 	 */
3019f612b815SHariprasad Shenai 	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
3020c2b955e0SKumar Sanghvi 	if (is_t4(adap->params.chip))
3021f612b815SHariprasad Shenai 		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
3022c2b955e0SKumar Sanghvi 	else
3023f612b815SHariprasad Shenai 		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3024c2b955e0SKumar Sanghvi 	s->fl_starve_thres = 2 * egress_threshold + 1;
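	/* E.g. an Egress Congestion Threshold of 32 (i.e. 64 Free List
	 * pointers) gives fl_starve_thres = 2 * 32 + 1 = 65 buffers.
	 */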
302552367a76SVipul Pandya 
3026f7917c00SJeff Kirsher 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
3027f7917c00SJeff Kirsher 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
30280f4d201fSKumar Sanghvi 	s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
30290f4d201fSKumar Sanghvi 	s->idma_stalled[0] = 0;
30300f4d201fSKumar Sanghvi 	s->idma_stalled[1] = 0;
3031f7917c00SJeff Kirsher 	spin_lock_init(&s->intrq_lock);
303252367a76SVipul Pandya 
303352367a76SVipul Pandya 	return 0;
3034f7917c00SJeff Kirsher }
3035