/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"

/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif
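
/*
 * For illustration: with the common 4KB pages (PAGE_SHIFT == 12) this
 * makes FL_PG_ORDER = 16 - 12 = 4, i.e. large Free List buffers of
 * 2^4 = 16 pages = 64KB.  With 64KB pages (PAGE_SHIFT >= 16) a single
 * page already reaches that size, hence order 0.
 */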

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.  It should
 * also match the CIDX Flush Threshold.
 */
#define MAX_TX_RECLAIM 32

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
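
/*
 * Rough arithmetic for the above (assuming the usual 512-byte
 * SGE_MAX_WR_LEN and 64-byte, i.e. 8-flit, Tx descriptors from the chip
 * headers): TXQ_STOP_THRES = 512 / 64 = 8 descriptors of headroom
 * before a non-Ethernet queue is suspended.
 */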

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per
 * skb buffer).  We currently only support two sizes for 1500- and 9000-byte
 * MTUs.  We could easily support more but there doesn't seem to be much
 * need for that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}
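
/*
 * Worked example (assuming, say, a pktshift of 2 and an fl_align of 64;
 * both really come from the SGE configuration):
 *	fl_mtu_bufsize(adap, 1500) = ALIGN(2 + 14 + 4 + 1500, 64)
 *				   = ALIGN(1520, 64) = 1536 bytes
 */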

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
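
/*
 * Decoding example with a purely illustrative value: a dma_addr of
 * 0x12345003 describes a buffer at bus address 0x12345000 whose size
 * index is RX_LARGE_MTU_BUF (0x3) and which is still DMA-mapped, since
 * bit 4 (RX_UNMAPPED_BUF) is clear; get_buf_addr() and is_buf_mapped()
 * above recover exactly these pieces.
 */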

/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
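
/*
 * E.g. a Free List with fl->size == 1024 buffer slots has a capacity of
 * 1024 - 8 = 1016 buffers: the 8 slots of one hardware descriptor stay
 * unpopulated so that a completely full ring cannot be confused with an
 * empty one.
 */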

/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests the specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);

#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
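
/*
 * Wrap-around example: with q->size == 1024, a hardware cidx of 5 and a
 * software cidx of 1000 gives 5 - 1000 = -995, which the correction
 * above turns into -995 + 1024 = 29 reclaimable descriptors.
 */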

/**
 *	reclaim_completed_tx - reclaims completed TX Descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx Descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  If @maxreclaim == -1,
 *	then we'll use a default maximum.  Called with the TX Queue locked.
 */
static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       int maxreclaim, bool unmap)
{
	int reclaim = reclaimable(q);

	if (reclaim) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (maxreclaim < 0)
			maxreclaim = MAX_TX_RECLAIM;
		if (reclaim > maxreclaim)
			reclaim = maxreclaim;

		free_tx_desc(adap, q, reclaim, unmap);
		q->in_use -= reclaim;
	}

	return reclaim;
}

/**
 *	cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				bool unmap)
{
	(void)reclaim_completed_tx(adap, q, -1, unmap);
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);

static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG_ON(1);
	}

	return buf_size;
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
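
/*
 * E.g. with q->pend_cred == 19 the doorbell above advertises 19 / 8 = 2
 * new Free List descriptors (16 buffers) to the hardware, and
 * q->pend_cred &= 7 leaves 3 buffers pending until at least 8 have
 * accumulated again.
 */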

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must ensure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		q->low++;
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	return p;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N); and finally the
	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
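
/*
 * Worked examples: sgl_len(1) = 2 (just the two leading flits holding
 * the DSGL header, Length0 and Address0); sgl_len(2) = 4 (the remaining
 * entry needs a Length[1]/Length[2]=0 flit plus an address flit); and
 * sgl_len(5) = 8 (two leading flits plus two full 3-flit pairs for the
 * remaining four entries).
 */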

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.  The return value corresponds to the headroom
 *	required.
 */
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
	int hdrlen = 0;

	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
	    chip_ver > CHELSIO_T5) {
		hdrlen = sizeof(struct cpl_tx_tnl_lso);
		hdrlen += sizeof(struct cpl_tx_pkt_core);
	} else {
		hdrlen = skb_shinfo(skb)->gso_size ?
			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
		hdrlen += sizeof(struct cpl_tx_pkt);
	}
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
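
/*
 * E.g. a small, non-GSO packet takes the else branch above with
 * hdrlen == sizeof(struct cpl_tx_pkt); if skb->len fits within
 * MAX_IMM_TX_PKT_LEN - hdrlen, the caller gets back the CPL headroom it
 * must budget before the inlined payload, while a large packet gets 0
 * and is sent via a scatter/gather list instead.
 */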

/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, chip_ver);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation && chip_ver > CHELSIO_T5)
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_tnl_lso);
		else
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_pkt_lso_core);

		hdrlen += sizeof(struct cpl_tx_pkt_core);
		flits += (hdrlen / sizeof(__be64));
	} else {
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	}
	return flits;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	return flits_to_desc(calc_tx_flits(skb, chip_ver));
}

/**
 *	cxgb4_write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);

/* This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space.  For coalesced WRs the SGE fetches data from the FIFO
 * instead of from host memory.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

/**
 *	cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
960a6ec572bSAtul Gupta inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
961f7917c00SJeff Kirsher {
9621ecc7b7aSHariprasad Shenai 	/* Make sure that all writes to the TX Descriptors are committed
9631ecc7b7aSHariprasad Shenai 	 * before we tell the hardware about them.
9641ecc7b7aSHariprasad Shenai 	 */
9651ecc7b7aSHariprasad Shenai 	wmb();
966d63a6dcfSHariprasad Shenai 
967df64e4d3SHariprasad Shenai 	/* If we don't have access to the new User Doorbell (T5+), use the old
968df64e4d3SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
969df64e4d3SHariprasad Shenai 	 */
970df64e4d3SHariprasad Shenai 	if (unlikely(q->bar2_addr == NULL)) {
971f612b815SHariprasad Shenai 		u32 val = PIDX_V(n);
97205eb2389SSteve Wise 		unsigned long flags;
97322adfe0aSSantosh Rastapur 
974d63a6dcfSHariprasad Shenai 		/* For T4 we need to participate in the Doorbell Recovery
975d63a6dcfSHariprasad Shenai 		 * mechanism.
976d63a6dcfSHariprasad Shenai 		 */
97705eb2389SSteve Wise 		spin_lock_irqsave(&q->db_lock, flags);
978d63a6dcfSHariprasad Shenai 		if (!q->db_disabled)
979f612b815SHariprasad Shenai 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
980f612b815SHariprasad Shenai 				     QID_V(q->cntxt_id) | val);
981d63a6dcfSHariprasad Shenai 		else
98205eb2389SSteve Wise 			q->db_pidx_inc += n;
9833069ee9bSVipul Pandya 		q->db_pidx = q->pidx;
98405eb2389SSteve Wise 		spin_unlock_irqrestore(&q->db_lock, flags);
985d63a6dcfSHariprasad Shenai 	} else {
986f612b815SHariprasad Shenai 		u32 val = PIDX_T5_V(n);
987d63a6dcfSHariprasad Shenai 
988d63a6dcfSHariprasad Shenai 		/* T4 and later chips share the same PIDX field offset within
989d63a6dcfSHariprasad Shenai 		 * the doorbell, but T5 and later shrank the field in order to
990d63a6dcfSHariprasad Shenai 		 * gain a bit for Doorbell Priority.  The field was absurdly
991d63a6dcfSHariprasad Shenai 		 * large in the first place (14 bits) so we just use the T5
992d63a6dcfSHariprasad Shenai 		 * and later limits and warn if a Queue ID is too large.
993d63a6dcfSHariprasad Shenai 		 */
994f612b815SHariprasad Shenai 		WARN_ON(val & DBPRIO_F);
995d63a6dcfSHariprasad Shenai 
996df64e4d3SHariprasad Shenai 		/* If we're only writing a single TX Descriptor and we can use
997df64e4d3SHariprasad Shenai 		 * Inferred QID registers, we can use the Write Combining
998df64e4d3SHariprasad Shenai 		 * Gather Buffer; otherwise we use the simple doorbell.
999d63a6dcfSHariprasad Shenai 		 */
1000df64e4d3SHariprasad Shenai 		if (n == 1 && q->bar2_qid == 0) {
1001d63a6dcfSHariprasad Shenai 			int index = (q->pidx
1002d63a6dcfSHariprasad Shenai 				     ? (q->pidx - 1)
1003d63a6dcfSHariprasad Shenai 				     : (q->size - 1));
1004df64e4d3SHariprasad Shenai 			u64 *wr = (u64 *)&q->desc[index];
1005d63a6dcfSHariprasad Shenai 
1006df64e4d3SHariprasad Shenai 			cxgb_pio_copy((u64 __iomem *)
1007df64e4d3SHariprasad Shenai 				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
1008df64e4d3SHariprasad Shenai 				      wr);
1009d63a6dcfSHariprasad Shenai 		} else {
1010f612b815SHariprasad Shenai 			writel(val | QID_V(q->bar2_qid),
1011df64e4d3SHariprasad Shenai 			       q->bar2_addr + SGE_UDB_KDOORBELL);
1012d63a6dcfSHariprasad Shenai 		}
1013d63a6dcfSHariprasad Shenai 
1014d63a6dcfSHariprasad Shenai 		/* This Write Memory Barrier will force the write to the User
1015d63a6dcfSHariprasad Shenai 		 * Doorbell area to be flushed.  This is needed to prevent
1016d63a6dcfSHariprasad Shenai 		 * writes on different CPUs for the same queue from hitting
1017d63a6dcfSHariprasad Shenai 		 * the adapter out of order.  This is required when some Work
1018d63a6dcfSHariprasad Shenai 		 * Requests take the Write Combine Gather Buffer path (user
1019d63a6dcfSHariprasad Shenai 		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
1020d63a6dcfSHariprasad Shenai 		 * take the traditional path where we simply increment the
1021d63a6dcfSHariprasad Shenai 		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1022d63a6dcfSHariprasad Shenai 		 * hardware DMA read the actual Work Request.
1023d63a6dcfSHariprasad Shenai 		 */
1024d63a6dcfSHariprasad Shenai 		wmb();
1025d63a6dcfSHariprasad Shenai 	}
1026f7917c00SJeff Kirsher }
1027a6ec572bSAtul Gupta EXPORT_SYMBOL(cxgb4_ring_tx_db);
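
/* Illustrative sketch (compiled out, not part of the driver): the Write
 * Combining fast path above pushes one full 64-byte TX descriptor through
 * the BAR2 gather buffer as eight 64-bit stores, which is what
 * cxgb_pio_copy() does.  A minimal stand-in, assuming a naturally aligned
 * descriptor and a 64-bit platform with writeq(); the ex_ name is
 * hypothetical.
 */
#if 0
static void ex_wc_copy(u64 __iomem *dst, const u64 *src)
{
	int count = 64 / sizeof(u64);	/* one Egress Queue Unit */

	while (count--)
		writeq(*src++, dst++);
	/* The caller still needs the wmb() above to order this against
	 * doorbell writes from other CPUs.
	 */
}
#endif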
1028f7917c00SJeff Kirsher 
1029f7917c00SJeff Kirsher /**
1030a6ec572bSAtul Gupta  *	cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1031f7917c00SJeff Kirsher  *	@skb: the packet
1032f7917c00SJeff Kirsher  *	@q: the Tx queue where the packet will be inlined
1033f7917c00SJeff Kirsher  *	@pos: starting position in the Tx queue where to inline the packet
1034f7917c00SJeff Kirsher  *
1035f7917c00SJeff Kirsher  *	Inline a packet's contents directly into Tx descriptors, starting at
1036f7917c00SJeff Kirsher  *	the given position within the Tx DMA ring.
1037f7917c00SJeff Kirsher  *	Most of the complexity of this operation is dealing with wraparounds
1038f7917c00SJeff Kirsher  *	in the middle of the packet we want to inline.
1039f7917c00SJeff Kirsher  */
1040a6ec572bSAtul Gupta void cxgb4_inline_tx_skb(const struct sk_buff *skb,
1041a6ec572bSAtul Gupta 			 const struct sge_txq *q, void *pos)
1042f7917c00SJeff Kirsher {
1043f7917c00SJeff Kirsher 	int left = (void *)q->stat - pos;
1044e383f248SAtul Gupta 	u64 *p;
1045f7917c00SJeff Kirsher 
1046f7917c00SJeff Kirsher 	if (likely(skb->len <= left)) {
1047f7917c00SJeff Kirsher 		if (likely(!skb->data_len))
1048f7917c00SJeff Kirsher 			skb_copy_from_linear_data(skb, pos, skb->len);
1049f7917c00SJeff Kirsher 		else
1050f7917c00SJeff Kirsher 			skb_copy_bits(skb, 0, pos, skb->len);
1051f7917c00SJeff Kirsher 		pos += skb->len;
1052f7917c00SJeff Kirsher 	} else {
1053f7917c00SJeff Kirsher 		skb_copy_bits(skb, 0, pos, left);
1054f7917c00SJeff Kirsher 		skb_copy_bits(skb, left, q->desc, skb->len - left);
1055f7917c00SJeff Kirsher 		pos = (void *)q->desc + (skb->len - left);
1056f7917c00SJeff Kirsher 	}
1057f7917c00SJeff Kirsher 
1058f7917c00SJeff Kirsher 	/* 0-pad to multiple of 16 */
1059f7917c00SJeff Kirsher 	p = PTR_ALIGN(pos, 8);
1060f7917c00SJeff Kirsher 	if ((uintptr_t)p & 8)
1061f7917c00SJeff Kirsher 		*p = 0;
1062f7917c00SJeff Kirsher }
1063a6ec572bSAtul Gupta EXPORT_SYMBOL(cxgb4_inline_tx_skb);
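
/* Illustrative sketch (compiled out): the wraparound logic above reduced to
 * plain memcpy() on an arbitrary buffer.  The writable region of the ring
 * ends at q->stat (the status page), so anything that doesn't fit before it
 * continues at q->desc.  ex_ring_copy() is a hypothetical helper.
 */
#if 0
static void *ex_ring_copy(const struct sge_txq *q, void *pos,
			  const void *src, int len)
{
	int left = (void *)q->stat - pos;	/* bytes before the wrap */

	if (len <= left) {
		memcpy(pos, src, len);
		return pos + len;
	}
	memcpy(pos, src, left);				/* tail of the ring ... */
	memcpy(q->desc, src + left, len - left);	/* ... then the head */
	return (void *)q->desc + (len - left);
}
#endif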
1064f7917c00SJeff Kirsher 
10658d0557d2SHariprasad Shenai static void *inline_tx_skb_header(const struct sk_buff *skb,
10668d0557d2SHariprasad Shenai 				  const struct sge_txq *q,  void *pos,
10678d0557d2SHariprasad Shenai 				  int length)
10688d0557d2SHariprasad Shenai {
10698d0557d2SHariprasad Shenai 	u64 *p;
10708d0557d2SHariprasad Shenai 	int left = (void *)q->stat - pos;
10718d0557d2SHariprasad Shenai 
10728d0557d2SHariprasad Shenai 	if (likely(length <= left)) {
10738d0557d2SHariprasad Shenai 		memcpy(pos, skb->data, length);
10748d0557d2SHariprasad Shenai 		pos += length;
10758d0557d2SHariprasad Shenai 	} else {
10768d0557d2SHariprasad Shenai 		memcpy(pos, skb->data, left);
10778d0557d2SHariprasad Shenai 		memcpy(q->desc, skb->data + left, length - left);
10788d0557d2SHariprasad Shenai 		pos = (void *)q->desc + (length - left);
10798d0557d2SHariprasad Shenai 	}
10808d0557d2SHariprasad Shenai 	/* 0-pad to multiple of 16 */
10818d0557d2SHariprasad Shenai 	p = PTR_ALIGN(pos, 8);
10828d0557d2SHariprasad Shenai 	if ((uintptr_t)p & 8) {
10838d0557d2SHariprasad Shenai 		*p = 0;
10848d0557d2SHariprasad Shenai 		return p + 1;
10858d0557d2SHariprasad Shenai 	}
10868d0557d2SHariprasad Shenai 	return p;
10878d0557d2SHariprasad Shenai }
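
/* Note on the padding tail shared by both inline helpers above (sketch,
 * compiled out): Work Requests must be a multiple of 16 bytes long and the
 * descriptor ring is 16-byte aligned, so after rounding the write position
 * up to 8 bytes, at most one extra zeroed 8-byte word is needed, and testing
 * bit 3 of the pointer decides whether it is.  ex_pad_to_16() is
 * hypothetical.
 */
#if 0
static void *ex_pad_to_16(void *pos)
{
	u64 *p = PTR_ALIGN(pos, 8);

	if ((uintptr_t)p & 8) {		/* 8-aligned but not 16-aligned */
		*p = 0;
		return p + 1;
	}
	return p;
}
#endif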
10888d0557d2SHariprasad Shenai 
1089f7917c00SJeff Kirsher /*
1090f7917c00SJeff Kirsher  * Figure out what HW csum a packet wants and return the appropriate control
1091f7917c00SJeff Kirsher  * bits.
1092f7917c00SJeff Kirsher  */
10933ccc6cf7SHariprasad Shenai static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1094f7917c00SJeff Kirsher {
1095f7917c00SJeff Kirsher 	int csum_type;
1096c50ae55eSGanesh Goudar 	bool inner_hdr_csum = false;
1097c50ae55eSGanesh Goudar 	u16 proto, ver;
1098f7917c00SJeff Kirsher 
1099c50ae55eSGanesh Goudar 	if (skb->encapsulation &&
1100c50ae55eSGanesh Goudar 	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
1101c50ae55eSGanesh Goudar 		inner_hdr_csum = true;
1102c50ae55eSGanesh Goudar 
1103c50ae55eSGanesh Goudar 	if (inner_hdr_csum) {
1104c50ae55eSGanesh Goudar 		ver = inner_ip_hdr(skb)->version;
1105c50ae55eSGanesh Goudar 		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
1106c50ae55eSGanesh Goudar 			inner_ipv6_hdr(skb)->nexthdr;
1107c50ae55eSGanesh Goudar 	} else {
1108c50ae55eSGanesh Goudar 		ver = ip_hdr(skb)->version;
1109c50ae55eSGanesh Goudar 		proto = (ver == 4) ? ip_hdr(skb)->protocol :
1110c50ae55eSGanesh Goudar 			ipv6_hdr(skb)->nexthdr;
1111c50ae55eSGanesh Goudar 	}
1112c50ae55eSGanesh Goudar 
1113c50ae55eSGanesh Goudar 	if (ver == 4) {
1114c50ae55eSGanesh Goudar 		if (proto == IPPROTO_TCP)
1115f7917c00SJeff Kirsher 			csum_type = TX_CSUM_TCPIP;
1116c50ae55eSGanesh Goudar 		else if (proto == IPPROTO_UDP)
1117f7917c00SJeff Kirsher 			csum_type = TX_CSUM_UDPIP;
1118f7917c00SJeff Kirsher 		else {
1119f7917c00SJeff Kirsher nocsum:			/*
1120f7917c00SJeff Kirsher 			 * unknown protocol, disable HW csum
1121f7917c00SJeff Kirsher 			 * and hope a bad packet is detected
1122f7917c00SJeff Kirsher 			 */
11231ecc7b7aSHariprasad Shenai 			return TXPKT_L4CSUM_DIS_F;
1124f7917c00SJeff Kirsher 		}
1125f7917c00SJeff Kirsher 	} else {
1126f7917c00SJeff Kirsher 		/*
1127f7917c00SJeff Kirsher 		 * this doesn't work with IPv6 extension headers
1128f7917c00SJeff Kirsher 		 */
1129c50ae55eSGanesh Goudar 		if (proto == IPPROTO_TCP)
1130f7917c00SJeff Kirsher 			csum_type = TX_CSUM_TCPIP6;
1131c50ae55eSGanesh Goudar 		else if (proto == IPPROTO_UDP)
1132f7917c00SJeff Kirsher 			csum_type = TX_CSUM_UDPIP6;
1133f7917c00SJeff Kirsher 		else
1134f7917c00SJeff Kirsher 			goto nocsum;
1135f7917c00SJeff Kirsher 	}
1136f7917c00SJeff Kirsher 
11373ccc6cf7SHariprasad Shenai 	if (likely(csum_type >= TX_CSUM_TCPIP)) {
1138c50ae55eSGanesh Goudar 		int eth_hdr_len, l4_len;
1139c50ae55eSGanesh Goudar 		u64 hdr_len;
1140c50ae55eSGanesh Goudar 
1141c50ae55eSGanesh Goudar 		if (inner_hdr_csum) {
1142c50ae55eSGanesh Goudar 			/* This allows checksum offload for all encapsulated
1143c50ae55eSGanesh Goudar 		 * packets like GRE, etc.
1144c50ae55eSGanesh Goudar 			 */
1145c50ae55eSGanesh Goudar 			l4_len = skb_inner_network_header_len(skb);
1146c50ae55eSGanesh Goudar 			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
1147c50ae55eSGanesh Goudar 		} else {
1148c50ae55eSGanesh Goudar 			l4_len = skb_network_header_len(skb);
1149c50ae55eSGanesh Goudar 			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1150c50ae55eSGanesh Goudar 		}
1151c50ae55eSGanesh Goudar 		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
11523ccc6cf7SHariprasad Shenai 
11533ccc6cf7SHariprasad Shenai 		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
11543ccc6cf7SHariprasad Shenai 			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
11553ccc6cf7SHariprasad Shenai 		else
11563ccc6cf7SHariprasad Shenai 			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
11573ccc6cf7SHariprasad Shenai 		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
11583ccc6cf7SHariprasad Shenai 	} else {
1159f7917c00SJeff Kirsher 		int start = skb_transport_offset(skb);
1160f7917c00SJeff Kirsher 
11611ecc7b7aSHariprasad Shenai 		return TXPKT_CSUM_TYPE_V(csum_type) |
11621ecc7b7aSHariprasad Shenai 			TXPKT_CSUM_START_V(start) |
11631ecc7b7aSHariprasad Shenai 			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1164f7917c00SJeff Kirsher 	}
1165f7917c00SJeff Kirsher }
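
/* Worked example for hwcsum() (sketch, compiled out): for an untagged
 * TCP/IPv4 frame on a pre-T6 chip, skb_network_offset() == ETH_HLEN, so
 * eth_hdr_len is 0, and the "l4_len" fed to TXPKT_IPHDR_LEN_V() is the
 * 20-byte IP header length.  The hardware then knows where the IP and TCP
 * headers begin and inserts both checksums itself.  ex_hwcsum_tcpip4() is a
 * hypothetical helper using the pre-T6 ETHHDR_LEN layout.
 */
#if 0
static u64 ex_hwcsum_tcpip4(void)
{
	return TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) |
	       TXPKT_IPHDR_LEN_V(sizeof(struct iphdr)) |	/* 20 bytes */
	       TXPKT_ETHHDR_LEN_V(0);		/* nothing beyond ETH_HLEN */
}
#endif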
1166f7917c00SJeff Kirsher 
1167f7917c00SJeff Kirsher static void eth_txq_stop(struct sge_eth_txq *q)
1168f7917c00SJeff Kirsher {
1169f7917c00SJeff Kirsher 	netif_tx_stop_queue(q->txq);
1170f7917c00SJeff Kirsher 	q->q.stops++;
1171f7917c00SJeff Kirsher }
1172f7917c00SJeff Kirsher 
1173f7917c00SJeff Kirsher static inline void txq_advance(struct sge_txq *q, unsigned int n)
1174f7917c00SJeff Kirsher {
1175f7917c00SJeff Kirsher 	q->in_use += n;
1176f7917c00SJeff Kirsher 	q->pidx += n;
1177f7917c00SJeff Kirsher 	if (q->pidx >= q->size)
1178f7917c00SJeff Kirsher 		q->pidx -= q->size;
1179f7917c00SJeff Kirsher }
1180f7917c00SJeff Kirsher 
118184a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
118284a200b3SVarun Prakash static inline int
118384a200b3SVarun Prakash cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
118484a200b3SVarun Prakash 		  const struct port_info *pi, u64 *cntrl)
118584a200b3SVarun Prakash {
118684a200b3SVarun Prakash 	const struct cxgb_fcoe *fcoe = &pi->fcoe;
118784a200b3SVarun Prakash 
118884a200b3SVarun Prakash 	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
118984a200b3SVarun Prakash 		return 0;
119084a200b3SVarun Prakash 
119184a200b3SVarun Prakash 	if (skb->protocol != htons(ETH_P_FCOE))
119284a200b3SVarun Prakash 		return 0;
119384a200b3SVarun Prakash 
119484a200b3SVarun Prakash 	skb_reset_mac_header(skb);
119584a200b3SVarun Prakash 	skb->mac_len = sizeof(struct ethhdr);
119684a200b3SVarun Prakash 
119784a200b3SVarun Prakash 	skb_set_network_header(skb, skb->mac_len);
119884a200b3SVarun Prakash 	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
119984a200b3SVarun Prakash 
120084a200b3SVarun Prakash 	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
120184a200b3SVarun Prakash 		return -ENOTSUPP;
120284a200b3SVarun Prakash 
120384a200b3SVarun Prakash 	/* FC CRC offload */
12041ecc7b7aSHariprasad Shenai 	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
12051ecc7b7aSHariprasad Shenai 		     TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
12061ecc7b7aSHariprasad Shenai 		     TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
12071ecc7b7aSHariprasad Shenai 		     TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
12081ecc7b7aSHariprasad Shenai 		     TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
120984a200b3SVarun Prakash 	return 0;
121084a200b3SVarun Prakash }
121184a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
121284a200b3SVarun Prakash 
1213d0a1299cSGanesh Goudar /* Returns the tunnel type if the hardware supports offloading it.
1214d0a1299cSGanesh Goudar  * It is called only for chips newer than T5 (T6 and onwards).
1215d0a1299cSGanesh Goudar  */
1216d0a1299cSGanesh Goudar enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
1217d0a1299cSGanesh Goudar {
1218d0a1299cSGanesh Goudar 	u8 l4_hdr = 0;
1219d0a1299cSGanesh Goudar 	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1220d0a1299cSGanesh Goudar 	struct port_info *pi = netdev_priv(skb->dev);
1221d0a1299cSGanesh Goudar 	struct adapter *adapter = pi->adapter;
1222d0a1299cSGanesh Goudar 
1223d0a1299cSGanesh Goudar 	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1224d0a1299cSGanesh Goudar 	    skb->inner_protocol != htons(ETH_P_TEB))
1225d0a1299cSGanesh Goudar 		return tnl_type;
1226d0a1299cSGanesh Goudar 
1227d0a1299cSGanesh Goudar 	switch (vlan_get_protocol(skb)) {
1228d0a1299cSGanesh Goudar 	case htons(ETH_P_IP):
1229d0a1299cSGanesh Goudar 		l4_hdr = ip_hdr(skb)->protocol;
1230d0a1299cSGanesh Goudar 		break;
1231d0a1299cSGanesh Goudar 	case htons(ETH_P_IPV6):
1232d0a1299cSGanesh Goudar 		l4_hdr = ipv6_hdr(skb)->nexthdr;
1233d0a1299cSGanesh Goudar 		break;
1234d0a1299cSGanesh Goudar 	default:
1235d0a1299cSGanesh Goudar 		return tnl_type;
1236d0a1299cSGanesh Goudar 	}
1237d0a1299cSGanesh Goudar 
1238d0a1299cSGanesh Goudar 	switch (l4_hdr) {
1239d0a1299cSGanesh Goudar 	case IPPROTO_UDP:
1240d0a1299cSGanesh Goudar 		if (adapter->vxlan_port == udp_hdr(skb)->dest)
1241d0a1299cSGanesh Goudar 			tnl_type = TX_TNL_TYPE_VXLAN;
1242c746fc0eSGanesh Goudar 		else if (adapter->geneve_port == udp_hdr(skb)->dest)
1243c746fc0eSGanesh Goudar 			tnl_type = TX_TNL_TYPE_GENEVE;
1244d0a1299cSGanesh Goudar 		break;
1245d0a1299cSGanesh Goudar 	default:
1246d0a1299cSGanesh Goudar 		return tnl_type;
1247d0a1299cSGanesh Goudar 	}
1248d0a1299cSGanesh Goudar 
1249d0a1299cSGanesh Goudar 	return tnl_type;
1250d0a1299cSGanesh Goudar }
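
/* Sketch of the port match above (compiled out): the adapter caches the UDP
 * destination port programmed for each tunnel type (adap->vxlan_port /
 * adap->geneve_port, stored big-endian), and a packet is treated as an
 * offloadable tunnel only when its UDP destination port matches one of
 * them.  ex_match_udp_tunnel() is a hypothetical distillation.
 */
#if 0
static enum cpl_tx_tnl_lso_type ex_match_udp_tunnel(const struct adapter *adap,
						    __be16 dport)
{
	if (dport == adap->vxlan_port)
		return TX_TNL_TYPE_VXLAN;
	if (dport == adap->geneve_port)
		return TX_TNL_TYPE_GENEVE;
	return TX_TNL_TYPE_OPAQUE;	/* not a recognized tunnel */
}
#endif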
1251d0a1299cSGanesh Goudar 
1252d0a1299cSGanesh Goudar static inline void t6_fill_tnl_lso(struct sk_buff *skb,
1253d0a1299cSGanesh Goudar 				   struct cpl_tx_tnl_lso *tnl_lso,
1254d0a1299cSGanesh Goudar 				   enum cpl_tx_tnl_lso_type tnl_type)
1255d0a1299cSGanesh Goudar {
1256d0a1299cSGanesh Goudar 	u32 val;
1257d0a1299cSGanesh Goudar 	int in_eth_xtra_len;
1258d0a1299cSGanesh Goudar 	int l3hdr_len = skb_network_header_len(skb);
1259d0a1299cSGanesh Goudar 	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1260d0a1299cSGanesh Goudar 	const struct skb_shared_info *ssi = skb_shinfo(skb);
1261d0a1299cSGanesh Goudar 	bool v6 = (ip_hdr(skb)->version == 6);
1262d0a1299cSGanesh Goudar 
1263d0a1299cSGanesh Goudar 	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
1264d0a1299cSGanesh Goudar 	      CPL_TX_TNL_LSO_FIRST_F |
1265d0a1299cSGanesh Goudar 	      CPL_TX_TNL_LSO_LAST_F |
1266d0a1299cSGanesh Goudar 	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
1267d0a1299cSGanesh Goudar 	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
1268d0a1299cSGanesh Goudar 	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
1269d0a1299cSGanesh Goudar 	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
1270d0a1299cSGanesh Goudar 	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
1271d0a1299cSGanesh Goudar 	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
1272d0a1299cSGanesh Goudar 	tnl_lso->op_to_IpIdSplitOut = htonl(val);
1273d0a1299cSGanesh Goudar 
1274d0a1299cSGanesh Goudar 	tnl_lso->IpIdOffsetOut = 0;
1275d0a1299cSGanesh Goudar 
1276d0a1299cSGanesh Goudar 	/* Get the tunnel header length */
1277d0a1299cSGanesh Goudar 	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
1278d0a1299cSGanesh Goudar 	in_eth_xtra_len = skb_inner_network_header(skb) -
1279d0a1299cSGanesh Goudar 			  skb_inner_mac_header(skb) - ETH_HLEN;
1280d0a1299cSGanesh Goudar 
1281d0a1299cSGanesh Goudar 	switch (tnl_type) {
1282d0a1299cSGanesh Goudar 	case TX_TNL_TYPE_VXLAN:
1283c746fc0eSGanesh Goudar 	case TX_TNL_TYPE_GENEVE:
1284d0a1299cSGanesh Goudar 		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
1285d0a1299cSGanesh Goudar 			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
1286d0a1299cSGanesh Goudar 			CPL_TX_TNL_LSO_UDPLENSETOUT_F);
1287d0a1299cSGanesh Goudar 		break;
1288d0a1299cSGanesh Goudar 	default:
1289d0a1299cSGanesh Goudar 		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
1290d0a1299cSGanesh Goudar 		break;
1291d0a1299cSGanesh Goudar 	}
1292d0a1299cSGanesh Goudar 
1293d0a1299cSGanesh Goudar 	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
1294d0a1299cSGanesh Goudar 		 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
1295d0a1299cSGanesh Goudar 		       CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
1296d0a1299cSGanesh Goudar 
1297d0a1299cSGanesh Goudar 	tnl_lso->r1 = 0;
1298d0a1299cSGanesh Goudar 
1299d0a1299cSGanesh Goudar 	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
1300d0a1299cSGanesh Goudar 	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
1301d0a1299cSGanesh Goudar 	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
1302d0a1299cSGanesh Goudar 	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
1303d0a1299cSGanesh Goudar 	tnl_lso->Flow_to_TcpHdrLen = htonl(val);
1304d0a1299cSGanesh Goudar 
1305d0a1299cSGanesh Goudar 	tnl_lso->IpIdOffset = htons(0);
1306d0a1299cSGanesh Goudar 
1307d0a1299cSGanesh Goudar 	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
1308d0a1299cSGanesh Goudar 	tnl_lso->TCPSeqOffset = htonl(0);
1309d0a1299cSGanesh Goudar 	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
1310d0a1299cSGanesh Goudar }
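
/* Note on the encoding above (sketch, compiled out): the ETHHDRLEN(OUT) and
 * IPHDRLEN(OUT) fields are carried in 4-byte units (hence the "/ 4"), while
 * TNLHDRLEN is in bytes.  Worked example for VXLAN over untagged IPv4:
 * eth_xtra_len = 0, l3hdr_len = 20, and the tunnel header spans outer
 * Ethernet to inner Ethernet, 14 + 20 + 8 (UDP) + 8 (VXLAN) = 50 bytes.
 * ex_outer_hdr_fields() is a hypothetical helper.
 */
#if 0
static u32 ex_outer_hdr_fields(int eth_xtra_len, int l3hdr_len)
{
	return CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
	       CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4);
}
#endif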
1311d0a1299cSGanesh Goudar 
1312f7917c00SJeff Kirsher /**
1313d429005fSVishal Kulkarni  *	t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1314d429005fSVishal Kulkarni  *	@adap: the adapter
1315d429005fSVishal Kulkarni  *	@eq: the Ethernet TX Queue
1316d429005fSVishal Kulkarni  *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1317d429005fSVishal Kulkarni  *
1318d429005fSVishal Kulkarni  *	We're typically called here to update the state of an Ethernet TX
1319d429005fSVishal Kulkarni  *	Queue with respect to the hardware's progress in consuming the TX
1320d429005fSVishal Kulkarni  *	Work Requests that we've put on that Egress Queue.  This happens
1321d429005fSVishal Kulkarni  *	when we get Egress Queue Update messages and also prophylactically
1322d429005fSVishal Kulkarni  *	in regular timer-based Ethernet TX Queue maintenance.
1323d429005fSVishal Kulkarni  */
1324d429005fSVishal Kulkarni int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
1325d429005fSVishal Kulkarni 				 int maxreclaim)
1326d429005fSVishal Kulkarni {
1327d429005fSVishal Kulkarni 	struct sge_txq *q = &eq->q;
1328d429005fSVishal Kulkarni 	unsigned int reclaimed;
1329d429005fSVishal Kulkarni 
1330d429005fSVishal Kulkarni 	if (!q->in_use || !__netif_tx_trylock(eq->txq))
1331d429005fSVishal Kulkarni 		return 0;
1332d429005fSVishal Kulkarni 
1333d429005fSVishal Kulkarni 	/* Reclaim pending completed TX Descriptors. */
1334d429005fSVishal Kulkarni 	reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1335d429005fSVishal Kulkarni 
1336d429005fSVishal Kulkarni 	/* If the TX Queue is currently stopped and there's now more than half
1337d429005fSVishal Kulkarni 	 * the queue available, restart it.  Otherwise bail out, since the rest
1338d429005fSVishal Kulkarni 	 * of what we want to do here deals with the possibility of shipping any
1339d429005fSVishal Kulkarni 	 * currently buffered Coalesced TX Work Request.
1340d429005fSVishal Kulkarni 	 */
1341d429005fSVishal Kulkarni 	if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
1342d429005fSVishal Kulkarni 		netif_tx_wake_queue(eq->txq);
1343d429005fSVishal Kulkarni 		eq->q.restarts++;
1344d429005fSVishal Kulkarni 	}
1345d429005fSVishal Kulkarni 
1346d429005fSVishal Kulkarni 	__netif_tx_unlock(eq->txq);
1347d429005fSVishal Kulkarni 	return reclaimed;
1348d429005fSVishal Kulkarni }
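
/* Sketch of the flow-control policy above (compiled out): the transmit path
 * stops a queue when fewer than ETHTXQ_STOP_THRES descriptors would remain,
 * but we only restart it once more than half the ring is free again.  The
 * gap between the two thresholds is deliberate hysteresis so a busy queue
 * doesn't flap between stopped and running.  ex_should_restart() is
 * hypothetical.
 */
#if 0
static bool ex_should_restart(const struct sge_eth_txq *eq)
{
	return netif_tx_queue_stopped(eq->txq) &&
	       txq_avail(&eq->q) > (eq->q.size / 2);
}
#endif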
1349d429005fSVishal Kulkarni 
1350d429005fSVishal Kulkarni /**
1351d5fbda61SArjun Vynipadath  *	cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1352f7917c00SJeff Kirsher  *	@skb: the packet
1353f7917c00SJeff Kirsher  *	@dev: the egress net device
1354f7917c00SJeff Kirsher  *
1355f7917c00SJeff Kirsher  *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
1356f7917c00SJeff Kirsher  */
1357d5fbda61SArjun Vynipadath static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1358f7917c00SJeff Kirsher {
1359a4569504SAtul Gupta 	u32 wr_mid, ctrl0, op;
1360c50ae55eSGanesh Goudar 	u64 cntrl, *end, *sgl;
1361f7917c00SJeff Kirsher 	int qidx, credits;
1362f7917c00SJeff Kirsher 	unsigned int flits, ndesc;
1363f7917c00SJeff Kirsher 	struct adapter *adap;
1364f7917c00SJeff Kirsher 	struct sge_eth_txq *q;
1365f7917c00SJeff Kirsher 	const struct port_info *pi;
1366f7917c00SJeff Kirsher 	struct fw_eth_tx_pkt_wr *wr;
1367f7917c00SJeff Kirsher 	struct cpl_tx_pkt_core *cpl;
1368f7917c00SJeff Kirsher 	const struct skb_shared_info *ssi;
1369f7917c00SJeff Kirsher 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
13700034b298SKumar Sanghvi 	bool immediate = false;
1371637d3e99SHariprasad Shenai 	int len, max_pkt_len;
1372a4569504SAtul Gupta 	bool ptp_enabled = is_ptp_enabled(skb, dev);
1373d0a1299cSGanesh Goudar 	unsigned int chip_ver;
1374d0a1299cSGanesh Goudar 	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1375d0a1299cSGanesh Goudar 
137684a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
137784a200b3SVarun Prakash 	int err;
137884a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
1379f7917c00SJeff Kirsher 
1380f7917c00SJeff Kirsher 	/*
1381f7917c00SJeff Kirsher 	 * The chip's minimum packet length is 10 octets, but play it safe and
1382f7917c00SJeff Kirsher 	 * reject anything shorter than an Ethernet header.
1383f7917c00SJeff Kirsher 	 */
1384f7917c00SJeff Kirsher 	if (unlikely(skb->len < ETH_HLEN)) {
1385a7525198SEric W. Biederman out_free:	dev_kfree_skb_any(skb);
1386f7917c00SJeff Kirsher 		return NETDEV_TX_OK;
1387f7917c00SJeff Kirsher 	}
1388f7917c00SJeff Kirsher 
1389637d3e99SHariprasad Shenai 	/* Discard the packet if the length is greater than the MTU */
1390637d3e99SHariprasad Shenai 	max_pkt_len = ETH_HLEN + dev->mtu;
13918d09e6b8SHariprasad Shenai 	if (skb_vlan_tagged(skb))
1392637d3e99SHariprasad Shenai 		max_pkt_len += VLAN_HLEN;
1393637d3e99SHariprasad Shenai 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1394637d3e99SHariprasad Shenai 		goto out_free;
1395637d3e99SHariprasad Shenai 
1396f7917c00SJeff Kirsher 	pi = netdev_priv(dev);
1397f7917c00SJeff Kirsher 	adap = pi->adapter;
1398a6ec572bSAtul Gupta 	ssi = skb_shinfo(skb);
1399a6ec572bSAtul Gupta #ifdef CONFIG_CHELSIO_IPSEC_INLINE
1400a6ec572bSAtul Gupta 	if (xfrm_offload(skb) && !ssi->gso_size)
1401a6ec572bSAtul Gupta 		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
1402a6ec572bSAtul Gupta #endif /* CHELSIO_IPSEC_INLINE */
1403a6ec572bSAtul Gupta 
1404f7917c00SJeff Kirsher 	qidx = skb_get_queue_mapping(skb);
1405a4569504SAtul Gupta 	if (ptp_enabled) {
1406a4569504SAtul Gupta 		spin_lock(&adap->ptp_lock);
1407a4569504SAtul Gupta 		if (!(adap->ptp_tx_skb)) {
1408a4569504SAtul Gupta 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1409a4569504SAtul Gupta 			adap->ptp_tx_skb = skb_get(skb);
1410a4569504SAtul Gupta 		} else {
1411a4569504SAtul Gupta 			spin_unlock(&adap->ptp_lock);
1412a4569504SAtul Gupta 			goto out_free;
1413a4569504SAtul Gupta 		}
1414a4569504SAtul Gupta 		q = &adap->sge.ptptxq;
1415a4569504SAtul Gupta 	} else {
1416f7917c00SJeff Kirsher 		q = &adap->sge.ethtxq[qidx + pi->first_qset];
1417a4569504SAtul Gupta 	}
1418a4569504SAtul Gupta 	skb_tx_timestamp(skb);
1419f7917c00SJeff Kirsher 
1420d429005fSVishal Kulkarni 	reclaim_completed_tx(adap, &q->q, -1, true);
14211ecc7b7aSHariprasad Shenai 	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
142284a200b3SVarun Prakash 
142384a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
142484a200b3SVarun Prakash 	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1425a4569504SAtul Gupta 	if (unlikely(err == -ENOTSUPP)) {
1426a4569504SAtul Gupta 		if (ptp_enabled)
1427a4569504SAtul Gupta 			spin_unlock(&adap->ptp_lock);
142884a200b3SVarun Prakash 		goto out_free;
1429a4569504SAtul Gupta 	}
143084a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
1431f7917c00SJeff Kirsher 
1432d0a1299cSGanesh Goudar 	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1433d0a1299cSGanesh Goudar 	flits = calc_tx_flits(skb, chip_ver);
1434f7917c00SJeff Kirsher 	ndesc = flits_to_desc(flits);
1435f7917c00SJeff Kirsher 	credits = txq_avail(&q->q) - ndesc;
1436f7917c00SJeff Kirsher 
1437f7917c00SJeff Kirsher 	if (unlikely(credits < 0)) {
1438f7917c00SJeff Kirsher 		eth_txq_stop(q);
1439f7917c00SJeff Kirsher 		dev_err(adap->pdev_dev,
1440f7917c00SJeff Kirsher 			"%s: Tx ring %u full while queue awake!\n",
1441f7917c00SJeff Kirsher 			dev->name, qidx);
1442a4569504SAtul Gupta 		if (ptp_enabled)
1443a4569504SAtul Gupta 			spin_unlock(&adap->ptp_lock);
1444f7917c00SJeff Kirsher 		return NETDEV_TX_BUSY;
1445f7917c00SJeff Kirsher 	}
1446f7917c00SJeff Kirsher 
1447d0a1299cSGanesh Goudar 	if (is_eth_imm(skb, chip_ver))
14480034b298SKumar Sanghvi 		immediate = true;
14490034b298SKumar Sanghvi 
1450d0a1299cSGanesh Goudar 	if (skb->encapsulation && chip_ver > CHELSIO_T5)
1451d0a1299cSGanesh Goudar 		tnl_type = cxgb_encap_offload_supported(skb);
1452d0a1299cSGanesh Goudar 
14530034b298SKumar Sanghvi 	if (!immediate &&
1454a6ec572bSAtul Gupta 	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
1455f7917c00SJeff Kirsher 		q->mapping_err++;
1456a4569504SAtul Gupta 		if (ptp_enabled)
1457a4569504SAtul Gupta 			spin_unlock(&adap->ptp_lock);
1458f7917c00SJeff Kirsher 		goto out_free;
1459f7917c00SJeff Kirsher 	}
1460f7917c00SJeff Kirsher 
1461e2ac9628SHariprasad Shenai 	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1462f7917c00SJeff Kirsher 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1463d429005fSVishal Kulkarni 		/* After we're done injecting the Work Request for this
1464d429005fSVishal Kulkarni 		 * packet, we'll be below our "stop threshold" so stop the TX
1465d429005fSVishal Kulkarni 		 * Queue now and schedule a request for an SGE Egress Queue
1466d429005fSVishal Kulkarni 		 * Update message. The queue will get started later on when
1467d429005fSVishal Kulkarni 		 * the firmware processes this Work Request and sends us an
1468d429005fSVishal Kulkarni 		 * Egress Queue Status Update message indicating that space
1469d429005fSVishal Kulkarni 		 * has opened up.
1470d429005fSVishal Kulkarni 		 */
1471f7917c00SJeff Kirsher 		eth_txq_stop(q);
1472d429005fSVishal Kulkarni 
1473d429005fSVishal Kulkarni 		/* If we're using the SGE Doorbell Queue Timer facility, we
1474d429005fSVishal Kulkarni 		 * don't need to ask the Firmware to send us Egress Queue CIDX
1475d429005fSVishal Kulkarni 		 * Updates: the Hardware will do this automatically.  And
1476d429005fSVishal Kulkarni 		 * since we send the Ingress Queue CIDX Updates to the
1477d429005fSVishal Kulkarni 		 * corresponding Ethernet Response Queue, we'll get them very
1478d429005fSVishal Kulkarni 		 * quickly.
1479d429005fSVishal Kulkarni 		 */
1480d429005fSVishal Kulkarni 		if (!q->dbqt)
1481e2ac9628SHariprasad Shenai 			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1482f7917c00SJeff Kirsher 	}
1483f7917c00SJeff Kirsher 
1484f7917c00SJeff Kirsher 	wr = (void *)&q->q.desc[q->q.pidx];
1485f7917c00SJeff Kirsher 	wr->equiq_to_len16 = htonl(wr_mid);
1486f7917c00SJeff Kirsher 	wr->r3 = cpu_to_be64(0);
1487f7917c00SJeff Kirsher 	end = (u64 *)wr + flits;
1488f7917c00SJeff Kirsher 
14890034b298SKumar Sanghvi 	len = immediate ? skb->len : 0;
1490a6076fcdSGanesh Goudar 	len += sizeof(*cpl);
1491f7917c00SJeff Kirsher 	if (ssi->gso_size) {
1492a6076fcdSGanesh Goudar 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1493f7917c00SJeff Kirsher 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1494f7917c00SJeff Kirsher 		int l3hdr_len = skb_network_header_len(skb);
1495f7917c00SJeff Kirsher 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1496d0a1299cSGanesh Goudar 		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
1497f7917c00SJeff Kirsher 
1498d0a1299cSGanesh Goudar 		if (tnl_type)
1499d0a1299cSGanesh Goudar 			len += sizeof(*tnl_lso);
1500d0a1299cSGanesh Goudar 		else
15010034b298SKumar Sanghvi 			len += sizeof(*lso);
1502d0a1299cSGanesh Goudar 
1503e2ac9628SHariprasad Shenai 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1504e2ac9628SHariprasad Shenai 				       FW_WR_IMMDLEN_V(len));
1505d0a1299cSGanesh Goudar 		if (tnl_type) {
1506d0a1299cSGanesh Goudar 			struct iphdr *iph = ip_hdr(skb);
1507d0a1299cSGanesh Goudar 
1508d0a1299cSGanesh Goudar 			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
1509d0a1299cSGanesh Goudar 			cpl = (void *)(tnl_lso + 1);
1510d0a1299cSGanesh Goudar 			/* The driver is expected to compute a partial checksum
1511d0a1299cSGanesh Goudar 			 * that does not include the IP Total Length.
1512d0a1299cSGanesh Goudar 			 */
1513d0a1299cSGanesh Goudar 			if (iph->version == 4) {
1514d0a1299cSGanesh Goudar 				iph->check = 0;
1515d0a1299cSGanesh Goudar 				iph->tot_len = 0;
1516d0a1299cSGanesh Goudar 				iph->check = (u16)(~ip_fast_csum((u8 *)iph,
1517d0a1299cSGanesh Goudar 								 iph->ihl));
1518d0a1299cSGanesh Goudar 			}
1519d0a1299cSGanesh Goudar 			if (skb->ip_summed == CHECKSUM_PARTIAL)
1520d0a1299cSGanesh Goudar 				cntrl = hwcsum(adap->params.chip, skb);
1521d0a1299cSGanesh Goudar 		} else {
1522a6076fcdSGanesh Goudar 			lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
15231ecc7b7aSHariprasad Shenai 					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
15241ecc7b7aSHariprasad Shenai 					LSO_IPV6_V(v6) |
15251ecc7b7aSHariprasad Shenai 					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
15261ecc7b7aSHariprasad Shenai 					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
15271ecc7b7aSHariprasad Shenai 					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1528a6076fcdSGanesh Goudar 			lso->ipid_ofst = htons(0);
1529a6076fcdSGanesh Goudar 			lso->mss = htons(ssi->gso_size);
1530a6076fcdSGanesh Goudar 			lso->seqno_offset = htonl(0);
15317207c0d1SHariprasad Shenai 			if (is_t4(adap->params.chip))
1532a6076fcdSGanesh Goudar 				lso->len = htonl(skb->len);
15337207c0d1SHariprasad Shenai 			else
1534a6076fcdSGanesh Goudar 				lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1535f7917c00SJeff Kirsher 			cpl = (void *)(lso + 1);
15363ccc6cf7SHariprasad Shenai 
1537d0a1299cSGanesh Goudar 			if (CHELSIO_CHIP_VERSION(adap->params.chip)
1538d0a1299cSGanesh Goudar 			    <= CHELSIO_T5)
15393ccc6cf7SHariprasad Shenai 				cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
15403ccc6cf7SHariprasad Shenai 			else
15413ccc6cf7SHariprasad Shenai 				cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
15423ccc6cf7SHariprasad Shenai 
15433ccc6cf7SHariprasad Shenai 			cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
15443ccc6cf7SHariprasad Shenai 				 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
15453ccc6cf7SHariprasad Shenai 				 TXPKT_IPHDR_LEN_V(l3hdr_len);
1546d0a1299cSGanesh Goudar 		}
1547c50ae55eSGanesh Goudar 		sgl = (u64 *)(cpl + 1); /* sgl start here */
1548c50ae55eSGanesh Goudar 		if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
1549c50ae55eSGanesh Goudar 			/* If the current position is already at the end of the
1550c50ae55eSGanesh Goudar 			 * txq, wrap it back to the start of the queue and
1551c50ae55eSGanesh Goudar 			 * update the end pointer as well.
1552c50ae55eSGanesh Goudar 			 */
1553c50ae55eSGanesh Goudar 			if (sgl == (u64 *)q->q.stat) {
1554c50ae55eSGanesh Goudar 				int left = (u8 *)end - (u8 *)q->q.stat;
1555c50ae55eSGanesh Goudar 
1556c50ae55eSGanesh Goudar 				end = (void *)q->q.desc + left;
1557c50ae55eSGanesh Goudar 				sgl = (void *)q->q.desc;
1558c50ae55eSGanesh Goudar 			}
1559c50ae55eSGanesh Goudar 		}
1560f7917c00SJeff Kirsher 		q->tso++;
1561f7917c00SJeff Kirsher 		q->tx_cso += ssi->gso_segs;
1562f7917c00SJeff Kirsher 	} else {
1563a4569504SAtul Gupta 		if (ptp_enabled)
1564a4569504SAtul Gupta 			op = FW_PTP_TX_PKT_WR;
1565a4569504SAtul Gupta 		else
1566a4569504SAtul Gupta 			op = FW_ETH_TX_PKT_WR;
1567a4569504SAtul Gupta 		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
1568e2ac9628SHariprasad Shenai 				       FW_WR_IMMDLEN_V(len));
1569f7917c00SJeff Kirsher 		cpl = (void *)(wr + 1);
1570c50ae55eSGanesh Goudar 		sgl = (u64 *)(cpl + 1);
1571f7917c00SJeff Kirsher 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
15723ccc6cf7SHariprasad Shenai 			cntrl = hwcsum(adap->params.chip, skb) |
15733ccc6cf7SHariprasad Shenai 				TXPKT_IPCSUM_DIS_F;
1574f7917c00SJeff Kirsher 			q->tx_cso++;
157584a200b3SVarun Prakash 		}
1576f7917c00SJeff Kirsher 	}
1577f7917c00SJeff Kirsher 
1578df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
1579f7917c00SJeff Kirsher 		q->vlan_ins++;
15801ecc7b7aSHariprasad Shenai 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
158184a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
158284a200b3SVarun Prakash 		if (skb->protocol == htons(ETH_P_FCOE))
15831ecc7b7aSHariprasad Shenai 			cntrl |= TXPKT_VLAN_V(
158484a200b3SVarun Prakash 				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
158584a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
1586f7917c00SJeff Kirsher 	}
1587f7917c00SJeff Kirsher 
1588397665daSAnish Bhatt 	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1589397665daSAnish Bhatt 		TXPKT_PF_V(adap->pf);
1590a4569504SAtul Gupta 	if (ptp_enabled)
1591a4569504SAtul Gupta 		ctrl0 |= TXPKT_TSTAMP_F;
1592397665daSAnish Bhatt #ifdef CONFIG_CHELSIO_T4_DCB
1593397665daSAnish Bhatt 	if (is_t4(adap->params.chip))
1594397665daSAnish Bhatt 		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1595397665daSAnish Bhatt 	else
1596397665daSAnish Bhatt 		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1597397665daSAnish Bhatt #endif
1598397665daSAnish Bhatt 	cpl->ctrl0 = htonl(ctrl0);
1599f7917c00SJeff Kirsher 	cpl->pack = htons(0);
1600f7917c00SJeff Kirsher 	cpl->len = htons(skb->len);
1601f7917c00SJeff Kirsher 	cpl->ctrl1 = cpu_to_be64(cntrl);
1602f7917c00SJeff Kirsher 
16030034b298SKumar Sanghvi 	if (immediate) {
1604c50ae55eSGanesh Goudar 		cxgb4_inline_tx_skb(skb, &q->q, sgl);
1605a7525198SEric W. Biederman 		dev_consume_skb_any(skb);
1606f7917c00SJeff Kirsher 	} else {
1607f7917c00SJeff Kirsher 		int last_desc;
1608f7917c00SJeff Kirsher 
1609c50ae55eSGanesh Goudar 		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
1610f7917c00SJeff Kirsher 		skb_orphan(skb);
1611f7917c00SJeff Kirsher 
1612f7917c00SJeff Kirsher 		last_desc = q->q.pidx + ndesc - 1;
1613f7917c00SJeff Kirsher 		if (last_desc >= q->q.size)
1614f7917c00SJeff Kirsher 			last_desc -= q->q.size;
1615f7917c00SJeff Kirsher 		q->q.sdesc[last_desc].skb = skb;
1616a6076fcdSGanesh Goudar 		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
1617f7917c00SJeff Kirsher 	}
1618f7917c00SJeff Kirsher 
1619f7917c00SJeff Kirsher 	txq_advance(&q->q, ndesc);
1620f7917c00SJeff Kirsher 
1621a6ec572bSAtul Gupta 	cxgb4_ring_tx_db(adap, &q->q, ndesc);
1622a4569504SAtul Gupta 	if (ptp_enabled)
1623a4569504SAtul Gupta 		spin_unlock(&adap->ptp_lock);
1624f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1625f7917c00SJeff Kirsher }
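
/* Sketch of the Work Request layouts built above (compiled out).  The
 * immediate-data length fed to FW_WR_IMMDLEN_V() always covers the CPL
 * (plus an LSO or tunnel-LSO header when GSO is in play); the packet itself
 * is immediate only when is_eth_imm() said so:
 *
 *   non-GSO: [fw_eth_tx_pkt_wr][cpl_tx_pkt_core][inline data or ulptx_sgl]
 *   GSO:     [fw_eth_tx_pkt_wr][cpl_tx_pkt_lso_core][cpl_tx_pkt_core][sgl]
 *   tunnel:  [fw_eth_tx_pkt_wr][cpl_tx_tnl_lso][cpl_tx_pkt_core][sgl]
 *
 * ex_eth_immdlen() is a hypothetical restatement of that arithmetic.
 */
#if 0
static int ex_eth_immdlen(const struct sk_buff *skb, bool immediate,
			  bool gso, bool tnl)
{
	int len = sizeof(struct cpl_tx_pkt_core);

	if (immediate)
		len += skb->len;
	if (gso)
		len += tnl ? sizeof(struct cpl_tx_tnl_lso)
			   : sizeof(struct cpl_tx_pkt_lso_core);
	return len;
}
#endif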
1626f7917c00SJeff Kirsher 
1627d5fbda61SArjun Vynipadath /* Constants ... */
1628d5fbda61SArjun Vynipadath enum {
1629d5fbda61SArjun Vynipadath 	/* Egress Queue sizes, producer and consumer indices are all in units
1630d5fbda61SArjun Vynipadath 	 * of Egress Context Units (EQ_UNIT bytes).  Note that as far as the
1631d5fbda61SArjun Vynipadath 	 * concerned, the free list is an Egress Queue (the host produces free
1632d5fbda61SArjun Vynipadath 	 * buffers which the hardware consumes) and free list entries are
1633d5fbda61SArjun Vynipadath 	 * 64-bit PCI DMA addresses.
1634d5fbda61SArjun Vynipadath 	 */
1635d5fbda61SArjun Vynipadath 	EQ_UNIT = SGE_EQ_IDXSIZE,
1636d5fbda61SArjun Vynipadath 	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1637d5fbda61SArjun Vynipadath 	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1638d5fbda61SArjun Vynipadath 
1639d5fbda61SArjun Vynipadath 	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1640d5fbda61SArjun Vynipadath 			       sizeof(struct cpl_tx_pkt_lso_core) +
1641d5fbda61SArjun Vynipadath 			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
1642d5fbda61SArjun Vynipadath };
1643d5fbda61SArjun Vynipadath 
1644d5fbda61SArjun Vynipadath /**
1645d5fbda61SArjun Vynipadath  *	t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1646d5fbda61SArjun Vynipadath  *	@skb: the packet
1647d5fbda61SArjun Vynipadath  *
1648d5fbda61SArjun Vynipadath  *	Returns whether an Ethernet packet is small enough to fit completely as
1649d5fbda61SArjun Vynipadath  *	immediate data.
1650d5fbda61SArjun Vynipadath  */
1651d5fbda61SArjun Vynipadath static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
1652d5fbda61SArjun Vynipadath {
1653d5fbda61SArjun Vynipadath 	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
1654d5fbda61SArjun Vynipadath 	 * which does not accommodate immediate data.  We could dike out all
1655d5fbda61SArjun Vynipadath 	 * of the support code for immediate data but that would tie our hands
1656d5fbda61SArjun Vynipadath 	 * too much if we ever want to enhance the firmware.  It would also
1657d5fbda61SArjun Vynipadath 	 * create more differences between the PF and VF Drivers.
1658d5fbda61SArjun Vynipadath 	 */
1659d5fbda61SArjun Vynipadath 	return false;
1660d5fbda61SArjun Vynipadath }
1661d5fbda61SArjun Vynipadath 
1662d5fbda61SArjun Vynipadath /**
1663d5fbda61SArjun Vynipadath  *	t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1664d5fbda61SArjun Vynipadath  *	@skb: the packet
1665d5fbda61SArjun Vynipadath  *
1666d5fbda61SArjun Vynipadath  *	Returns the number of flits needed for a TX Work Request for the
1667d5fbda61SArjun Vynipadath  *	given Ethernet packet, including the needed WR and CPL headers.
1668d5fbda61SArjun Vynipadath  */
1669d5fbda61SArjun Vynipadath static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
1670d5fbda61SArjun Vynipadath {
1671d5fbda61SArjun Vynipadath 	unsigned int flits;
1672d5fbda61SArjun Vynipadath 
1673d5fbda61SArjun Vynipadath 	/* If the skb is small enough, we can pump it out as a work request
1674d5fbda61SArjun Vynipadath 	 * with only immediate data.  In that case we just have to have the
1675d5fbda61SArjun Vynipadath 	 * TX Packet header plus the skb data in the Work Request.
1676d5fbda61SArjun Vynipadath 	 */
1677d5fbda61SArjun Vynipadath 	if (t4vf_is_eth_imm(skb))
1678d5fbda61SArjun Vynipadath 		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
1679d5fbda61SArjun Vynipadath 				    sizeof(__be64));
1680d5fbda61SArjun Vynipadath 
1681d5fbda61SArjun Vynipadath 	/* Otherwise, we're going to have to construct a Scatter/Gather List
1682d5fbda61SArjun Vynipadath 	 * of the skb body and fragments.  We also include the flits necessary
1683d5fbda61SArjun Vynipadath 	 * for the TX Packet Work Request and CPL.  We always have a firmware
1684d5fbda61SArjun Vynipadath 	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
1685d5fbda61SArjun Vynipadath 	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
1686d5fbda61SArjun Vynipadath 	 * message or, if we're doing a Large Send Offload, an LSO CPL message
1687d5fbda61SArjun Vynipadath 	 * with an embedded TX Packet Write CPL message.
1688d5fbda61SArjun Vynipadath 	 */
1689d5fbda61SArjun Vynipadath 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
1690d5fbda61SArjun Vynipadath 	if (skb_shinfo(skb)->gso_size)
1691d5fbda61SArjun Vynipadath 		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1692d5fbda61SArjun Vynipadath 			  sizeof(struct cpl_tx_pkt_lso_core) +
1693d5fbda61SArjun Vynipadath 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1694d5fbda61SArjun Vynipadath 	else
1695d5fbda61SArjun Vynipadath 		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1696d5fbda61SArjun Vynipadath 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1697d5fbda61SArjun Vynipadath 	return flits;
1698d5fbda61SArjun Vynipadath }
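
/* Sketch of the SGL sizing behind the calculation above (compiled out).
 * This mirrors the sgl_len() helper defined earlier in this file: a ULPTX
 * Direct SGL starts with a 32-bit header plus Length0/Addr0, then packs the
 * remaining DMA segments as { Len[i], Len[i+1], Addr[i], Addr[i+1] } quads,
 * i.e. two segments per three flits, with an odd final segment padded.
 * ex_sgl_flits() is hypothetical.
 */
#if 0
static unsigned int ex_sgl_flits(unsigned int nsegs)
{
	unsigned int n = nsegs - 1;	/* first segment rides with the header */

	return 3 * n / 2 + (n & 1) + 2;
}
#endif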
1699d5fbda61SArjun Vynipadath 
1700d5fbda61SArjun Vynipadath /**
1701d5fbda61SArjun Vynipadath  *	cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1702d5fbda61SArjun Vynipadath  *	@skb: the packet
1703d5fbda61SArjun Vynipadath  *	@dev: the egress net device
1704d5fbda61SArjun Vynipadath  *
1705d5fbda61SArjun Vynipadath  *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
1706d5fbda61SArjun Vynipadath  */
1707d5fbda61SArjun Vynipadath static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
1708d5fbda61SArjun Vynipadath 				     struct net_device *dev)
1709d5fbda61SArjun Vynipadath {
1710d5fbda61SArjun Vynipadath 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1711d5fbda61SArjun Vynipadath 	const struct skb_shared_info *ssi;
1712d5fbda61SArjun Vynipadath 	struct fw_eth_tx_pkt_vm_wr *wr;
1713d5fbda61SArjun Vynipadath 	int qidx, credits, max_pkt_len;
1714d5fbda61SArjun Vynipadath 	struct cpl_tx_pkt_core *cpl;
1715d5fbda61SArjun Vynipadath 	const struct port_info *pi;
1716d5fbda61SArjun Vynipadath 	unsigned int flits, ndesc;
1717d5fbda61SArjun Vynipadath 	struct sge_eth_txq *txq;
1718d5fbda61SArjun Vynipadath 	struct adapter *adapter;
1719d5fbda61SArjun Vynipadath 	u64 cntrl, *end;
1720d5fbda61SArjun Vynipadath 	u32 wr_mid;
1721d5fbda61SArjun Vynipadath 	const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
1722d5fbda61SArjun Vynipadath 				       sizeof(wr->ethmacsrc) +
1723d5fbda61SArjun Vynipadath 				       sizeof(wr->ethtype) +
1724d5fbda61SArjun Vynipadath 				       sizeof(wr->vlantci);
1725d5fbda61SArjun Vynipadath 
1726d5fbda61SArjun Vynipadath 	/* The chip minimum packet length is 10 octets but the firmware
1727d5fbda61SArjun Vynipadath 	 * command that we are using requires that we copy the Ethernet header
1728d5fbda61SArjun Vynipadath 	 * (including the VLAN tag) into the header so we reject anything
1729d5fbda61SArjun Vynipadath 	 * smaller than that ...
1730d5fbda61SArjun Vynipadath 	 */
1731d5fbda61SArjun Vynipadath 	if (unlikely(skb->len < fw_hdr_copy_len))
1732d5fbda61SArjun Vynipadath 		goto out_free;
1733d5fbda61SArjun Vynipadath 
1734d5fbda61SArjun Vynipadath 	/* Discard the packet if the length is greater than the MTU */
1735d5fbda61SArjun Vynipadath 	max_pkt_len = ETH_HLEN + dev->mtu;
1736d5fbda61SArjun Vynipadath 	if (skb_vlan_tag_present(skb))
1737d5fbda61SArjun Vynipadath 		max_pkt_len += VLAN_HLEN;
1738d5fbda61SArjun Vynipadath 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1739d5fbda61SArjun Vynipadath 		goto out_free;
1740d5fbda61SArjun Vynipadath 
1741d5fbda61SArjun Vynipadath 	/* Figure out which TX Queue we're going to use. */
1742d5fbda61SArjun Vynipadath 	pi = netdev_priv(dev);
1743d5fbda61SArjun Vynipadath 	adapter = pi->adapter;
1744d5fbda61SArjun Vynipadath 	qidx = skb_get_queue_mapping(skb);
1745d5fbda61SArjun Vynipadath 	WARN_ON(qidx >= pi->nqsets);
1746d5fbda61SArjun Vynipadath 	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1747d5fbda61SArjun Vynipadath 
1748d5fbda61SArjun Vynipadath 	/* Take this opportunity to reclaim any TX Descriptors whose DMA
1749d5fbda61SArjun Vynipadath 	 * transfers have completed.
1750d5fbda61SArjun Vynipadath 	 */
1751d429005fSVishal Kulkarni 	reclaim_completed_tx(adapter, &txq->q, -1, true);
1752d5fbda61SArjun Vynipadath 
1753d5fbda61SArjun Vynipadath 	/* Calculate the number of flits and TX Descriptors we're going to
1754d5fbda61SArjun Vynipadath 	 * need along with how many TX Descriptors will be left over after
1755d5fbda61SArjun Vynipadath 	 * we inject our Work Request.
1756d5fbda61SArjun Vynipadath 	 */
1757d5fbda61SArjun Vynipadath 	flits = t4vf_calc_tx_flits(skb);
1758d5fbda61SArjun Vynipadath 	ndesc = flits_to_desc(flits);
1759d5fbda61SArjun Vynipadath 	credits = txq_avail(&txq->q) - ndesc;
1760d5fbda61SArjun Vynipadath 
1761d5fbda61SArjun Vynipadath 	if (unlikely(credits < 0)) {
1762d5fbda61SArjun Vynipadath 		/* Not enough room for this packet's Work Request.  Stop the
1763d5fbda61SArjun Vynipadath 		 * TX Queue and return a "busy" condition.  The queue will get
1764d5fbda61SArjun Vynipadath 		 * started later on when the firmware informs us that space
1765d5fbda61SArjun Vynipadath 		 * has opened up.
1766d5fbda61SArjun Vynipadath 		 */
1767d5fbda61SArjun Vynipadath 		eth_txq_stop(txq);
1768d5fbda61SArjun Vynipadath 		dev_err(adapter->pdev_dev,
1769d5fbda61SArjun Vynipadath 			"%s: TX ring %u full while queue awake!\n",
1770d5fbda61SArjun Vynipadath 			dev->name, qidx);
1771d5fbda61SArjun Vynipadath 		return NETDEV_TX_BUSY;
1772d5fbda61SArjun Vynipadath 	}
1773d5fbda61SArjun Vynipadath 
1774d5fbda61SArjun Vynipadath 	if (!t4vf_is_eth_imm(skb) &&
1775d5fbda61SArjun Vynipadath 	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1776d5fbda61SArjun Vynipadath 		/* We need to map the skb into PCI DMA space (because it can't
1777d5fbda61SArjun Vynipadath 		 * be in-lined directly into the Work Request) and the mapping
1778d5fbda61SArjun Vynipadath 		 * operation failed.  Record the error and drop the packet.
1779d5fbda61SArjun Vynipadath 		 */
1780d5fbda61SArjun Vynipadath 		txq->mapping_err++;
1781d5fbda61SArjun Vynipadath 		goto out_free;
1782d5fbda61SArjun Vynipadath 	}
1783d5fbda61SArjun Vynipadath 
1784d5fbda61SArjun Vynipadath 	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1785d5fbda61SArjun Vynipadath 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1786d5fbda61SArjun Vynipadath 		/* After we're done injecting the Work Request for this
1787d5fbda61SArjun Vynipadath 		 * packet, we'll be below our "stop threshold" so stop the TX
1788d5fbda61SArjun Vynipadath 		 * Queue now and schedule a request for an SGE Egress Queue
1789d5fbda61SArjun Vynipadath 		 * Update message.  The queue will get started later on when
1790d5fbda61SArjun Vynipadath 		 * the firmware processes this Work Request and sends us an
1791d5fbda61SArjun Vynipadath 		 * Egress Queue Status Update message indicating that space
1792d5fbda61SArjun Vynipadath 		 * has opened up.
1793d5fbda61SArjun Vynipadath 		 */
1794d5fbda61SArjun Vynipadath 		eth_txq_stop(txq);
1795d429005fSVishal Kulkarni 
1796d429005fSVishal Kulkarni 		/* If we're using the SGE Doorbell Queue Timer facility, we
1797d429005fSVishal Kulkarni 		 * don't need to ask the Firmware to send us Egress Queue CIDX
1798d429005fSVishal Kulkarni 		 * Updates: the Hardware will do this automatically.  And
1799d429005fSVishal Kulkarni 		 * since we send the Ingress Queue CIDX Updates to the
1800d429005fSVishal Kulkarni 		 * corresponding Ethernet Response Queue, we'll get them very
1801d429005fSVishal Kulkarni 		 * quickly.
1802d429005fSVishal Kulkarni 		 */
1803d429005fSVishal Kulkarni 		if (!txq->dbqt)
1804d5fbda61SArjun Vynipadath 			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1805d5fbda61SArjun Vynipadath 	}
1806d5fbda61SArjun Vynipadath 
1807d5fbda61SArjun Vynipadath 	/* Start filling in our Work Request.  Note that we do _not_ handle
1808d5fbda61SArjun Vynipadath 	 * the WR Header wrapping around the TX Descriptor Ring.  If our
1809d5fbda61SArjun Vynipadath 	 * maximum header size ever exceeds one TX Descriptor, we'll need to
1810d5fbda61SArjun Vynipadath 	 * do something else here.
1811d5fbda61SArjun Vynipadath 	 */
1812d5fbda61SArjun Vynipadath 	WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1813d5fbda61SArjun Vynipadath 	wr = (void *)&txq->q.desc[txq->q.pidx];
1814d5fbda61SArjun Vynipadath 	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1815d5fbda61SArjun Vynipadath 	wr->r3[0] = cpu_to_be32(0);
1816d5fbda61SArjun Vynipadath 	wr->r3[1] = cpu_to_be32(0);
1817d5fbda61SArjun Vynipadath 	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1818d5fbda61SArjun Vynipadath 	end = (u64 *)wr + flits;
1819d5fbda61SArjun Vynipadath 
1820d5fbda61SArjun Vynipadath 	/* If this is a Large Send Offload packet we'll put in an LSO CPL
1821d5fbda61SArjun Vynipadath 	 * message with an encapsulated TX Packet CPL message.  Otherwise we
1822d5fbda61SArjun Vynipadath 	 * just use a TX Packet CPL message.
1823d5fbda61SArjun Vynipadath 	 */
1824d5fbda61SArjun Vynipadath 	ssi = skb_shinfo(skb);
1825d5fbda61SArjun Vynipadath 	if (ssi->gso_size) {
1826d5fbda61SArjun Vynipadath 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1827d5fbda61SArjun Vynipadath 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1828d5fbda61SArjun Vynipadath 		int l3hdr_len = skb_network_header_len(skb);
1829d5fbda61SArjun Vynipadath 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1830d5fbda61SArjun Vynipadath 
1831d5fbda61SArjun Vynipadath 		wr->op_immdlen =
1832d5fbda61SArjun Vynipadath 			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1833d5fbda61SArjun Vynipadath 				    FW_WR_IMMDLEN_V(sizeof(*lso) +
1834d5fbda61SArjun Vynipadath 						    sizeof(*cpl)));
1835d5fbda61SArjun Vynipadath 		 /* Fill in the LSO CPL message. */
1836d5fbda61SArjun Vynipadath 		lso->lso_ctrl =
1837d5fbda61SArjun Vynipadath 			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1838d5fbda61SArjun Vynipadath 				    LSO_FIRST_SLICE_F |
1839d5fbda61SArjun Vynipadath 				    LSO_LAST_SLICE_F |
1840d5fbda61SArjun Vynipadath 				    LSO_IPV6_V(v6) |
1841d5fbda61SArjun Vynipadath 				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1842d5fbda61SArjun Vynipadath 				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1843d5fbda61SArjun Vynipadath 				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1844d5fbda61SArjun Vynipadath 		lso->ipid_ofst = cpu_to_be16(0);
1845d5fbda61SArjun Vynipadath 		lso->mss = cpu_to_be16(ssi->gso_size);
1846d5fbda61SArjun Vynipadath 		lso->seqno_offset = cpu_to_be32(0);
1847d5fbda61SArjun Vynipadath 		if (is_t4(adapter->params.chip))
1848d5fbda61SArjun Vynipadath 			lso->len = cpu_to_be32(skb->len);
1849d5fbda61SArjun Vynipadath 		else
1850d5fbda61SArjun Vynipadath 			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1851d5fbda61SArjun Vynipadath 
1852d5fbda61SArjun Vynipadath 		/* Set up TX Packet CPL pointer, control word and perform
1853d5fbda61SArjun Vynipadath 		 * accounting.
1854d5fbda61SArjun Vynipadath 		 */
1855d5fbda61SArjun Vynipadath 		cpl = (void *)(lso + 1);
1856d5fbda61SArjun Vynipadath 
1857d5fbda61SArjun Vynipadath 		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1858d5fbda61SArjun Vynipadath 			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1859d5fbda61SArjun Vynipadath 		else
1860d5fbda61SArjun Vynipadath 			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1861d5fbda61SArjun Vynipadath 
1862d5fbda61SArjun Vynipadath 		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1863d5fbda61SArjun Vynipadath 					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1864d5fbda61SArjun Vynipadath 			 TXPKT_IPHDR_LEN_V(l3hdr_len);
1865d5fbda61SArjun Vynipadath 		txq->tso++;
1866d5fbda61SArjun Vynipadath 		txq->tx_cso += ssi->gso_segs;
1867d5fbda61SArjun Vynipadath 	} else {
1868d5fbda61SArjun Vynipadath 		int len;
1869d5fbda61SArjun Vynipadath 
1870d5fbda61SArjun Vynipadath 		len = (t4vf_is_eth_imm(skb)
1871d5fbda61SArjun Vynipadath 		       ? skb->len + sizeof(*cpl)
1872d5fbda61SArjun Vynipadath 		       : sizeof(*cpl));
1873d5fbda61SArjun Vynipadath 		wr->op_immdlen =
1874d5fbda61SArjun Vynipadath 			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1875d5fbda61SArjun Vynipadath 				    FW_WR_IMMDLEN_V(len));
1876d5fbda61SArjun Vynipadath 
1877d5fbda61SArjun Vynipadath 		/* Set up TX Packet CPL pointer, control word and perform
1878d5fbda61SArjun Vynipadath 		 * accounting.
1879d5fbda61SArjun Vynipadath 		 */
1880d5fbda61SArjun Vynipadath 		cpl = (void *)(wr + 1);
1881d5fbda61SArjun Vynipadath 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1882d5fbda61SArjun Vynipadath 			cntrl = hwcsum(adapter->params.chip, skb) |
1883d5fbda61SArjun Vynipadath 				TXPKT_IPCSUM_DIS_F;
1884d5fbda61SArjun Vynipadath 			txq->tx_cso++;
1885d5fbda61SArjun Vynipadath 		} else {
1886d5fbda61SArjun Vynipadath 			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1887d5fbda61SArjun Vynipadath 		}
1888d5fbda61SArjun Vynipadath 	}
1889d5fbda61SArjun Vynipadath 
1890d5fbda61SArjun Vynipadath 	/* If there's a VLAN tag present, add that to the list of things to
1891d5fbda61SArjun Vynipadath 	 * do in this Work Request.
1892d5fbda61SArjun Vynipadath 	 */
1893d5fbda61SArjun Vynipadath 	if (skb_vlan_tag_present(skb)) {
1894d5fbda61SArjun Vynipadath 		txq->vlan_ins++;
1895d5fbda61SArjun Vynipadath 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1896d5fbda61SArjun Vynipadath 	}
1897d5fbda61SArjun Vynipadath 
1898d5fbda61SArjun Vynipadath 	 /* Fill in the TX Packet CPL message header. */
1899d5fbda61SArjun Vynipadath 	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1900d5fbda61SArjun Vynipadath 				 TXPKT_INTF_V(pi->port_id) |
1901d5fbda61SArjun Vynipadath 				 TXPKT_PF_V(0));
1902d5fbda61SArjun Vynipadath 	cpl->pack = cpu_to_be16(0);
1903d5fbda61SArjun Vynipadath 	cpl->len = cpu_to_be16(skb->len);
1904d5fbda61SArjun Vynipadath 	cpl->ctrl1 = cpu_to_be64(cntrl);
1905d5fbda61SArjun Vynipadath 
1906d5fbda61SArjun Vynipadath 	/* Fill in the body of the TX Packet CPL message with either in-lined
1907d5fbda61SArjun Vynipadath 	 * data or a Scatter/Gather List.
1908d5fbda61SArjun Vynipadath 	 */
1909d5fbda61SArjun Vynipadath 	if (t4vf_is_eth_imm(skb)) {
1910d5fbda61SArjun Vynipadath 		/* In-line the packet's data and free the skb since we don't
1911d5fbda61SArjun Vynipadath 		 * need it any longer.
1912d5fbda61SArjun Vynipadath 		 */
1913d5fbda61SArjun Vynipadath 		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
1914d5fbda61SArjun Vynipadath 		dev_consume_skb_any(skb);
1915d5fbda61SArjun Vynipadath 	} else {
1916d5fbda61SArjun Vynipadath 		/* Write the skb's Scatter/Gather list into the TX Packet CPL
1917d5fbda61SArjun Vynipadath 		 * message and retain a pointer to the skb so we can free it
1918d5fbda61SArjun Vynipadath 		 * later when its DMA completes.  (We store the skb pointer
1919d5fbda61SArjun Vynipadath 		 * in the Software Descriptor corresponding to the last TX
1920d5fbda61SArjun Vynipadath 		 * Descriptor used by the Work Request.)
1921d5fbda61SArjun Vynipadath 		 *
1922d5fbda61SArjun Vynipadath 		 * The retained skb will be freed when the corresponding TX
1923d5fbda61SArjun Vynipadath 		 * Descriptors are reclaimed after their DMAs complete.
1924d5fbda61SArjun Vynipadath 		 * However, this could take quite a while since, in general,
1925d5fbda61SArjun Vynipadath 		 * the hardware is set up to be lazy about sending DMA
1926d5fbda61SArjun Vynipadath 		 * completion notifications to us and we mostly perform TX
1927d5fbda61SArjun Vynipadath 		 * reclaims in the transmit routine.
1928d5fbda61SArjun Vynipadath 		 *
1929d5fbda61SArjun Vynipadath 		 * This is good for performance but means that we rely on new
1930d5fbda61SArjun Vynipadath 		 * TX packets arriving to run the destructors of completed
1931d5fbda61SArjun Vynipadath 		 * packets, which open up space in their sockets' send queues.
1932d5fbda61SArjun Vynipadath 		 * Sometimes we do not get such new packets causing TX to
1933d5fbda61SArjun Vynipadath 		 * stall.  A single UDP transmitter is a good example of this
1934d5fbda61SArjun Vynipadath 		 * situation.  We have a clean up timer that periodically
1935d5fbda61SArjun Vynipadath 		 * reclaims completed packets but it doesn't run often enough
1936d5fbda61SArjun Vynipadath 		 * (nor do we want it to) to prevent lengthy stalls.  A
1937d5fbda61SArjun Vynipadath 		 * solution to this problem is to run the destructor early,
1938d5fbda61SArjun Vynipadath 		 * after the packet is queued but before it's DMAd.  A con is
1939d5fbda61SArjun Vynipadath 		 * that we lie to socket memory accounting, but the amount of
1940d5fbda61SArjun Vynipadath 		 * extra memory is reasonable (limited by the number of TX
1941d5fbda61SArjun Vynipadath 		 * descriptors), the packets do actually get freed quickly by
1942d5fbda61SArjun Vynipadath 		 * new packets almost always, and for protocols like TCP that
1943d5fbda61SArjun Vynipadath 		 * wait for acks to really free up the data the extra memory
1944d5fbda61SArjun Vynipadath 		 * is even less.  On the positive side we run the destructors
1945d5fbda61SArjun Vynipadath 		 * on the sending CPU rather than on a potentially different
1946d5fbda61SArjun Vynipadath 		 * completing CPU, usually a good thing.
1947d5fbda61SArjun Vynipadath 		 *
1948d5fbda61SArjun Vynipadath 		 * Run the destructor before telling the DMA engine about the
1949d5fbda61SArjun Vynipadath 		 * packet to make sure it doesn't complete and get freed
1950d5fbda61SArjun Vynipadath 		 * prematurely.
1951d5fbda61SArjun Vynipadath 		 */
1952d5fbda61SArjun Vynipadath 		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1953d5fbda61SArjun Vynipadath 		struct sge_txq *tq = &txq->q;
1954d5fbda61SArjun Vynipadath 		int last_desc;
1955d5fbda61SArjun Vynipadath 
1956d5fbda61SArjun Vynipadath 		/* If the Work Request header was an exact multiple of our TX
1957d5fbda61SArjun Vynipadath 		 * Descriptor length, then it's possible that the starting SGL
1958d5fbda61SArjun Vynipadath 		 * pointer lines up exactly with the end of our TX Descriptor
1959d5fbda61SArjun Vynipadath 		 * ring.  If that's the case, wrap around to the beginning
1960d5fbda61SArjun Vynipadath 		 * here ...
1961d5fbda61SArjun Vynipadath 		 */
1962d5fbda61SArjun Vynipadath 		if (unlikely((void *)sgl == (void *)tq->stat)) {
1963d5fbda61SArjun Vynipadath 			sgl = (void *)tq->desc;
1964d5fbda61SArjun Vynipadath 			end = (void *)((void *)tq->desc +
1965d5fbda61SArjun Vynipadath 				       ((void *)end - (void *)tq->stat));
1966d5fbda61SArjun Vynipadath 		}
1967d5fbda61SArjun Vynipadath 
1968d5fbda61SArjun Vynipadath 		cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
1969d5fbda61SArjun Vynipadath 		skb_orphan(skb);
1970d5fbda61SArjun Vynipadath 
1971d5fbda61SArjun Vynipadath 		last_desc = tq->pidx + ndesc - 1;
1972d5fbda61SArjun Vynipadath 		if (last_desc >= tq->size)
1973d5fbda61SArjun Vynipadath 			last_desc -= tq->size;
1974d5fbda61SArjun Vynipadath 		tq->sdesc[last_desc].skb = skb;
1975d5fbda61SArjun Vynipadath 		tq->sdesc[last_desc].sgl = sgl;
1976d5fbda61SArjun Vynipadath 	}
1977d5fbda61SArjun Vynipadath 
1978d5fbda61SArjun Vynipadath 	/* Advance our internal TX Queue state, tell the hardware about
1979d5fbda61SArjun Vynipadath 	 * the new TX descriptors and return success.
1980d5fbda61SArjun Vynipadath 	 */
1981d5fbda61SArjun Vynipadath 	txq_advance(&txq->q, ndesc);
1982d5fbda61SArjun Vynipadath 
1983d5fbda61SArjun Vynipadath 	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
1984d5fbda61SArjun Vynipadath 	return NETDEV_TX_OK;
1985d5fbda61SArjun Vynipadath 
1986d5fbda61SArjun Vynipadath out_free:
1987d5fbda61SArjun Vynipadath 	/* An error of some sort happened.  Free the TX skb and tell the
1988d5fbda61SArjun Vynipadath 	 * OS that we've "dealt" with the packet ...
1989d5fbda61SArjun Vynipadath 	 */
1990d5fbda61SArjun Vynipadath 	dev_kfree_skb_any(skb);
1991d5fbda61SArjun Vynipadath 	return NETDEV_TX_OK;
1992d5fbda61SArjun Vynipadath }
1993d5fbda61SArjun Vynipadath 
1994d5fbda61SArjun Vynipadath netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
1995d5fbda61SArjun Vynipadath {
1996d5fbda61SArjun Vynipadath 	struct port_info *pi = netdev_priv(dev);
1997d5fbda61SArjun Vynipadath 
1998d5fbda61SArjun Vynipadath 	if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
1999d5fbda61SArjun Vynipadath 		return cxgb4_vf_eth_xmit(skb, dev);
2000d5fbda61SArjun Vynipadath 
2001d5fbda61SArjun Vynipadath 	return cxgb4_eth_xmit(skb, dev);
2002d5fbda61SArjun Vynipadath }
2003d5fbda61SArjun Vynipadath 
2004f7917c00SJeff Kirsher /**
2005f7917c00SJeff Kirsher  *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
2006f7917c00SJeff Kirsher  *	@q: the SGE control Tx queue
2007f7917c00SJeff Kirsher  *
2008a6ec572bSAtul Gupta  *	This is a variant of cxgb4_reclaim_completed_tx() that is used
2009a6ec572bSAtul Gupta  *	for Tx queues that send only immediate data (presently just
2010a6ec572bSAtul Gupta  *	the control queues) and thus do not have any sk_buffs to release.
2011f7917c00SJeff Kirsher  */
2012f7917c00SJeff Kirsher static inline void reclaim_completed_tx_imm(struct sge_txq *q)
2013f7917c00SJeff Kirsher {
20146aa7de05SMark Rutland 	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
2015f7917c00SJeff Kirsher 	int reclaim = hw_cidx - q->cidx;
2016f7917c00SJeff Kirsher 
2017f7917c00SJeff Kirsher 	if (reclaim < 0)
2018f7917c00SJeff Kirsher 		reclaim += q->size;
2019f7917c00SJeff Kirsher 
2020f7917c00SJeff Kirsher 	q->in_use -= reclaim;
2021f7917c00SJeff Kirsher 	q->cidx = hw_cidx;
2022f7917c00SJeff Kirsher }
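
/* Worked example of the reclaim arithmetic above (hypothetical values):
 * with q->size = 1024, a software q->cidx of 1000 and the hardware
 * status page reporting cidx = 8, reclaim = 8 - 1000 = -992; adding
 * q->size gives 32, i.e. the hardware consumed 32 descriptors across
 * the ring wrap.  q->in_use drops by 32 and q->cidx catches up to 8.
 */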
2023f7917c00SJeff Kirsher 
2024f7917c00SJeff Kirsher /**
2025f7917c00SJeff Kirsher  *	is_imm - check whether a packet can be sent as immediate data
2026f7917c00SJeff Kirsher  *	@skb: the packet
2027f7917c00SJeff Kirsher  *
2028f7917c00SJeff Kirsher  *	Returns true if a packet can be sent as a WR with immediate data.
2029f7917c00SJeff Kirsher  */
2030f7917c00SJeff Kirsher static inline int is_imm(const struct sk_buff *skb)
2031f7917c00SJeff Kirsher {
2032f7917c00SJeff Kirsher 	return skb->len <= MAX_CTRL_WR_LEN;
2033f7917c00SJeff Kirsher }
2034f7917c00SJeff Kirsher 
2035f7917c00SJeff Kirsher /**
2036f7917c00SJeff Kirsher  *	ctrlq_check_stop - check if a control queue is full and should stop
2037f7917c00SJeff Kirsher  *	@q: the queue
2038f7917c00SJeff Kirsher  *	@wr: most recent WR written to the queue
2039f7917c00SJeff Kirsher  *
2040f7917c00SJeff Kirsher  *	Check if a control queue has become full and should be stopped.
2041f7917c00SJeff Kirsher  *	We clean up control queue descriptors very lazily, only when we run out.
2042f7917c00SJeff Kirsher  *	If the queue is still full after reclaiming any completed descriptors
2043f7917c00SJeff Kirsher  *	we suspend it and have the last WR wake it up.
2044f7917c00SJeff Kirsher  */
2045f7917c00SJeff Kirsher static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
2046f7917c00SJeff Kirsher {
2047f7917c00SJeff Kirsher 	reclaim_completed_tx_imm(&q->q);
2048f7917c00SJeff Kirsher 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2049e2ac9628SHariprasad Shenai 		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2050f7917c00SJeff Kirsher 		q->q.stops++;
2051f7917c00SJeff Kirsher 		q->full = 1;
2052f7917c00SJeff Kirsher 	}
2053f7917c00SJeff Kirsher }
2054f7917c00SJeff Kirsher 
2055f7917c00SJeff Kirsher /**
2056f7917c00SJeff Kirsher  *	ctrl_xmit - send a packet through an SGE control Tx queue
2057f7917c00SJeff Kirsher  *	@q: the control queue
2058f7917c00SJeff Kirsher  *	@skb: the packet
2059f7917c00SJeff Kirsher  *
2060f7917c00SJeff Kirsher  *	Send a packet through an SGE control Tx queue.  Packets sent through
2061f7917c00SJeff Kirsher  *	a control queue must fit entirely as immediate data.
2062f7917c00SJeff Kirsher  */
2063f7917c00SJeff Kirsher static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
2064f7917c00SJeff Kirsher {
2065f7917c00SJeff Kirsher 	unsigned int ndesc;
2066f7917c00SJeff Kirsher 	struct fw_wr_hdr *wr;
2067f7917c00SJeff Kirsher 
2068f7917c00SJeff Kirsher 	if (unlikely(!is_imm(skb))) {
2069f7917c00SJeff Kirsher 		WARN_ON(1);
2070f7917c00SJeff Kirsher 		dev_kfree_skb(skb);
2071f7917c00SJeff Kirsher 		return NET_XMIT_DROP;
2072f7917c00SJeff Kirsher 	}
2073f7917c00SJeff Kirsher 
2074f7917c00SJeff Kirsher 	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
2075f7917c00SJeff Kirsher 	spin_lock(&q->sendq.lock);
2076f7917c00SJeff Kirsher 
2077f7917c00SJeff Kirsher 	if (unlikely(q->full)) {
2078f7917c00SJeff Kirsher 		skb->priority = ndesc;                  /* save for restart */
2079f7917c00SJeff Kirsher 		__skb_queue_tail(&q->sendq, skb);
2080f7917c00SJeff Kirsher 		spin_unlock(&q->sendq.lock);
2081f7917c00SJeff Kirsher 		return NET_XMIT_CN;
2082f7917c00SJeff Kirsher 	}
2083f7917c00SJeff Kirsher 
2084f7917c00SJeff Kirsher 	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2085a6ec572bSAtul Gupta 	cxgb4_inline_tx_skb(skb, &q->q, wr);
2086f7917c00SJeff Kirsher 
2087f7917c00SJeff Kirsher 	txq_advance(&q->q, ndesc);
2088f7917c00SJeff Kirsher 	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
2089f7917c00SJeff Kirsher 		ctrlq_check_stop(q, wr);
2090f7917c00SJeff Kirsher 
2091a6ec572bSAtul Gupta 	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2092f7917c00SJeff Kirsher 	spin_unlock(&q->sendq.lock);
2093f7917c00SJeff Kirsher 
2094f7917c00SJeff Kirsher 	kfree_skb(skb);
2095f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
2096f7917c00SJeff Kirsher }
2097f7917c00SJeff Kirsher 
2098f7917c00SJeff Kirsher /**
2099f7917c00SJeff Kirsher  *	restart_ctrlq - restart a suspended control queue
2100f7917c00SJeff Kirsher  *	@data: the control queue to restart
2101f7917c00SJeff Kirsher  *
2102f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx control queue.
2103f7917c00SJeff Kirsher  */
2104f7917c00SJeff Kirsher static void restart_ctrlq(unsigned long data)
2105f7917c00SJeff Kirsher {
2106f7917c00SJeff Kirsher 	struct sk_buff *skb;
2107f7917c00SJeff Kirsher 	unsigned int written = 0;
2108f7917c00SJeff Kirsher 	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
2109f7917c00SJeff Kirsher 
2110f7917c00SJeff Kirsher 	spin_lock(&q->sendq.lock);
2111f7917c00SJeff Kirsher 	reclaim_completed_tx_imm(&q->q);
2112f7917c00SJeff Kirsher 	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
2113f7917c00SJeff Kirsher 
2114f7917c00SJeff Kirsher 	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
2115f7917c00SJeff Kirsher 		struct fw_wr_hdr *wr;
2116f7917c00SJeff Kirsher 		unsigned int ndesc = skb->priority;     /* previously saved */
2117f7917c00SJeff Kirsher 
2118a4011fd4SHariprasad Shenai 		written += ndesc;
2119a4011fd4SHariprasad Shenai 		/* Write descriptors and free skbs outside the lock to limit
2120f7917c00SJeff Kirsher 		 * wait times.  q->full is still set so new skbs will be queued.
2121f7917c00SJeff Kirsher 		 */
2122a4011fd4SHariprasad Shenai 		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2123a4011fd4SHariprasad Shenai 		txq_advance(&q->q, ndesc);
2124f7917c00SJeff Kirsher 		spin_unlock(&q->sendq.lock);
2125f7917c00SJeff Kirsher 
2126a6ec572bSAtul Gupta 		cxgb4_inline_tx_skb(skb, &q->q, wr);
2127f7917c00SJeff Kirsher 		kfree_skb(skb);
2128f7917c00SJeff Kirsher 
2129f7917c00SJeff Kirsher 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2130f7917c00SJeff Kirsher 			unsigned long old = q->q.stops;
2131f7917c00SJeff Kirsher 
2132f7917c00SJeff Kirsher 			ctrlq_check_stop(q, wr);
2133f7917c00SJeff Kirsher 			if (q->q.stops != old) {          /* suspended anew */
2134f7917c00SJeff Kirsher 				spin_lock(&q->sendq.lock);
2135f7917c00SJeff Kirsher 				goto ringdb;
2136f7917c00SJeff Kirsher 			}
2137f7917c00SJeff Kirsher 		}
2138f7917c00SJeff Kirsher 		if (written > 16) {
2139a6ec572bSAtul Gupta 			cxgb4_ring_tx_db(q->adap, &q->q, written);
2140f7917c00SJeff Kirsher 			written = 0;
2141f7917c00SJeff Kirsher 		}
2142f7917c00SJeff Kirsher 		spin_lock(&q->sendq.lock);
2143f7917c00SJeff Kirsher 	}
2144f7917c00SJeff Kirsher 	q->full = 0;
2145a6ec572bSAtul Gupta ringdb:
2146a6ec572bSAtul Gupta 	if (written)
2147a6ec572bSAtul Gupta 		cxgb4_ring_tx_db(q->adap, &q->q, written);
2148f7917c00SJeff Kirsher 	spin_unlock(&q->sendq.lock);
2149f7917c00SJeff Kirsher }
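
/* restart_ctrlq() above batches doorbell notifications: descriptors for
 * queued WRs are written back-to-back and cxgb4_ring_tx_db() is only
 * called once more than 16 descriptors have accumulated (plus once at
 * the end).  A minimal sketch of the pattern, with hypothetical helper
 * names (illustrative only, not driver code):
 *
 *	written = 0;
 *	while ((wr = next_pending_wr()) != NULL) {
 *		written += write_wr_descriptors(wr);
 *		if (written > 16) {
 *			ring_doorbell(written);	// flush a batch
 *			written = 0;
 *		}
 *	}
 *	if (written)
 *		ring_doorbell(written);		// flush the remainder
 */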
2150f7917c00SJeff Kirsher 
2151f7917c00SJeff Kirsher /**
2152f7917c00SJeff Kirsher  *	t4_mgmt_tx - send a management message
2153f7917c00SJeff Kirsher  *	@adap: the adapter
2154f7917c00SJeff Kirsher  *	@skb: the packet containing the management message
2155f7917c00SJeff Kirsher  *
2156f7917c00SJeff Kirsher  *	Send a management message through control queue 0.
2157f7917c00SJeff Kirsher  */
2158f7917c00SJeff Kirsher int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
2159f7917c00SJeff Kirsher {
2160f7917c00SJeff Kirsher 	int ret;
2161f7917c00SJeff Kirsher 
2162f7917c00SJeff Kirsher 	local_bh_disable();
2163f7917c00SJeff Kirsher 	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2164f7917c00SJeff Kirsher 	local_bh_enable();
2165f7917c00SJeff Kirsher 	return ret;
2166f7917c00SJeff Kirsher }
2167f7917c00SJeff Kirsher 
2168f7917c00SJeff Kirsher /**
2169f7917c00SJeff Kirsher  *	is_ofld_imm - check whether a packet can be sent as immediate data
2170f7917c00SJeff Kirsher  *	@skb: the packet
2171f7917c00SJeff Kirsher  *
2172f7917c00SJeff Kirsher  *	Returns true if a packet can be sent as an offload WR with immediate
2173f7917c00SJeff Kirsher  *	data.  Crypto lookaside WRs may use up to SGE_MAX_WR_LEN; all other
2173f7917c00SJeff Kirsher  *	offload WRs use MAX_IMM_TX_PKT_LEN, the same limit as for Ethernet
2173f7917c00SJeff Kirsher  *	packets.
2174f7917c00SJeff Kirsher  */
2175f7917c00SJeff Kirsher static inline int is_ofld_imm(const struct sk_buff *skb)
2176f7917c00SJeff Kirsher {
21772f47d580SHarsh Jain 	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
21782f47d580SHarsh Jain 	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
21792f47d580SHarsh Jain 
21802f47d580SHarsh Jain 	if (opcode == FW_CRYPTO_LOOKASIDE_WR)
21812f47d580SHarsh Jain 		return skb->len <= SGE_MAX_WR_LEN;
21822f47d580SHarsh Jain 	else
2183f7917c00SJeff Kirsher 		return skb->len <= MAX_IMM_TX_PKT_LEN;
2184f7917c00SJeff Kirsher }
2185f7917c00SJeff Kirsher 
2186f7917c00SJeff Kirsher /**
2187f7917c00SJeff Kirsher  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
2188f7917c00SJeff Kirsher  *	@skb: the packet
2189f7917c00SJeff Kirsher  *
2190f7917c00SJeff Kirsher  *	Returns the number of flits needed for the given offload packet.
2191f7917c00SJeff Kirsher  *	These packets are already fully constructed and no additional headers
2192f7917c00SJeff Kirsher  *	will be added.
2193f7917c00SJeff Kirsher  */
2194f7917c00SJeff Kirsher static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
2195f7917c00SJeff Kirsher {
2196f7917c00SJeff Kirsher 	unsigned int flits, cnt;
2197f7917c00SJeff Kirsher 
2198f7917c00SJeff Kirsher 	if (is_ofld_imm(skb))
2199f7917c00SJeff Kirsher 		return DIV_ROUND_UP(skb->len, 8);
2200f7917c00SJeff Kirsher 
2201f7917c00SJeff Kirsher 	flits = skb_transport_offset(skb) / 8U;   /* headers */
2202f7917c00SJeff Kirsher 	cnt = skb_shinfo(skb)->nr_frags;
220315dd16c2SLi RongQing 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
2204f7917c00SJeff Kirsher 		cnt++;
2205f7917c00SJeff Kirsher 	return flits + sgl_len(cnt);
2206f7917c00SJeff Kirsher }
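
/* Worked example for calc_tx_flits_ofld() (hypothetical packet): an
 * offload WR with a 40-byte header (skb_transport_offset() == 40), two
 * page fragments and payload remaining in the linear area past the
 * transport header needs 40 / 8 = 5 header flits plus an SGL covering
 * cnt = 2 + 1 = 3 buffers.  Assuming sgl_len() is the SGE SGL sizing
 * helper defined earlier in this file, sgl_len(3) = 5, for 10 flits in
 * total.
 */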
2207f7917c00SJeff Kirsher 
2208f7917c00SJeff Kirsher /**
2209f7917c00SJeff Kirsher  *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2211f7917c00SJeff Kirsher  *	@q: the queue to stop
2212f7917c00SJeff Kirsher  *
2213f7917c00SJeff Kirsher  *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
2214f7917c00SJeff Kirsher  *	inability to map packets.  A periodic timer attempts to restart
2215f7917c00SJeff Kirsher  *	queues so marked.
2216f7917c00SJeff Kirsher  */
2217ab677ff4SHariprasad Shenai static void txq_stop_maperr(struct sge_uld_txq *q)
2218f7917c00SJeff Kirsher {
2219f7917c00SJeff Kirsher 	q->mapping_err++;
2220f7917c00SJeff Kirsher 	q->q.stops++;
2221f7917c00SJeff Kirsher 	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
2222f7917c00SJeff Kirsher 		q->adap->sge.txq_maperr);
2223f7917c00SJeff Kirsher }
2224f7917c00SJeff Kirsher 
2225f7917c00SJeff Kirsher /**
2226f7917c00SJeff Kirsher  *	ofldtxq_stop - stop an offload Tx queue that has become full
2227f7917c00SJeff Kirsher  *	@q: the queue to stop
2228e383f248SAtul Gupta  *	@wr: the Work Request causing the queue to become full
2229f7917c00SJeff Kirsher  *
2230f7917c00SJeff Kirsher  *	Stops an offload Tx queue that has become full and modifies the packet
2231f7917c00SJeff Kirsher  *	being written to request a wakeup.
2232f7917c00SJeff Kirsher  */
2233e383f248SAtul Gupta static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
2234f7917c00SJeff Kirsher {
2235e2ac9628SHariprasad Shenai 	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2236f7917c00SJeff Kirsher 	q->q.stops++;
2237f7917c00SJeff Kirsher 	q->full = 1;
2238f7917c00SJeff Kirsher }
2239f7917c00SJeff Kirsher 
2240f7917c00SJeff Kirsher /**
2241126fca64SHariprasad Shenai  *	service_ofldq - service/restart a suspended offload queue
2242f7917c00SJeff Kirsher  *	@q: the offload queue
2243f7917c00SJeff Kirsher  *
2244126fca64SHariprasad Shenai  *	Services an offload Tx queue by moving packets from its Pending Send
2245126fca64SHariprasad Shenai  *	Queue to the Hardware TX ring.  The function starts and ends with the
2246126fca64SHariprasad Shenai  *	Send Queue locked, but drops the lock while putting the skb at the
2247126fca64SHariprasad Shenai  *	head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
2248126fca64SHariprasad Shenai  *	allows more skbs to be added to the Send Queue by other threads.
2249126fca64SHariprasad Shenai  *	The packet being processed at the head of the Pending Send Queue is
2250126fca64SHariprasad Shenai  *	left on the queue in case we experience DMA Mapping errors, etc.
2251126fca64SHariprasad Shenai  *	and need to give up and restart later.
2252126fca64SHariprasad Shenai  *
2253126fca64SHariprasad Shenai  *	service_ofldq() can be thought of as a task which opportunistically
2254126fca64SHariprasad Shenai  *	uses other threads execution contexts.  We use the Offload Queue
2255126fca64SHariprasad Shenai  *	boolean "service_ofldq_running" to make sure that only one instance
2256126fca64SHariprasad Shenai  *	is ever running at a time ...
2257f7917c00SJeff Kirsher  */
2258ab677ff4SHariprasad Shenai static void service_ofldq(struct sge_uld_txq *q)
2259f7917c00SJeff Kirsher {
22608d0557d2SHariprasad Shenai 	u64 *pos, *before, *end;
2261f7917c00SJeff Kirsher 	int credits;
2262f7917c00SJeff Kirsher 	struct sk_buff *skb;
22638d0557d2SHariprasad Shenai 	struct sge_txq *txq;
22648d0557d2SHariprasad Shenai 	unsigned int left;
2265f7917c00SJeff Kirsher 	unsigned int written = 0;
2266f7917c00SJeff Kirsher 	unsigned int flits, ndesc;
2267f7917c00SJeff Kirsher 
2268126fca64SHariprasad Shenai 	/* If another thread is currently in service_ofldq() processing the
2269126fca64SHariprasad Shenai 	 * Pending Send Queue then there's nothing to do. Otherwise, flag
2270126fca64SHariprasad Shenai 	 * that we're doing the work and continue.  Examining/modifying
2271126fca64SHariprasad Shenai 	 * the Offload Queue boolean "service_ofldq_running" must be done
2272126fca64SHariprasad Shenai 	 * while holding the Pending Send Queue Lock.
2273126fca64SHariprasad Shenai 	 */
2274126fca64SHariprasad Shenai 	if (q->service_ofldq_running)
2275126fca64SHariprasad Shenai 		return;
2276126fca64SHariprasad Shenai 	q->service_ofldq_running = true;
2277126fca64SHariprasad Shenai 
2278f7917c00SJeff Kirsher 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
2279126fca64SHariprasad Shenai 		/* We drop the lock while we're working with the skb at the
2280126fca64SHariprasad Shenai 		 * head of the Pending Send Queue.  This allows more skbs to
2281126fca64SHariprasad Shenai 		 * be added to the Pending Send Queue while we're working on
2282126fca64SHariprasad Shenai 		 * this one.  We don't need to lock to guard the TX Ring
2283126fca64SHariprasad Shenai 		 * updates because only one thread of execution is ever
2284126fca64SHariprasad Shenai 		 * allowed into service_ofldq() at a time.
2285f7917c00SJeff Kirsher 		 */
2286f7917c00SJeff Kirsher 		spin_unlock(&q->sendq.lock);
2287f7917c00SJeff Kirsher 
2288a6ec572bSAtul Gupta 		cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
2289f7917c00SJeff Kirsher 
2290f7917c00SJeff Kirsher 		flits = skb->priority;                /* previously saved */
2291f7917c00SJeff Kirsher 		ndesc = flits_to_desc(flits);
2292f7917c00SJeff Kirsher 		credits = txq_avail(&q->q) - ndesc;
2293f7917c00SJeff Kirsher 		BUG_ON(credits < 0);
2294f7917c00SJeff Kirsher 		if (unlikely(credits < TXQ_STOP_THRES))
2295e383f248SAtul Gupta 			ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
2296f7917c00SJeff Kirsher 
2297f7917c00SJeff Kirsher 		pos = (u64 *)&q->q.desc[q->q.pidx];
2298f7917c00SJeff Kirsher 		if (is_ofld_imm(skb))
2299a6ec572bSAtul Gupta 			cxgb4_inline_tx_skb(skb, &q->q, pos);
2300a6ec572bSAtul Gupta 		else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
2301f7917c00SJeff Kirsher 				       (dma_addr_t *)skb->head)) {
2302f7917c00SJeff Kirsher 			txq_stop_maperr(q);
2303f7917c00SJeff Kirsher 			spin_lock(&q->sendq.lock);
2304f7917c00SJeff Kirsher 			break;
2305f7917c00SJeff Kirsher 		} else {
2306f7917c00SJeff Kirsher 			int last_desc, hdr_len = skb_transport_offset(skb);
2307f7917c00SJeff Kirsher 
23088d0557d2SHariprasad Shenai 			/* The WR headers may not fit within one descriptor.
23098d0557d2SHariprasad Shenai 			 * So we need to deal with wrap-around here.
23108d0557d2SHariprasad Shenai 			 */
23118d0557d2SHariprasad Shenai 			before = (u64 *)pos;
23128d0557d2SHariprasad Shenai 			end = (u64 *)pos + flits;
23138d0557d2SHariprasad Shenai 			txq = &q->q;
23148d0557d2SHariprasad Shenai 			pos = (void *)inline_tx_skb_header(skb, &q->q,
23158d0557d2SHariprasad Shenai 							   (void *)pos,
23168d0557d2SHariprasad Shenai 							   hdr_len);
23178d0557d2SHariprasad Shenai 			if (before > (u64 *)pos) {
23188d0557d2SHariprasad Shenai 				left = (u8 *)end - (u8 *)txq->stat;
23198d0557d2SHariprasad Shenai 				end = (void *)txq->desc + left;
23208d0557d2SHariprasad Shenai 			}
23218d0557d2SHariprasad Shenai 
23228d0557d2SHariprasad Shenai 			/* If the current position is already at the end of
23238d0557d2SHariprasad Shenai 			 * the ofld queue, reset it to point to the start of
23248d0557d2SHariprasad Shenai 			 * the queue and update the end pointer as well.
23258d0557d2SHariprasad Shenai 			 */
23268d0557d2SHariprasad Shenai 			if (pos == (u64 *)txq->stat) {
23278d0557d2SHariprasad Shenai 				left = (u8 *)end - (u8 *)txq->stat;
23288d0557d2SHariprasad Shenai 				end = (void *)txq->desc + left;
23298d0557d2SHariprasad Shenai 				pos = (void *)txq->desc;
23308d0557d2SHariprasad Shenai 			}
23318d0557d2SHariprasad Shenai 
2332a6ec572bSAtul Gupta 			cxgb4_write_sgl(skb, &q->q, (void *)pos,
23338d0557d2SHariprasad Shenai 					end, hdr_len,
2334f7917c00SJeff Kirsher 					(dma_addr_t *)skb->head);
2335f7917c00SJeff Kirsher #ifdef CONFIG_NEED_DMA_MAP_STATE
2336f7917c00SJeff Kirsher 			skb->dev = q->adap->port[0];
2337f7917c00SJeff Kirsher 			skb->destructor = deferred_unmap_destructor;
2338f7917c00SJeff Kirsher #endif
2339f7917c00SJeff Kirsher 			last_desc = q->q.pidx + ndesc - 1;
2340f7917c00SJeff Kirsher 			if (last_desc >= q->q.size)
2341f7917c00SJeff Kirsher 				last_desc -= q->q.size;
2342f7917c00SJeff Kirsher 			q->q.sdesc[last_desc].skb = skb;
2343f7917c00SJeff Kirsher 		}
2344f7917c00SJeff Kirsher 
2345f7917c00SJeff Kirsher 		txq_advance(&q->q, ndesc);
2346f7917c00SJeff Kirsher 		written += ndesc;
2347f7917c00SJeff Kirsher 		if (unlikely(written > 32)) {
2348a6ec572bSAtul Gupta 			cxgb4_ring_tx_db(q->adap, &q->q, written);
2349f7917c00SJeff Kirsher 			written = 0;
2350f7917c00SJeff Kirsher 		}
2351f7917c00SJeff Kirsher 
2352126fca64SHariprasad Shenai 		/* Reacquire the Pending Send Queue Lock so we can unlink the
2353126fca64SHariprasad Shenai 		 * skb we've just successfully transferred to the TX Ring and
2354126fca64SHariprasad Shenai 		 * loop for the next skb which may be at the head of the
2355126fca64SHariprasad Shenai 		 * Pending Send Queue.
2356126fca64SHariprasad Shenai 		 */
2357f7917c00SJeff Kirsher 		spin_lock(&q->sendq.lock);
2358f7917c00SJeff Kirsher 		__skb_unlink(skb, &q->sendq);
2359f7917c00SJeff Kirsher 		if (is_ofld_imm(skb))
2360f7917c00SJeff Kirsher 			kfree_skb(skb);
2361f7917c00SJeff Kirsher 	}
2362f7917c00SJeff Kirsher 	if (likely(written))
2363a6ec572bSAtul Gupta 		cxgb4_ring_tx_db(q->adap, &q->q, written);
2364126fca64SHariprasad Shenai 
2365126fca64SHariprasad Shenai 	/* Indicate that no thread is currently processing the Pending Send
2366126fca64SHariprasad Shenai 	 * Queue.
2367126fca64SHariprasad Shenai 	 */
2368126fca64SHariprasad Shenai 	q->service_ofldq_running = false;
2369f7917c00SJeff Kirsher }
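
/* The service_ofldq_running flag used above implements a single-runner
 * protocol: the flag is only read and written under q->sendq.lock, so at
 * most one thread drains the Pending Send Queue while any other callers
 * simply enqueue and return.  The general shape (illustrative only, with
 * hypothetical names):
 *
 *	// caller holds the queue lock on entry and exit
 *	if (!running) {
 *		running = true;
 *		while (work_pending()) {
 *			spin_unlock(&lock);	// slow part runs unlocked
 *			do_one_item();
 *			spin_lock(&lock);
 *		}
 *		running = false;
 *	}
 */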
2370f7917c00SJeff Kirsher 
2371f7917c00SJeff Kirsher /**
2372f7917c00SJeff Kirsher  *	ofld_xmit - send a packet through an offload queue
2373f7917c00SJeff Kirsher  *	@q: the Tx offload queue
2374f7917c00SJeff Kirsher  *	@skb: the packet
2375f7917c00SJeff Kirsher  *
2376f7917c00SJeff Kirsher  *	Send an offload packet through an SGE offload queue.
2377f7917c00SJeff Kirsher  */
2378ab677ff4SHariprasad Shenai static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
2379f7917c00SJeff Kirsher {
2380f7917c00SJeff Kirsher 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
2381f7917c00SJeff Kirsher 	spin_lock(&q->sendq.lock);
2382126fca64SHariprasad Shenai 
2383126fca64SHariprasad Shenai 	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
2384126fca64SHariprasad Shenai 	 * that results in this new skb being the only one on the queue, start
2385126fca64SHariprasad Shenai 	 * servicing it.  If there are other skbs already on the list, then
2386126fca64SHariprasad Shenai 	 * either the queue is currently being processed or it's been stopped
2387126fca64SHariprasad Shenai 	 * for some reason and it'll be restarted at a later time.  Restart
2388126fca64SHariprasad Shenai 	 * paths are triggered by events like experiencing a DMA Mapping Error
2389126fca64SHariprasad Shenai 	 * or filling the Hardware TX Ring.
2390126fca64SHariprasad Shenai 	 */
2391f7917c00SJeff Kirsher 	__skb_queue_tail(&q->sendq, skb);
2392f7917c00SJeff Kirsher 	if (q->sendq.qlen == 1)
2393f7917c00SJeff Kirsher 		service_ofldq(q);
2394126fca64SHariprasad Shenai 
2395f7917c00SJeff Kirsher 	spin_unlock(&q->sendq.lock);
2396f7917c00SJeff Kirsher 	return NET_XMIT_SUCCESS;
2397f7917c00SJeff Kirsher }
2398f7917c00SJeff Kirsher 
2399f7917c00SJeff Kirsher /**
2400f7917c00SJeff Kirsher  *	restart_ofldq - restart a suspended offload queue
2401f7917c00SJeff Kirsher  *	@data: the offload queue to restart
2402f7917c00SJeff Kirsher  *
2403f7917c00SJeff Kirsher  *	Resumes transmission on a suspended Tx offload queue.
2404f7917c00SJeff Kirsher  */
2405f7917c00SJeff Kirsher static void restart_ofldq(unsigned long data)
2406f7917c00SJeff Kirsher {
2407ab677ff4SHariprasad Shenai 	struct sge_uld_txq *q = (struct sge_uld_txq *)data;
2408f7917c00SJeff Kirsher 
2409f7917c00SJeff Kirsher 	spin_lock(&q->sendq.lock);
2410f7917c00SJeff Kirsher 	q->full = 0;            /* the queue actually is completely empty now */
2411f7917c00SJeff Kirsher 	service_ofldq(q);
2412f7917c00SJeff Kirsher 	spin_unlock(&q->sendq.lock);
2413f7917c00SJeff Kirsher }
2414f7917c00SJeff Kirsher 
2415f7917c00SJeff Kirsher /**
2416f7917c00SJeff Kirsher  *	skb_txq - return the Tx queue an offload packet should use
2417f7917c00SJeff Kirsher  *	@skb: the packet
2418f7917c00SJeff Kirsher  *
2419f7917c00SJeff Kirsher  *	Returns the Tx queue an offload packet should use as indicated by bits
2420f7917c00SJeff Kirsher  *	1-15 in the packet's queue_mapping.
2421f7917c00SJeff Kirsher  */
2422f7917c00SJeff Kirsher static inline unsigned int skb_txq(const struct sk_buff *skb)
2423f7917c00SJeff Kirsher {
2424f7917c00SJeff Kirsher 	return skb->queue_mapping >> 1;
2425f7917c00SJeff Kirsher }
2426f7917c00SJeff Kirsher 
2427f7917c00SJeff Kirsher /**
2428f7917c00SJeff Kirsher  *	is_ctrl_pkt - return whether an offload packet is a control packet
2429f7917c00SJeff Kirsher  *	@skb: the packet
2430f7917c00SJeff Kirsher  *
2431f7917c00SJeff Kirsher  *	Returns whether an offload packet should use an OFLD or a CTRL
2432f7917c00SJeff Kirsher  *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
2433f7917c00SJeff Kirsher  */
2434f7917c00SJeff Kirsher static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
2435f7917c00SJeff Kirsher {
2436f7917c00SJeff Kirsher 	return skb->queue_mapping & 1;
2437f7917c00SJeff Kirsher }
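
/* Taken together, skb_txq() and is_ctrl_pkt() define the queue_mapping
 * encoding offload senders must use: (queue index << 1) | is_ctrl.  For
 * example (illustrative only), a ULD targeting plain offload queue 3
 * would set
 *
 *	skb->queue_mapping = (3 << 1) | 0;
 *
 * while (idx << 1) | 1 routes the skb to control queue idx instead.
 */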
2438f7917c00SJeff Kirsher 
2439ab677ff4SHariprasad Shenai static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
2440ab677ff4SHariprasad Shenai 			   unsigned int tx_uld_type)
2441f7917c00SJeff Kirsher {
2442ab677ff4SHariprasad Shenai 	struct sge_uld_txq_info *txq_info;
2443ab677ff4SHariprasad Shenai 	struct sge_uld_txq *txq;
2444f7917c00SJeff Kirsher 	unsigned int idx = skb_txq(skb);
2445f7917c00SJeff Kirsher 
24464fe44dd7SKumar Sanghvi 	if (unlikely(is_ctrl_pkt(skb))) {
24474fe44dd7SKumar Sanghvi 		/* Single ctrl queue is a requirement for LE workaround path */
24484fe44dd7SKumar Sanghvi 		if (adap->tids.nsftids)
24494fe44dd7SKumar Sanghvi 			idx = 0;
2450f7917c00SJeff Kirsher 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
24514fe44dd7SKumar Sanghvi 	}
24520d4b729dSArjun V 
24530d4b729dSArjun V 	txq_info = adap->sge.uld_txq_info[tx_uld_type];
24540d4b729dSArjun V 	if (unlikely(!txq_info)) {
24550d4b729dSArjun V 		WARN_ON(true);
24560d4b729dSArjun V 		return NET_XMIT_DROP;
24570d4b729dSArjun V 	}
24580d4b729dSArjun V 
24590d4b729dSArjun V 	txq = &txq_info->uldtxq[idx];
2460ab677ff4SHariprasad Shenai 	return ofld_xmit(txq, skb);
2461f7917c00SJeff Kirsher }
2462f7917c00SJeff Kirsher 
2463f7917c00SJeff Kirsher /**
2464f7917c00SJeff Kirsher  *	t4_ofld_send - send an offload packet
2465f7917c00SJeff Kirsher  *	@adap: the adapter
2466f7917c00SJeff Kirsher  *	@skb: the packet
2467f7917c00SJeff Kirsher  *
2468f7917c00SJeff Kirsher  *	Sends an offload packet.  We use the packet queue_mapping to select the
2469f7917c00SJeff Kirsher  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
2470f7917c00SJeff Kirsher  *	should be sent as regular or control, bits 1-15 select the queue.
2471f7917c00SJeff Kirsher  */
2472f7917c00SJeff Kirsher int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
2473f7917c00SJeff Kirsher {
2474f7917c00SJeff Kirsher 	int ret;
2475f7917c00SJeff Kirsher 
2476f7917c00SJeff Kirsher 	local_bh_disable();
2477ab677ff4SHariprasad Shenai 	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
2478f7917c00SJeff Kirsher 	local_bh_enable();
2479f7917c00SJeff Kirsher 	return ret;
2480f7917c00SJeff Kirsher }
2481f7917c00SJeff Kirsher 
2482f7917c00SJeff Kirsher /**
2483f7917c00SJeff Kirsher  *	cxgb4_ofld_send - send an offload packet
2484f7917c00SJeff Kirsher  *	@dev: the net device
2485f7917c00SJeff Kirsher  *	@skb: the packet
2486f7917c00SJeff Kirsher  *
2487f7917c00SJeff Kirsher  *	Sends an offload packet.  This is an exported version of t4_ofld_send(),
2488f7917c00SJeff Kirsher  *	intended for ULDs.
2489f7917c00SJeff Kirsher  */
2490f7917c00SJeff Kirsher int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
2491f7917c00SJeff Kirsher {
2492f7917c00SJeff Kirsher 	return t4_ofld_send(netdev2adap(dev), skb);
2493f7917c00SJeff Kirsher }
2494f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb4_ofld_send);
2495f7917c00SJeff Kirsher 
2496e383f248SAtul Gupta static void *inline_tx_header(const void *src,
2497e383f248SAtul Gupta 			      const struct sge_txq *q,
2498e383f248SAtul Gupta 			      void *pos, int length)
2499e383f248SAtul Gupta {
2500e383f248SAtul Gupta 	int left = (void *)q->stat - pos;
2501e383f248SAtul Gupta 	u64 *p;
2502e383f248SAtul Gupta 
2503e383f248SAtul Gupta 	if (likely(length <= left)) {
2504e383f248SAtul Gupta 		memcpy(pos, src, length);
2505e383f248SAtul Gupta 		pos += length;
2506e383f248SAtul Gupta 	} else {
2507e383f248SAtul Gupta 		memcpy(pos, src, left);
2508e383f248SAtul Gupta 		memcpy(q->desc, src + left, length - left);
2509e383f248SAtul Gupta 		pos = (void *)q->desc + (length - left);
2510e383f248SAtul Gupta 	}
2511e383f248SAtul Gupta 	/* 0-pad to multiple of 16 */
2512e383f248SAtul Gupta 	p = PTR_ALIGN(pos, 8);
2513e383f248SAtul Gupta 	if ((uintptr_t)p & 8) {
2514e383f248SAtul Gupta 		*p = 0;
2515e383f248SAtul Gupta 		return p + 1;
2516e383f248SAtul Gupta 	}
2517e383f248SAtul Gupta 	return p;
2518e383f248SAtul Gupta }
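
/* Worked example of the trailing pad in inline_tx_header(), assuming pos
 * starts on a 16-byte boundary (hypothetical values): copying a 20-byte
 * WR leaves pos at offset 20.  PTR_ALIGN(pos, 8) rounds up to offset 24;
 * since that is 8- but not 16-byte aligned, one zero u64 is written and
 * the returned pointer lands at offset 32, a 16-byte boundary ready for
 * the next WR.
 */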
2519e383f248SAtul Gupta 
2520e383f248SAtul Gupta /**
2521e383f248SAtul Gupta  *      ofld_xmit_direct - copy a WR into offload queue
2522e383f248SAtul Gupta  *      @q: the Tx offload queue
2523e383f248SAtul Gupta  *      @src: location of WR
2524e383f248SAtul Gupta  *      @len: WR length
2525e383f248SAtul Gupta  *
2526e383f248SAtul Gupta  *      Copy an immediate WR into an uncontended SGE offload queue.
2527e383f248SAtul Gupta  */
2528e383f248SAtul Gupta static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
2529e383f248SAtul Gupta 			    unsigned int len)
2530e383f248SAtul Gupta {
2531e383f248SAtul Gupta 	unsigned int ndesc;
2532e383f248SAtul Gupta 	int credits;
2533e383f248SAtul Gupta 	u64 *pos;
2534e383f248SAtul Gupta 
2535e383f248SAtul Gupta 	/* Use the lower limit as the cut-off */
2536e383f248SAtul Gupta 	if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
2537e383f248SAtul Gupta 		WARN_ON(1);
2538e383f248SAtul Gupta 		return NET_XMIT_DROP;
2539e383f248SAtul Gupta 	}
2540e383f248SAtul Gupta 
2541e383f248SAtul Gupta 	/* Don't return NET_XMIT_CN here: the current implementation
2542e383f248SAtul Gupta 	 * doesn't queue the request in an skb when the conditions
2543e383f248SAtul Gupta 	 * below aren't met, so there is nothing to retry later.
2544e383f248SAtul Gupta 	 */
2545e383f248SAtul Gupta 	if (!spin_trylock(&q->sendq.lock))
2546e383f248SAtul Gupta 		return NET_XMIT_DROP;
2547e383f248SAtul Gupta 
2548e383f248SAtul Gupta 	if (q->full || !skb_queue_empty(&q->sendq) ||
2549e383f248SAtul Gupta 	    q->service_ofldq_running) {
2550e383f248SAtul Gupta 		spin_unlock(&q->sendq.lock);
2551e383f248SAtul Gupta 		return NET_XMIT_DROP;
2552e383f248SAtul Gupta 	}
2553e383f248SAtul Gupta 	ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
2554e383f248SAtul Gupta 	credits = txq_avail(&q->q) - ndesc;
2555e383f248SAtul Gupta 	pos = (u64 *)&q->q.desc[q->q.pidx];
2556e383f248SAtul Gupta 
2557e383f248SAtul Gupta 	/* ofldtxq_stop modifies WR header in-situ */
2558e383f248SAtul Gupta 	inline_tx_header(src, &q->q, pos, len);
2559e383f248SAtul Gupta 	if (unlikely(credits < TXQ_STOP_THRES))
2560e383f248SAtul Gupta 		ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
2561e383f248SAtul Gupta 	txq_advance(&q->q, ndesc);
2562e383f248SAtul Gupta 	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2563e383f248SAtul Gupta 
2564e383f248SAtul Gupta 	spin_unlock(&q->sendq.lock);
2565e383f248SAtul Gupta 	return NET_XMIT_SUCCESS;
2566e383f248SAtul Gupta }
2567e383f248SAtul Gupta 
2568e383f248SAtul Gupta int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
2569e383f248SAtul Gupta 		       const void *src, unsigned int len)
2570e383f248SAtul Gupta {
2571e383f248SAtul Gupta 	struct sge_uld_txq_info *txq_info;
2572e383f248SAtul Gupta 	struct sge_uld_txq *txq;
2573e383f248SAtul Gupta 	struct adapter *adap;
2574e383f248SAtul Gupta 	int ret;
2575e383f248SAtul Gupta 
2576e383f248SAtul Gupta 	adap = netdev2adap(dev);
2577e383f248SAtul Gupta 
2578e383f248SAtul Gupta 	local_bh_disable();
2579e383f248SAtul Gupta 	txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2580e383f248SAtul Gupta 	if (unlikely(!txq_info)) {
2581e383f248SAtul Gupta 		WARN_ON(true);
2582e383f248SAtul Gupta 		local_bh_enable();
2583e383f248SAtul Gupta 		return NET_XMIT_DROP;
2584e383f248SAtul Gupta 	}
2585e383f248SAtul Gupta 	txq = &txq_info->uldtxq[idx];
2586e383f248SAtul Gupta 
2587e383f248SAtul Gupta 	ret = ofld_xmit_direct(txq, src, len);
2588e383f248SAtul Gupta 	local_bh_enable();
2589e383f248SAtul Gupta 	return net_xmit_eval(ret);
2590e383f248SAtul Gupta }
2591e383f248SAtul Gupta EXPORT_SYMBOL(cxgb4_immdata_send);
2592e383f248SAtul Gupta 
2593ab677ff4SHariprasad Shenai /**
2594ab677ff4SHariprasad Shenai  *	t4_crypto_send - send crypto packet
2595ab677ff4SHariprasad Shenai  *	@adap: the adapter
2596ab677ff4SHariprasad Shenai  *	@skb: the packet
2597ab677ff4SHariprasad Shenai  *
2598ab677ff4SHariprasad Shenai  *	Sends a crypto packet.  We use the packet queue_mapping to select the
2599ab677ff4SHariprasad Shenai  *	appropriate Tx queue as follows: bit 0 indicates whether the packet
2600ab677ff4SHariprasad Shenai  *	should be sent as regular or control, bits 1-15 select the queue.
2601ab677ff4SHariprasad Shenai  */
2602ab677ff4SHariprasad Shenai static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
2603ab677ff4SHariprasad Shenai {
2604ab677ff4SHariprasad Shenai 	int ret;
2605ab677ff4SHariprasad Shenai 
2606ab677ff4SHariprasad Shenai 	local_bh_disable();
2607ab677ff4SHariprasad Shenai 	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
2608ab677ff4SHariprasad Shenai 	local_bh_enable();
2609ab677ff4SHariprasad Shenai 	return ret;
2610ab677ff4SHariprasad Shenai }
2611ab677ff4SHariprasad Shenai 
2612ab677ff4SHariprasad Shenai /**
2613ab677ff4SHariprasad Shenai  *	cxgb4_crypto_send - send crypto packet
2614ab677ff4SHariprasad Shenai  *	@dev: the net device
2615ab677ff4SHariprasad Shenai  *	@skb: the packet
2616ab677ff4SHariprasad Shenai  *
2617ab677ff4SHariprasad Shenai  *	Sends a crypto packet.  This is an exported version of t4_crypto_send(),
2618ab677ff4SHariprasad Shenai  *	intended for ULDs.
2619ab677ff4SHariprasad Shenai  */
2620ab677ff4SHariprasad Shenai int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
2621ab677ff4SHariprasad Shenai {
2622ab677ff4SHariprasad Shenai 	return t4_crypto_send(netdev2adap(dev), skb);
2623ab677ff4SHariprasad Shenai }
2624ab677ff4SHariprasad Shenai EXPORT_SYMBOL(cxgb4_crypto_send);
2625ab677ff4SHariprasad Shenai 
2626e91b0f24SIan Campbell static inline void copy_frags(struct sk_buff *skb,
2627f7917c00SJeff Kirsher 			      const struct pkt_gl *gl, unsigned int offset)
2628f7917c00SJeff Kirsher {
2629e91b0f24SIan Campbell 	int i;
2630f7917c00SJeff Kirsher 
2631f7917c00SJeff Kirsher 	/* usually there's just one frag */
2632e91b0f24SIan Campbell 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
2633e91b0f24SIan Campbell 			     gl->frags[0].offset + offset,
2634e91b0f24SIan Campbell 			     gl->frags[0].size - offset);
2635e91b0f24SIan Campbell 	skb_shinfo(skb)->nr_frags = gl->nfrags;
2636e91b0f24SIan Campbell 	for (i = 1; i < gl->nfrags; i++)
2637e91b0f24SIan Campbell 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
2638e91b0f24SIan Campbell 				     gl->frags[i].offset,
2639e91b0f24SIan Campbell 				     gl->frags[i].size);
2640f7917c00SJeff Kirsher 
2641f7917c00SJeff Kirsher 	/* get a reference to the last page, we don't own it */
2642e91b0f24SIan Campbell 	get_page(gl->frags[gl->nfrags - 1].page);
2643f7917c00SJeff Kirsher }
2644f7917c00SJeff Kirsher 
2645f7917c00SJeff Kirsher /**
2646f7917c00SJeff Kirsher  *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
2647f7917c00SJeff Kirsher  *	@gl: the gather list
2648f7917c00SJeff Kirsher  *	@skb_len: size of sk_buff main body if it carries fragments
2649f7917c00SJeff Kirsher  *	@pull_len: amount of data to move to the sk_buff's main body
2650f7917c00SJeff Kirsher  *
2651f7917c00SJeff Kirsher  *	Builds an sk_buff from the given packet gather list.  Returns the
2652f7917c00SJeff Kirsher  *	sk_buff or %NULL if sk_buff allocation failed.
2653f7917c00SJeff Kirsher  */
2654f7917c00SJeff Kirsher struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
2655f7917c00SJeff Kirsher 				   unsigned int skb_len, unsigned int pull_len)
2656f7917c00SJeff Kirsher {
2657f7917c00SJeff Kirsher 	struct sk_buff *skb;
2658f7917c00SJeff Kirsher 
2659f7917c00SJeff Kirsher 	/*
2660f7917c00SJeff Kirsher 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
2661f7917c00SJeff Kirsher 	 * size, which is expected since buffers are at least PAGE_SIZEd.
2662f7917c00SJeff Kirsher 	 * In this case packets up to RX_COPY_THRES have only one fragment.
2663f7917c00SJeff Kirsher 	 */
2664f7917c00SJeff Kirsher 	if (gl->tot_len <= RX_COPY_THRES) {
2665f7917c00SJeff Kirsher 		skb = dev_alloc_skb(gl->tot_len);
2666f7917c00SJeff Kirsher 		if (unlikely(!skb))
2667f7917c00SJeff Kirsher 			goto out;
2668f7917c00SJeff Kirsher 		__skb_put(skb, gl->tot_len);
2669f7917c00SJeff Kirsher 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
2670f7917c00SJeff Kirsher 	} else {
2671f7917c00SJeff Kirsher 		skb = dev_alloc_skb(skb_len);
2672f7917c00SJeff Kirsher 		if (unlikely(!skb))
2673f7917c00SJeff Kirsher 			goto out;
2674f7917c00SJeff Kirsher 		__skb_put(skb, pull_len);
2675f7917c00SJeff Kirsher 		skb_copy_to_linear_data(skb, gl->va, pull_len);
2676f7917c00SJeff Kirsher 
2677e91b0f24SIan Campbell 		copy_frags(skb, gl, pull_len);
2678f7917c00SJeff Kirsher 		skb->len = gl->tot_len;
2679f7917c00SJeff Kirsher 		skb->data_len = skb->len - pull_len;
2680f7917c00SJeff Kirsher 		skb->truesize += skb->data_len;
2681f7917c00SJeff Kirsher 	}
2682f7917c00SJeff Kirsher out:	return skb;
2683f7917c00SJeff Kirsher }
2684f7917c00SJeff Kirsher EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
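
/* Sketch of the size cut-over in cxgb4_pktgl_to_skb(), assuming the
 * RX_COPY_THRES and RX_PULL_LEN values defined earlier in this file
 * (RX_COPY_THRES is assumed to be at least 128 bytes here): a 128-byte
 * ingress packet (<= RX_COPY_THRES) is copied whole into a linear skb,
 * while a 9000-byte packet gets only pull_len linear bytes with the
 * remainder attached as page fragments by copy_frags().
 */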
2685f7917c00SJeff Kirsher 
2686f7917c00SJeff Kirsher /**
2687f7917c00SJeff Kirsher  *	t4_pktgl_free - free a packet gather list
2688f7917c00SJeff Kirsher  *	@gl: the gather list
2689f7917c00SJeff Kirsher  *
2690f7917c00SJeff Kirsher  *	Releases the pages of a packet gather list.  We do not own the last
2691f7917c00SJeff Kirsher  *	page on the list and do not free it.
2692f7917c00SJeff Kirsher  */
2693f7917c00SJeff Kirsher static void t4_pktgl_free(const struct pkt_gl *gl)
2694f7917c00SJeff Kirsher {
2695f7917c00SJeff Kirsher 	int n;
2696e91b0f24SIan Campbell 	const struct page_frag *p;
2697f7917c00SJeff Kirsher 
2698f7917c00SJeff Kirsher 	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
2699f7917c00SJeff Kirsher 		put_page(p->page);
2700f7917c00SJeff Kirsher }
2701f7917c00SJeff Kirsher 
2702f7917c00SJeff Kirsher /*
2703f7917c00SJeff Kirsher  * Process an MPS trace packet.  Give it an unused protocol number so it won't
2704f7917c00SJeff Kirsher  * be delivered to anyone and send it to the stack for capture.
2705f7917c00SJeff Kirsher  */
2706f7917c00SJeff Kirsher static noinline int handle_trace_pkt(struct adapter *adap,
2707f7917c00SJeff Kirsher 				     const struct pkt_gl *gl)
2708f7917c00SJeff Kirsher {
2709f7917c00SJeff Kirsher 	struct sk_buff *skb;
2710f7917c00SJeff Kirsher 
2711f7917c00SJeff Kirsher 	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
2712f7917c00SJeff Kirsher 	if (unlikely(!skb)) {
2713f7917c00SJeff Kirsher 		t4_pktgl_free(gl);
2714f7917c00SJeff Kirsher 		return 0;
2715f7917c00SJeff Kirsher 	}
2716f7917c00SJeff Kirsher 
2717d14807ddSHariprasad Shenai 	if (is_t4(adap->params.chip))
27180a57a536SSantosh Rastapur 		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
27190a57a536SSantosh Rastapur 	else
27200a57a536SSantosh Rastapur 		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
27210a57a536SSantosh Rastapur 
2722f7917c00SJeff Kirsher 	skb_reset_mac_header(skb);
2723f7917c00SJeff Kirsher 	skb->protocol = htons(0xffff);
2724f7917c00SJeff Kirsher 	skb->dev = adap->port[0];
2725f7917c00SJeff Kirsher 	netif_receive_skb(skb);
2726f7917c00SJeff Kirsher 	return 0;
2727f7917c00SJeff Kirsher }
2728f7917c00SJeff Kirsher 
27295e2a5ebcSHariprasad Shenai /**
27305e2a5ebcSHariprasad Shenai  * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
27315e2a5ebcSHariprasad Shenai  * @adap: the adapter
27325e2a5ebcSHariprasad Shenai  * @hwtstamps: time stamp structure to update
27335e2a5ebcSHariprasad Shenai  * @sgetstamp: 60-bit IQE timestamp
27345e2a5ebcSHariprasad Shenai  *
27355e2a5ebcSHariprasad Shenai  * Every ingress queue entry carries a 60-bit timestamp expressed in Core
27365e2a5ebcSHariprasad Shenai  * Clock ticks; convert it to nanoseconds and store it in @hwtstamps as a
27365e2a5ebcSHariprasad Shenai  * ktime_t
27375e2a5ebcSHariprasad Shenai  **/
27385e2a5ebcSHariprasad Shenai static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
27395e2a5ebcSHariprasad Shenai 				     struct skb_shared_hwtstamps *hwtstamps,
27405e2a5ebcSHariprasad Shenai 				     u64 sgetstamp)
27415e2a5ebcSHariprasad Shenai {
27425e2a5ebcSHariprasad Shenai 	u64 ns;
27435e2a5ebcSHariprasad Shenai 	u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
27445e2a5ebcSHariprasad Shenai 
27455e2a5ebcSHariprasad Shenai 	ns = div_u64(tmp, adap->params.vpd.cclk);
27465e2a5ebcSHariprasad Shenai 
27475e2a5ebcSHariprasad Shenai 	memset(hwtstamps, 0, sizeof(*hwtstamps));
27485e2a5ebcSHariprasad Shenai 	hwtstamps->hwtstamp = ns_to_ktime(ns);
27495e2a5ebcSHariprasad Shenai }
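
/* The conversion above computes ns = (sgetstamp * 10^6 + cclk / 2) / cclk,
 * with adap->params.vpd.cclk assumed to be the Core Clock in kHz, so each
 * tick is 10^6 / cclk nanoseconds and the cclk / 2 term rounds to nearest.
 * Worked example (hypothetical clock): with cclk = 500000 (500 MHz) a tick
 * is 2 ns, so an IQE timestamp of 1000 ticks yields hwtstamp = 2000 ns.
 */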
27505e2a5ebcSHariprasad Shenai 
2751f7917c00SJeff Kirsher static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
2752c50ae55eSGanesh Goudar 		   const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
2753f7917c00SJeff Kirsher {
275452367a76SVipul Pandya 	struct adapter *adapter = rxq->rspq.adap;
275552367a76SVipul Pandya 	struct sge *s = &adapter->sge;
27565e2a5ebcSHariprasad Shenai 	struct port_info *pi;
2757f7917c00SJeff Kirsher 	int ret;
2758f7917c00SJeff Kirsher 	struct sk_buff *skb;
2759f7917c00SJeff Kirsher 
2760f7917c00SJeff Kirsher 	skb = napi_get_frags(&rxq->rspq.napi);
2761f7917c00SJeff Kirsher 	if (unlikely(!skb)) {
2762f7917c00SJeff Kirsher 		t4_pktgl_free(gl);
2763f7917c00SJeff Kirsher 		rxq->stats.rx_drops++;
2764f7917c00SJeff Kirsher 		return;
2765f7917c00SJeff Kirsher 	}
2766f7917c00SJeff Kirsher 
276752367a76SVipul Pandya 	copy_frags(skb, gl, s->pktshift);
2768c50ae55eSGanesh Goudar 	if (tnl_hdr_len)
2769c50ae55eSGanesh Goudar 		skb->csum_level = 1;
277052367a76SVipul Pandya 	skb->len = gl->tot_len - s->pktshift;
2771f7917c00SJeff Kirsher 	skb->data_len = skb->len;
2772f7917c00SJeff Kirsher 	skb->truesize += skb->data_len;
2773f7917c00SJeff Kirsher 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2774f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, rxq->rspq.idx);
27755e2a5ebcSHariprasad Shenai 	pi = netdev_priv(skb->dev);
27765e2a5ebcSHariprasad Shenai 	if (pi->rxtstamp)
27775e2a5ebcSHariprasad Shenai 		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
27785e2a5ebcSHariprasad Shenai 					 gl->sgetstamp);
2779f7917c00SJeff Kirsher 	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
27808264989cSTom Herbert 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
27818264989cSTom Herbert 			     PKT_HASH_TYPE_L3);
2782f7917c00SJeff Kirsher 
2783f7917c00SJeff Kirsher 	if (unlikely(pkt->vlan_ex)) {
278486a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2785f7917c00SJeff Kirsher 		rxq->stats.vlan_ex++;
2786f7917c00SJeff Kirsher 	}
2787f7917c00SJeff Kirsher 	ret = napi_gro_frags(&rxq->rspq.napi);
2788f7917c00SJeff Kirsher 	if (ret == GRO_HELD)
2789f7917c00SJeff Kirsher 		rxq->stats.lro_pkts++;
2790f7917c00SJeff Kirsher 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
2791f7917c00SJeff Kirsher 		rxq->stats.lro_merged++;
2792f7917c00SJeff Kirsher 	rxq->stats.pkts++;
2793f7917c00SJeff Kirsher 	rxq->stats.rx_cso++;
2794f7917c00SJeff Kirsher }
2795f7917c00SJeff Kirsher 
2796a4569504SAtul Gupta enum {
2797a4569504SAtul Gupta 	RX_NON_PTP_PKT = 0,
2798a4569504SAtul Gupta 	RX_PTP_PKT_SUC = 1,
2799a4569504SAtul Gupta 	RX_PTP_PKT_ERR = 2
2800a4569504SAtul Gupta };
2801a4569504SAtul Gupta 
2802a4569504SAtul Gupta /**
2803a4569504SAtul Gupta  *     t4_systim_to_hwstamp - read hardware time stamp
2804a4569504SAtul Gupta  *     @adapter: the adapter
2805a4569504SAtul Gupta  *     @skb: the packet
2806a4569504SAtul Gupta  *
2807a4569504SAtul Gupta  *     Read the timestamp from an MPS packet and insert it into the skb,
2808a4569504SAtul Gupta  *     which is then forwarded to the PTP application
2809a4569504SAtul Gupta  */
2810a4569504SAtul Gupta static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
2811a4569504SAtul Gupta 					 struct sk_buff *skb)
2812a4569504SAtul Gupta {
2813a4569504SAtul Gupta 	struct skb_shared_hwtstamps *hwtstamps;
2814a4569504SAtul Gupta 	struct cpl_rx_mps_pkt *cpl = NULL;
2815a4569504SAtul Gupta 	unsigned char *data;
2816a4569504SAtul Gupta 	int offset;
2817a4569504SAtul Gupta 
2818a4569504SAtul Gupta 	cpl = (struct cpl_rx_mps_pkt *)skb->data;
2819a4569504SAtul Gupta 	if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
2820a4569504SAtul Gupta 	     X_CPL_RX_MPS_PKT_TYPE_PTP))
2821a4569504SAtul Gupta 		return RX_PTP_PKT_ERR;
2822a4569504SAtul Gupta 
2823a4569504SAtul Gupta 	data = skb->data + sizeof(*cpl);
2824a4569504SAtul Gupta 	skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
2825a4569504SAtul Gupta 	offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
2826a4569504SAtul Gupta 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
2827a4569504SAtul Gupta 		return RX_PTP_PKT_ERR;
2828a4569504SAtul Gupta 
2829a4569504SAtul Gupta 	hwtstamps = skb_hwtstamps(skb);
2830a4569504SAtul Gupta 	memset(hwtstamps, 0, sizeof(*hwtstamps));
2831a4569504SAtul Gupta 	hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
2832a4569504SAtul Gupta 
2833a4569504SAtul Gupta 	return RX_PTP_PKT_SUC;
2834a4569504SAtul Gupta }
2835a4569504SAtul Gupta 
2836a4569504SAtul Gupta /**
2837a4569504SAtul Gupta  *     t4_rx_hststamp - Recv PTP Event Message
2838a4569504SAtul Gupta  *     @adapter: the adapter
2839a4569504SAtul Gupta  *     @rsp: the response queue descriptor holding the RX_PKT message
2839a4569504SAtul Gupta  *     @rxq: the Ethernet RX response queue
2840a4569504SAtul Gupta  *     @skb: the packet
2841a4569504SAtul Gupta  *
2842a4569504SAtul Gupta  *     If PTP is enabled and this is an MPS packet, read the HW timestamp
2843a4569504SAtul Gupta  */
2844a4569504SAtul Gupta static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
2845a4569504SAtul Gupta 			  struct sge_eth_rxq *rxq, struct sk_buff *skb)
2846a4569504SAtul Gupta {
2847a4569504SAtul Gupta 	int ret;
2848a4569504SAtul Gupta 
2849a4569504SAtul Gupta 	if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
2850a4569504SAtul Gupta 		     !is_t4(adapter->params.chip))) {
2851a4569504SAtul Gupta 		ret = t4_systim_to_hwstamp(adapter, skb);
2852a4569504SAtul Gupta 		if (ret == RX_PTP_PKT_ERR) {
2853a4569504SAtul Gupta 			kfree_skb(skb);
2854a4569504SAtul Gupta 			rxq->stats.rx_drops++;
2855a4569504SAtul Gupta 		}
2856a4569504SAtul Gupta 		return ret;
2857a4569504SAtul Gupta 	}
2858a4569504SAtul Gupta 	return RX_NON_PTP_PKT;
2859a4569504SAtul Gupta }
2860a4569504SAtul Gupta 
2861a4569504SAtul Gupta /**
2862a4569504SAtul Gupta  *      t4_tx_hststamp - Loopback PTP Transmit Event Message
2863a4569504SAtul Gupta  *      @adap: the adapter
2864a4569504SAtul Gupta  *      @adapter: the adapter
2865a4569504SAtul Gupta  *      @dev: the ingress net device
2866a4569504SAtul Gupta  *
2867a4569504SAtul Gupta  *      Read hardware timestamp for the loopback PTP Tx event message
2868a4569504SAtul Gupta  */
2869a4569504SAtul Gupta static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
2870a4569504SAtul Gupta 			  struct net_device *dev)
2871a4569504SAtul Gupta {
2872a4569504SAtul Gupta 	struct port_info *pi = netdev_priv(dev);
2873a4569504SAtul Gupta 
2874a4569504SAtul Gupta 	if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
2875a4569504SAtul Gupta 		cxgb4_ptp_read_hwstamp(adapter, pi);
2876a4569504SAtul Gupta 		kfree_skb(skb);
2877a4569504SAtul Gupta 		return 0;
2878a4569504SAtul Gupta 	}
2879a4569504SAtul Gupta 	return 1;
2880a4569504SAtul Gupta }
2881a4569504SAtul Gupta 
2882f7917c00SJeff Kirsher /**
2883d429005fSVishal Kulkarni  *	t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
2884d429005fSVishal Kulkarni  *	@rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
2885d429005fSVishal Kulkarni  *	@rsp: Response Entry pointer into Response Queue
2886d429005fSVishal Kulkarni  *	@gl: Gather List pointer
2887d429005fSVishal Kulkarni  *
2888d429005fSVishal Kulkarni  *	For adapters which support the SGE Doorbell Queue Timer facility,
2889d429005fSVishal Kulkarni  *	we configure the Ethernet TX Queues to send CIDX Updates to the
2890d429005fSVishal Kulkarni  *	Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
2891d429005fSVishal Kulkarni  *	messages.  This adds a small load to PCIe Link RX bandwidth and,
2892d429005fSVishal Kulkarni  *	potentially, higher CPU Interrupt load, but allows us to respond
2893d429005fSVishal Kulkarni  *	much more quickly to the CIDX Updates.  This is important for
2894d429005fSVishal Kulkarni  *	Upper Layer Software which isn't willing to have a large amount
2895d429005fSVishal Kulkarni  *	of TX Data outstanding before receiving DMA Completions.
2896d429005fSVishal Kulkarni  */
2897d429005fSVishal Kulkarni static void t4_tx_completion_handler(struct sge_rspq *rspq,
2898d429005fSVishal Kulkarni 				     const __be64 *rsp,
2899d429005fSVishal Kulkarni 				     const struct pkt_gl *gl)
2900d429005fSVishal Kulkarni {
2901d429005fSVishal Kulkarni 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
2902d429005fSVishal Kulkarni 	struct port_info *pi = netdev_priv(rspq->netdev);
2903d429005fSVishal Kulkarni 	struct adapter *adapter = rspq->adap;
2904d429005fSVishal Kulkarni 	struct sge *s = &adapter->sge;
2905d429005fSVishal Kulkarni 	struct sge_eth_txq *txq;
2906d429005fSVishal Kulkarni 
2907d429005fSVishal Kulkarni 	/* skip RSS header */
2908d429005fSVishal Kulkarni 	rsp++;
2909d429005fSVishal Kulkarni 
2910d429005fSVishal Kulkarni 	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
2911d429005fSVishal Kulkarni 	 */
2912d429005fSVishal Kulkarni 	if (unlikely(opcode == CPL_FW4_MSG &&
2913d429005fSVishal Kulkarni 		     ((const struct cpl_fw4_msg *)rsp)->type ==
2914d429005fSVishal Kulkarni 							FW_TYPE_RSSCPL)) {
2915d429005fSVishal Kulkarni 		rsp++;
2916d429005fSVishal Kulkarni 		opcode = ((const struct rss_header *)rsp)->opcode;
2917d429005fSVishal Kulkarni 		rsp++;
2918d429005fSVishal Kulkarni 	}
2919d429005fSVishal Kulkarni 
2920d429005fSVishal Kulkarni 	if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
2921d429005fSVishal Kulkarni 		pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
2922d429005fSVishal Kulkarni 			__func__, opcode);
2923d429005fSVishal Kulkarni 		return;
2924d429005fSVishal Kulkarni 	}
2925d429005fSVishal Kulkarni 
2926d429005fSVishal Kulkarni 	txq = &s->ethtxq[pi->first_qset + rspq->idx];
2927d429005fSVishal Kulkarni 
2928d429005fSVishal Kulkarni 	/* We've got the Hardware Consumer Index Update in the Egress Update
2929d429005fSVishal Kulkarni 	 * message.  If we're using the SGE Doorbell Queue Timer mechanism,
2930d429005fSVishal Kulkarni 	 * these Egress Update messages will be the only CIDX Updates we get
2931d429005fSVishal Kulkarni 	 * since we don't want to chew up PCIe bandwidth for both Ingress
2932d429005fSVishal Kulkarni 	 * Messages and Status Page writes.  However, the code which manages
2933d429005fSVishal Kulkarni 	 * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
2934d429005fSVishal Kulkarni 	 * stored in the Status Page at the end of the TX Queue.  It's easiest
2935d429005fSVishal Kulkarni 	 * to simply copy the CIDX Update value from the Egress Update message
2936d429005fSVishal Kulkarni 	 * to the Status Page.  Also note that no Endian issues need to be
2937d429005fSVishal Kulkarni 	 * considered here since both are Big Endian and we're just copying
2938d429005fSVishal Kulkarni 	 * bytes consistently ...
2939d429005fSVishal Kulkarni 	 */
2940d429005fSVishal Kulkarni 	if (txq->dbqt) {
2941d429005fSVishal Kulkarni 		struct cpl_sge_egr_update *egr;
2942d429005fSVishal Kulkarni 
2943d429005fSVishal Kulkarni 		egr = (struct cpl_sge_egr_update *)rsp;
2944d429005fSVishal Kulkarni 		WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
2945d429005fSVishal Kulkarni 	}
2946d429005fSVishal Kulkarni 
2947d429005fSVishal Kulkarni 	t4_sge_eth_txq_egress_update(adapter, txq, -1);
2948d429005fSVishal Kulkarni }
2949d429005fSVishal Kulkarni 
2950d429005fSVishal Kulkarni /**
2951f7917c00SJeff Kirsher  *	t4_ethrx_handler - process an ingress ethernet packet
2952f7917c00SJeff Kirsher  *	@q: the response queue that received the packet
2953f7917c00SJeff Kirsher  *	@rsp: the response queue descriptor holding the RX_PKT message
2954f7917c00SJeff Kirsher  *	@si: the gather list of packet fragments
2955f7917c00SJeff Kirsher  *
2956f7917c00SJeff Kirsher  *	Process an ingress ethernet packet and deliver it to the stack.
2957f7917c00SJeff Kirsher  */
2958f7917c00SJeff Kirsher int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
2959f7917c00SJeff Kirsher 		     const struct pkt_gl *si)
2960f7917c00SJeff Kirsher {
2961f7917c00SJeff Kirsher 	bool csum_ok;
2962f7917c00SJeff Kirsher 	struct sk_buff *skb;
2963f7917c00SJeff Kirsher 	const struct cpl_rx_pkt *pkt;
2964f7917c00SJeff Kirsher 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2965a4569504SAtul Gupta 	struct adapter *adapter = q->adap;
296652367a76SVipul Pandya 	struct sge *s = &q->adap->sge;
2967d14807ddSHariprasad Shenai 	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
29680a57a536SSantosh Rastapur 			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
2969c50ae55eSGanesh Goudar 	u16 err_vec, tnl_hdr_len = 0;
297084a200b3SVarun Prakash 	struct port_info *pi;
2971a4569504SAtul Gupta 	int ret = 0;
2972f7917c00SJeff Kirsher 
2973d429005fSVishal Kulkarni 	/* If we're looking at TX Queue CIDX Update, handle that separately
2974d429005fSVishal Kulkarni 	 * and return.
2975d429005fSVishal Kulkarni 	 */
2976d429005fSVishal Kulkarni 	if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
2977d429005fSVishal Kulkarni 		     (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
2978d429005fSVishal Kulkarni 		t4_tx_completion_handler(q, rsp, si);
2979d429005fSVishal Kulkarni 		return 0;
2980d429005fSVishal Kulkarni 	}
2981d429005fSVishal Kulkarni 
29820a57a536SSantosh Rastapur 	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
2983f7917c00SJeff Kirsher 		return handle_trace_pkt(q->adap, si);
2984f7917c00SJeff Kirsher 
2985f7917c00SJeff Kirsher 	pkt = (const struct cpl_rx_pkt *)rsp;
29868eb9f2f9SArjun V 	/* Compressed error vector is enabled for T6 only */
2987c50ae55eSGanesh Goudar 	if (q->adap->params.tp.rx_pkt_encap) {
29888eb9f2f9SArjun V 		err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
2989c50ae55eSGanesh Goudar 		tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
2990c50ae55eSGanesh Goudar 	} else {
29918eb9f2f9SArjun V 		err_vec = be16_to_cpu(pkt->err_vec);
2992c50ae55eSGanesh Goudar 	}
29938eb9f2f9SArjun V 
29948eb9f2f9SArjun V 	csum_ok = pkt->csum_calc && !err_vec &&
2995cca2822dSHariprasad Shenai 		  (q->netdev->features & NETIF_F_RXCSUM);
2996992bea8eSGanesh Goudar 
2997992bea8eSGanesh Goudar 	if (err_vec)
2998992bea8eSGanesh Goudar 		rxq->stats.bad_rx_pkts++;
2999992bea8eSGanesh Goudar 
3000c50ae55eSGanesh Goudar 	if (((pkt->l2info & htonl(RXF_TCP_F)) ||
3001c50ae55eSGanesh Goudar 	     tnl_hdr_len) &&
3002f7917c00SJeff Kirsher 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
3003c50ae55eSGanesh Goudar 		do_gro(rxq, si, pkt, tnl_hdr_len);
3004f7917c00SJeff Kirsher 		return 0;
3005f7917c00SJeff Kirsher 	}
3006f7917c00SJeff Kirsher 
3007f7917c00SJeff Kirsher 	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
3008f7917c00SJeff Kirsher 	if (unlikely(!skb)) {
3009f7917c00SJeff Kirsher 		t4_pktgl_free(si);
3010f7917c00SJeff Kirsher 		rxq->stats.rx_drops++;
3011f7917c00SJeff Kirsher 		return 0;
3012f7917c00SJeff Kirsher 	}
3013a4569504SAtul Gupta 	pi = netdev_priv(q->netdev);
3014f7917c00SJeff Kirsher 
3015a4569504SAtul Gupta 	/* Handle PTP Event Rx packet */
3016a4569504SAtul Gupta 	if (unlikely(pi->ptp_enable)) {
3017a4569504SAtul Gupta 		ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
3018a4569504SAtul Gupta 		if (ret == RX_PTP_PKT_ERR)
3019a4569504SAtul Gupta 			return 0;
3020a4569504SAtul Gupta 	}
3021a4569504SAtul Gupta 	if (likely(!ret))
3022a4569504SAtul Gupta 		__skb_pull(skb, s->pktshift); /* remove ethernet header pad */
3023a4569504SAtul Gupta 
3024a4569504SAtul Gupta 	/* Handle the PTP Event Tx Loopback packet */
3025a4569504SAtul Gupta 	if (unlikely(pi->ptp_enable && !ret &&
3026a4569504SAtul Gupta 		     (pkt->l2info & htonl(RXF_UDP_F)) &&
3027a4569504SAtul Gupta 		     cxgb4_ptp_is_ptp_rx(skb))) {
3028a4569504SAtul Gupta 		if (!t4_tx_hststamp(adapter, skb, q->netdev))
3029a4569504SAtul Gupta 			return 0;
3030a4569504SAtul Gupta 	}
3031a4569504SAtul Gupta 
3032f7917c00SJeff Kirsher 	skb->protocol = eth_type_trans(skb, q->netdev);
3033f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, q->idx);
3034f7917c00SJeff Kirsher 	if (skb->dev->features & NETIF_F_RXHASH)
30358264989cSTom Herbert 		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
30368264989cSTom Herbert 			     PKT_HASH_TYPE_L3);
3037f7917c00SJeff Kirsher 
3038f7917c00SJeff Kirsher 	rxq->stats.pkts++;
3039f7917c00SJeff Kirsher 
30405e2a5ebcSHariprasad Shenai 	if (pi->rxtstamp)
30415e2a5ebcSHariprasad Shenai 		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
30425e2a5ebcSHariprasad Shenai 					 si->sgetstamp);
3043bdc590b9SHariprasad Shenai 	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
3044f7917c00SJeff Kirsher 		if (!pkt->ip_frag) {
3045f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
3046f7917c00SJeff Kirsher 			rxq->stats.rx_cso++;
3047bdc590b9SHariprasad Shenai 		} else if (pkt->l2info & htonl(RXF_IP_F)) {
3048f7917c00SJeff Kirsher 			__sum16 c = (__force __sum16)pkt->csum;
3049f7917c00SJeff Kirsher 			skb->csum = csum_unfold(c);
3050c50ae55eSGanesh Goudar 
3051c50ae55eSGanesh Goudar 			if (tnl_hdr_len) {
3052c50ae55eSGanesh Goudar 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3053c50ae55eSGanesh Goudar 				skb->csum_level = 1;
3054c50ae55eSGanesh Goudar 			} else {
3055f7917c00SJeff Kirsher 				skb->ip_summed = CHECKSUM_COMPLETE;
3056c50ae55eSGanesh Goudar 			}
3057f7917c00SJeff Kirsher 			rxq->stats.rx_cso++;
3058f7917c00SJeff Kirsher 		}
305984a200b3SVarun Prakash 	} else {
3060f7917c00SJeff Kirsher 		skb_checksum_none_assert(skb);
306184a200b3SVarun Prakash #ifdef CONFIG_CHELSIO_T4_FCOE
306284a200b3SVarun Prakash #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
306384a200b3SVarun Prakash 			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
306484a200b3SVarun Prakash 
306584a200b3SVarun Prakash 		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
306684a200b3SVarun Prakash 			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
306784a200b3SVarun Prakash 			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
30688eb9f2f9SArjun V 				if (q->adap->params.tp.rx_pkt_encap)
30698eb9f2f9SArjun V 					csum_ok = err_vec &
30708eb9f2f9SArjun V 						  T6_COMPR_RXERR_SUM_F;
30718eb9f2f9SArjun V 				else
30728eb9f2f9SArjun V 					csum_ok = err_vec & RXERR_CSUM_F;
30738eb9f2f9SArjun V 				if (!csum_ok)
307484a200b3SVarun Prakash 					skb->ip_summed = CHECKSUM_UNNECESSARY;
307584a200b3SVarun Prakash 			}
307684a200b3SVarun Prakash 		}
307784a200b3SVarun Prakash 
307884a200b3SVarun Prakash #undef CPL_RX_PKT_FLAGS
307984a200b3SVarun Prakash #endif /* CONFIG_CHELSIO_T4_FCOE */
308084a200b3SVarun Prakash 	}
3081f7917c00SJeff Kirsher 
3082f7917c00SJeff Kirsher 	if (unlikely(pkt->vlan_ex)) {
308386a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3084f7917c00SJeff Kirsher 		rxq->stats.vlan_ex++;
3085f7917c00SJeff Kirsher 	}
30863a336cb1SHariprasad Shenai 	skb_mark_napi_id(skb, &q->napi);
3087f7917c00SJeff Kirsher 	netif_receive_skb(skb);
3088f7917c00SJeff Kirsher 	return 0;
3089f7917c00SJeff Kirsher }
3090f7917c00SJeff Kirsher 
3091f7917c00SJeff Kirsher /**
3092f7917c00SJeff Kirsher  *	restore_rx_bufs - put back a packet's Rx buffers
3093f7917c00SJeff Kirsher  *	@si: the packet gather list
3094f7917c00SJeff Kirsher  *	@q: the SGE free list
3095f7917c00SJeff Kirsher  *	@frags: number of FL buffers to restore
3096f7917c00SJeff Kirsher  *
3097f7917c00SJeff Kirsher  *	Puts back on an FL the Rx buffers associated with @si.  The buffers
3098f7917c00SJeff Kirsher  *	have already been unmapped and are left unmapped; we mark them as such
3099f7917c00SJeff Kirsher  *	to prevent further unmapping attempts.
3100f7917c00SJeff Kirsher  *
3101f7917c00SJeff Kirsher  *	This function undoes a series of @unmap_rx_buf calls when we find out
3102f7917c00SJeff Kirsher  *	that the current packet can't be processed right away after all and we
3103f7917c00SJeff Kirsher  *	need to come back to it later.  This is a very rare event and there's
3104f7917c00SJeff Kirsher  *	no effort to make this particularly efficient.
3105f7917c00SJeff Kirsher  */
3106f7917c00SJeff Kirsher static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
3107f7917c00SJeff Kirsher 			    int frags)
3108f7917c00SJeff Kirsher {
3109f7917c00SJeff Kirsher 	struct rx_sw_desc *d;
3110f7917c00SJeff Kirsher 
3111f7917c00SJeff Kirsher 	while (frags--) {
3112f7917c00SJeff Kirsher 		if (q->cidx == 0)
3113f7917c00SJeff Kirsher 			q->cidx = q->size - 1;
3114f7917c00SJeff Kirsher 		else
3115f7917c00SJeff Kirsher 			q->cidx--;
3116f7917c00SJeff Kirsher 		d = &q->sdesc[q->cidx];
3117f7917c00SJeff Kirsher 		d->page = si->frags[frags].page;
3118f7917c00SJeff Kirsher 		d->dma_addr |= RX_UNMAPPED_BUF;
3119f7917c00SJeff Kirsher 		q->avail++;
3120f7917c00SJeff Kirsher 	}
3121f7917c00SJeff Kirsher }
3122f7917c00SJeff Kirsher 
3123f7917c00SJeff Kirsher /**
3124f7917c00SJeff Kirsher  *	is_new_response - check if a response is newly written
3125f7917c00SJeff Kirsher  *	@r: the response descriptor
3126f7917c00SJeff Kirsher  *	@q: the response queue
3127f7917c00SJeff Kirsher  *
3128f7917c00SJeff Kirsher  *	Returns true if a response descriptor contains an as-yet unprocessed
3129f7917c00SJeff Kirsher  *	response.
3130f7917c00SJeff Kirsher  */
3131f7917c00SJeff Kirsher static inline bool is_new_response(const struct rsp_ctrl *r,
3132f7917c00SJeff Kirsher 				   const struct sge_rspq *q)
3133f7917c00SJeff Kirsher {
31341ecc7b7aSHariprasad Shenai 	return (r->type_gen >> RSPD_GEN_S) == q->gen;
3135f7917c00SJeff Kirsher }
3136f7917c00SJeff Kirsher 
3137f7917c00SJeff Kirsher /**
3138f7917c00SJeff Kirsher  *	rspq_next - advance to the next entry in a response queue
3139f7917c00SJeff Kirsher  *	@q: the queue
3140f7917c00SJeff Kirsher  *
3141f7917c00SJeff Kirsher  *	Updates the state of a response queue to advance it to the next entry.
3142f7917c00SJeff Kirsher  */
3143f7917c00SJeff Kirsher static inline void rspq_next(struct sge_rspq *q)
3144f7917c00SJeff Kirsher {
3145f7917c00SJeff Kirsher 	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
3146f7917c00SJeff Kirsher 	if (unlikely(++q->cidx == q->size)) {
3147f7917c00SJeff Kirsher 		q->cidx = 0;
3148f7917c00SJeff Kirsher 		q->gen ^= 1;
3149f7917c00SJeff Kirsher 		q->cur_desc = q->desc;
3150f7917c00SJeff Kirsher 	}
3151f7917c00SJeff Kirsher }
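
/* Illustrative sketch, not driver code: how the generation bit spots stale
 * entries across a wrap.  With a hypothetical queue of size 4 starting at
 * gen == 1, rspq_next() advances cidx 0 -> 1 -> 2 -> 3 and then wraps cidx
 * back to 0 while flipping q->gen to 0.  Entries left over from the previous
 * pass still carry gen == 1, so is_new_response() reports them as not new
 * until the hardware overwrites them with gen == 0 descriptors.
 */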
3152f7917c00SJeff Kirsher 
3153f7917c00SJeff Kirsher /**
3154f7917c00SJeff Kirsher  *	process_responses - process responses from an SGE response queue
3155f7917c00SJeff Kirsher  *	@q: the ingress queue to process
3156f7917c00SJeff Kirsher  *	@budget: how many responses can be processed in this round
3157f7917c00SJeff Kirsher  *
3158f7917c00SJeff Kirsher  *	Process responses from an SGE response queue up to the supplied budget.
3159f7917c00SJeff Kirsher  *	Responses include received packets as well as control messages from FW
3160f7917c00SJeff Kirsher  *	or HW.
3161f7917c00SJeff Kirsher  *
3162f7917c00SJeff Kirsher  *	Additionally choose the interrupt holdoff time for the next interrupt
3163f7917c00SJeff Kirsher  *	on this queue.  If the system is under memory shortage, use a fairly
3164f7917c00SJeff Kirsher  *	long delay to help recovery.
3165f7917c00SJeff Kirsher  */
3166f7917c00SJeff Kirsher static int process_responses(struct sge_rspq *q, int budget)
3167f7917c00SJeff Kirsher {
3168f7917c00SJeff Kirsher 	int ret, rsp_type;
3169f7917c00SJeff Kirsher 	int budget_left = budget;
3170f7917c00SJeff Kirsher 	const struct rsp_ctrl *rc;
3171f7917c00SJeff Kirsher 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
317252367a76SVipul Pandya 	struct adapter *adapter = q->adap;
317352367a76SVipul Pandya 	struct sge *s = &adapter->sge;
3174f7917c00SJeff Kirsher 
3175f7917c00SJeff Kirsher 	while (likely(budget_left)) {
3176f7917c00SJeff Kirsher 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
31772337ba42SVarun Prakash 		if (!is_new_response(rc, q)) {
31782337ba42SVarun Prakash 			if (q->flush_handler)
31792337ba42SVarun Prakash 				q->flush_handler(q);
3180f7917c00SJeff Kirsher 			break;
31812337ba42SVarun Prakash 		}
3182f7917c00SJeff Kirsher 
3183019be1cfSAlexander Duyck 		dma_rmb();
31841ecc7b7aSHariprasad Shenai 		rsp_type = RSPD_TYPE_G(rc->type_gen);
31851ecc7b7aSHariprasad Shenai 		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
3186e91b0f24SIan Campbell 			struct page_frag *fp;
3187f7917c00SJeff Kirsher 			struct pkt_gl si;
3188f7917c00SJeff Kirsher 			const struct rx_sw_desc *rsd;
3189f7917c00SJeff Kirsher 			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
3190f7917c00SJeff Kirsher 
31911ecc7b7aSHariprasad Shenai 			if (len & RSPD_NEWBUF_F) {
3192f7917c00SJeff Kirsher 				if (likely(q->offset > 0)) {
3193f7917c00SJeff Kirsher 					free_rx_bufs(q->adap, &rxq->fl, 1);
3194f7917c00SJeff Kirsher 					q->offset = 0;
3195f7917c00SJeff Kirsher 				}
31961ecc7b7aSHariprasad Shenai 				len = RSPD_LEN_G(len);
3197f7917c00SJeff Kirsher 			}
3198f7917c00SJeff Kirsher 			si.tot_len = len;
3199f7917c00SJeff Kirsher 
3200f7917c00SJeff Kirsher 			/* gather packet fragments */
3201f7917c00SJeff Kirsher 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
3202f7917c00SJeff Kirsher 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
320352367a76SVipul Pandya 				bufsz = get_buf_size(adapter, rsd);
3204f7917c00SJeff Kirsher 				fp->page = rsd->page;
3205e91b0f24SIan Campbell 				fp->offset = q->offset;
3206e91b0f24SIan Campbell 				fp->size = min(bufsz, len);
3207e91b0f24SIan Campbell 				len -= fp->size;
3208f7917c00SJeff Kirsher 				if (!len)
3209f7917c00SJeff Kirsher 					break;
3210f7917c00SJeff Kirsher 				unmap_rx_buf(q->adap, &rxq->fl);
3211f7917c00SJeff Kirsher 			}
3212f7917c00SJeff Kirsher 
32135e2a5ebcSHariprasad Shenai 			si.sgetstamp = SGE_TIMESTAMP_G(
32145e2a5ebcSHariprasad Shenai 					be64_to_cpu(rc->last_flit));
3215f7917c00SJeff Kirsher 			/*
3216f7917c00SJeff Kirsher 			 * Last buffer remains mapped so explicitly make it
3217f7917c00SJeff Kirsher 			 * coherent for CPU access.
3218f7917c00SJeff Kirsher 			 */
3219f7917c00SJeff Kirsher 			dma_sync_single_for_cpu(q->adap->pdev_dev,
3220f7917c00SJeff Kirsher 						get_buf_addr(rsd),
3221e91b0f24SIan Campbell 						fp->size, DMA_FROM_DEVICE);
3222f7917c00SJeff Kirsher 
3223f7917c00SJeff Kirsher 			si.va = page_address(si.frags[0].page) +
3224e91b0f24SIan Campbell 				si.frags[0].offset;
3225f7917c00SJeff Kirsher 			prefetch(si.va);
3226f7917c00SJeff Kirsher 
3227f7917c00SJeff Kirsher 			si.nfrags = frags + 1;
3228f7917c00SJeff Kirsher 			ret = q->handler(q, q->cur_desc, &si);
3229f7917c00SJeff Kirsher 			if (likely(ret == 0))
323052367a76SVipul Pandya 				q->offset += ALIGN(fp->size, s->fl_align);
3231f7917c00SJeff Kirsher 			else
3232f7917c00SJeff Kirsher 				restore_rx_bufs(&si, &rxq->fl, frags);
32331ecc7b7aSHariprasad Shenai 		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
3234f7917c00SJeff Kirsher 			ret = q->handler(q, q->cur_desc, NULL);
3235f7917c00SJeff Kirsher 		} else {
3236f7917c00SJeff Kirsher 			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
3237f7917c00SJeff Kirsher 		}
3238f7917c00SJeff Kirsher 
3239f7917c00SJeff Kirsher 		if (unlikely(ret)) {
3240f7917c00SJeff Kirsher 			/* couldn't process descriptor, back off for recovery */
32411ecc7b7aSHariprasad Shenai 			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
3242f7917c00SJeff Kirsher 			break;
3243f7917c00SJeff Kirsher 		}
3244f7917c00SJeff Kirsher 
3245f7917c00SJeff Kirsher 		rspq_next(q);
3246f7917c00SJeff Kirsher 		budget_left--;
3247f7917c00SJeff Kirsher 	}
3248f7917c00SJeff Kirsher 
3249da08e425SHariprasad Shenai 	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
3250f7917c00SJeff Kirsher 		__refill_fl(q->adap, &rxq->fl);
3251f7917c00SJeff Kirsher 	return budget - budget_left;
3252f7917c00SJeff Kirsher }
3253f7917c00SJeff Kirsher 
3254f7917c00SJeff Kirsher /**
3255f7917c00SJeff Kirsher  *	napi_rx_handler - the NAPI handler for Rx processing
3256f7917c00SJeff Kirsher  *	@napi: the napi instance
3257f7917c00SJeff Kirsher  *	@budget: how many packets we can process in this round
3258f7917c00SJeff Kirsher  *
3259f7917c00SJeff Kirsher  *	Handler for new data events when using NAPI.  This does not need any
3260f7917c00SJeff Kirsher  *	locking or protection from interrupts as data interrupts are off at
3261f7917c00SJeff Kirsher  *	this point and other adapter interrupts do not interfere (the latter
3262f7917c00SJeff Kirsher  *	is not a concern at all with MSI-X, as non-data interrupts then have
3263f7917c00SJeff Kirsher  *	a separate handler).
3264f7917c00SJeff Kirsher  */
3265f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget)
3266f7917c00SJeff Kirsher {
3267f7917c00SJeff Kirsher 	unsigned int params;
3268f7917c00SJeff Kirsher 	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
32693a336cb1SHariprasad Shenai 	int work_done;
3270d63a6dcfSHariprasad Shenai 	u32 val;
3271f7917c00SJeff Kirsher 
32723a336cb1SHariprasad Shenai 	work_done = process_responses(q, budget);
3273f7917c00SJeff Kirsher 	if (likely(work_done < budget)) {
3274e553ec3fSHariprasad Shenai 		int timer_index;
3275e553ec3fSHariprasad Shenai 
3276812787b8SHariprasad Shenai 		napi_complete_done(napi, work_done);
32771ecc7b7aSHariprasad Shenai 		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
3278e553ec3fSHariprasad Shenai 
3279e553ec3fSHariprasad Shenai 		if (q->adaptive_rx) {
3280e553ec3fSHariprasad Shenai 			if (work_done > max(timer_pkt_quota[timer_index],
3281e553ec3fSHariprasad Shenai 					    MIN_NAPI_WORK))
3282e553ec3fSHariprasad Shenai 				timer_index = timer_index + 1;
3283e553ec3fSHariprasad Shenai 			else
3284e553ec3fSHariprasad Shenai 				timer_index = timer_index - 1;
3285e553ec3fSHariprasad Shenai 
3286e553ec3fSHariprasad Shenai 			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
32871ecc7b7aSHariprasad Shenai 			q->next_intr_params =
32881ecc7b7aSHariprasad Shenai 					QINTR_TIMER_IDX_V(timer_index) |
32891ecc7b7aSHariprasad Shenai 					QINTR_CNT_EN_V(0);
3290e553ec3fSHariprasad Shenai 			params = q->next_intr_params;
3291e553ec3fSHariprasad Shenai 		} else {
3292f7917c00SJeff Kirsher 			params = q->next_intr_params;
3293f7917c00SJeff Kirsher 			q->next_intr_params = q->intr_params;
3294e553ec3fSHariprasad Shenai 		}
3295f7917c00SJeff Kirsher 	} else
32961ecc7b7aSHariprasad Shenai 		params = QINTR_TIMER_IDX_V(7);
3297f7917c00SJeff Kirsher 
3298f612b815SHariprasad Shenai 	val = CIDXINC_V(work_done) | SEINTARM_V(params);
3299df64e4d3SHariprasad Shenai 
3300df64e4d3SHariprasad Shenai 	/* If we don't have access to the new User GTS (T5+), use the old
3301df64e4d3SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3302df64e4d3SHariprasad Shenai 	 */
3303df64e4d3SHariprasad Shenai 	if (unlikely(q->bar2_addr == NULL)) {
3304f612b815SHariprasad Shenai 		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
3305f612b815SHariprasad Shenai 			     val | INGRESSQID_V((u32)q->cntxt_id));
3306d63a6dcfSHariprasad Shenai 	} else {
3307f612b815SHariprasad Shenai 		writel(val | INGRESSQID_V(q->bar2_qid),
3308df64e4d3SHariprasad Shenai 		       q->bar2_addr + SGE_UDB_GTS);
3309d63a6dcfSHariprasad Shenai 		wmb();
3310d63a6dcfSHariprasad Shenai 	}
3311f7917c00SJeff Kirsher 	return work_done;
3312f7917c00SJeff Kirsher }
3313f7917c00SJeff Kirsher 
3314f7917c00SJeff Kirsher /*
3315f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue.
3316f7917c00SJeff Kirsher  */
3317f7917c00SJeff Kirsher irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
3318f7917c00SJeff Kirsher {
3319f7917c00SJeff Kirsher 	struct sge_rspq *q = cookie;
3320f7917c00SJeff Kirsher 
3321f7917c00SJeff Kirsher 	napi_schedule(&q->napi);
3322f7917c00SJeff Kirsher 	return IRQ_HANDLED;
3323f7917c00SJeff Kirsher }
3324f7917c00SJeff Kirsher 
3325f7917c00SJeff Kirsher /*
3326f7917c00SJeff Kirsher  * Process the indirect interrupt entries in the interrupt queue and kick off
3327f7917c00SJeff Kirsher  * NAPI for each queue that has generated an entry.
3328f7917c00SJeff Kirsher  */
3329f7917c00SJeff Kirsher static unsigned int process_intrq(struct adapter *adap)
3330f7917c00SJeff Kirsher {
3331f7917c00SJeff Kirsher 	unsigned int credits;
3332f7917c00SJeff Kirsher 	const struct rsp_ctrl *rc;
3333f7917c00SJeff Kirsher 	struct sge_rspq *q = &adap->sge.intrq;
3334d63a6dcfSHariprasad Shenai 	u32 val;
3335f7917c00SJeff Kirsher 
3336f7917c00SJeff Kirsher 	spin_lock(&adap->sge.intrq_lock);
3337f7917c00SJeff Kirsher 	for (credits = 0; ; credits++) {
3338f7917c00SJeff Kirsher 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3339f7917c00SJeff Kirsher 		if (!is_new_response(rc, q))
3340f7917c00SJeff Kirsher 			break;
3341f7917c00SJeff Kirsher 
3342019be1cfSAlexander Duyck 		dma_rmb();
33431ecc7b7aSHariprasad Shenai 		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
3344f7917c00SJeff Kirsher 			unsigned int qid = ntohl(rc->pldbuflen_qid);
3345f7917c00SJeff Kirsher 
3346f7917c00SJeff Kirsher 			qid -= adap->sge.ingr_start;
3347f7917c00SJeff Kirsher 			napi_schedule(&adap->sge.ingr_map[qid]->napi);
3348f7917c00SJeff Kirsher 		}
3349f7917c00SJeff Kirsher 
3350f7917c00SJeff Kirsher 		rspq_next(q);
3351f7917c00SJeff Kirsher 	}
3352f7917c00SJeff Kirsher 
3353f612b815SHariprasad Shenai 	val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
3354df64e4d3SHariprasad Shenai 
3355df64e4d3SHariprasad Shenai 	/* If we don't have access to the new User GTS (T5+), use the old
3356df64e4d3SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3357df64e4d3SHariprasad Shenai 	 */
3358df64e4d3SHariprasad Shenai 	if (unlikely(q->bar2_addr == NULL)) {
3359f612b815SHariprasad Shenai 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
3360f612b815SHariprasad Shenai 			     val | INGRESSQID_V(q->cntxt_id));
3361d63a6dcfSHariprasad Shenai 	} else {
3362f612b815SHariprasad Shenai 		writel(val | INGRESSQID_V(q->bar2_qid),
3363df64e4d3SHariprasad Shenai 		       q->bar2_addr + SGE_UDB_GTS);
3364d63a6dcfSHariprasad Shenai 		wmb();
3365d63a6dcfSHariprasad Shenai 	}
3366f7917c00SJeff Kirsher 	spin_unlock(&adap->sge.intrq_lock);
3367f7917c00SJeff Kirsher 	return credits;
3368f7917c00SJeff Kirsher }
3369f7917c00SJeff Kirsher 
3370f7917c00SJeff Kirsher /*
3371f7917c00SJeff Kirsher  * The MSI interrupt handler.  Handles data events from SGE response queues
3372f7917c00SJeff Kirsher  * as well as error and other async events, as they all use the same MSI vector.
3373f7917c00SJeff Kirsher  */
3374f7917c00SJeff Kirsher static irqreturn_t t4_intr_msi(int irq, void *cookie)
3375f7917c00SJeff Kirsher {
3376f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
3377f7917c00SJeff Kirsher 
3378c3c7b121SHariprasad Shenai 	if (adap->flags & MASTER_PF)
3379f7917c00SJeff Kirsher 		t4_slow_intr_handler(adap);
3380f7917c00SJeff Kirsher 	process_intrq(adap);
3381f7917c00SJeff Kirsher 	return IRQ_HANDLED;
3382f7917c00SJeff Kirsher }
3383f7917c00SJeff Kirsher 
3384f7917c00SJeff Kirsher /*
3385f7917c00SJeff Kirsher  * Interrupt handler for legacy INTx interrupts.
3386f7917c00SJeff Kirsher  * Handles data events from SGE response queues as well as error and other
3387f7917c00SJeff Kirsher  * async events, as they all use the same interrupt line.
3388f7917c00SJeff Kirsher  */
3389f7917c00SJeff Kirsher static irqreturn_t t4_intr_intx(int irq, void *cookie)
3390f7917c00SJeff Kirsher {
3391f7917c00SJeff Kirsher 	struct adapter *adap = cookie;
3392f7917c00SJeff Kirsher 
3393f061de42SHariprasad Shenai 	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
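	/* Note the bitwise "|" rather than "||" below: it is deliberate, so
	 * that process_intrq() runs (and drains the interrupt queue) even
	 * when the slow interrupt handler has already reported work.
	 */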
3394c3c7b121SHariprasad Shenai 	if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
3395c3c7b121SHariprasad Shenai 	    process_intrq(adap))
3396f7917c00SJeff Kirsher 		return IRQ_HANDLED;
3397f7917c00SJeff Kirsher 	return IRQ_NONE;             /* probably shared interrupt */
3398f7917c00SJeff Kirsher }
3399f7917c00SJeff Kirsher 
3400f7917c00SJeff Kirsher /**
3401f7917c00SJeff Kirsher  *	t4_intr_handler - select the top-level interrupt handler
3402f7917c00SJeff Kirsher  *	@adap: the adapter
3403f7917c00SJeff Kirsher  *
3404f7917c00SJeff Kirsher  *	Selects the top-level interrupt handler based on the type of interrupts
3405f7917c00SJeff Kirsher  *	(MSI-X, MSI, or INTx).
3406f7917c00SJeff Kirsher  */
3407f7917c00SJeff Kirsher irq_handler_t t4_intr_handler(struct adapter *adap)
3408f7917c00SJeff Kirsher {
3409f7917c00SJeff Kirsher 	if (adap->flags & USING_MSIX)
3410f7917c00SJeff Kirsher 		return t4_sge_intr_msix;
3411f7917c00SJeff Kirsher 	if (adap->flags & USING_MSI)
3412f7917c00SJeff Kirsher 		return t4_intr_msi;
3413f7917c00SJeff Kirsher 	return t4_intr_intx;
3414f7917c00SJeff Kirsher }
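
/* Illustrative sketch, not driver code: how the handler selected above might
 * be registered.  The IRQ number, flags and name here are hypothetical; the
 * real hookup lives in the driver's probe/attach path (cxgb4_main.c).
 *
 *	err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  "cxgb4", adap);
 *	if (err)
 *		dev_err(adap->pdev_dev, "failed to request IRQ: %d\n", err);
 */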
3415f7917c00SJeff Kirsher 
34160e23daebSKees Cook static void sge_rx_timer_cb(struct timer_list *t)
3417f7917c00SJeff Kirsher {
3418f7917c00SJeff Kirsher 	unsigned long m;
3419a3bfb617SHariprasad Shenai 	unsigned int i;
34200e23daebSKees Cook 	struct adapter *adap = from_timer(adap, t, sge.rx_timer);
3421f7917c00SJeff Kirsher 	struct sge *s = &adap->sge;
3422f7917c00SJeff Kirsher 
34234b8e27a8SHariprasad Shenai 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
3424f7917c00SJeff Kirsher 		for (m = s->starving_fl[i]; m; m &= m - 1) {
3425f7917c00SJeff Kirsher 			struct sge_eth_rxq *rxq;
3426f7917c00SJeff Kirsher 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
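			/* Example (assuming 64-bit longs): i == 1 and
			 * m == 0x4 give __ffs(m) == 2, so id == 2 + 64 == 66.
			 */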
3427f7917c00SJeff Kirsher 			struct sge_fl *fl = s->egr_map[id];
3428f7917c00SJeff Kirsher 
3429f7917c00SJeff Kirsher 			clear_bit(id, s->starving_fl);
34304e857c58SPeter Zijlstra 			smp_mb__after_atomic();
3431f7917c00SJeff Kirsher 
3432c098b026SHariprasad Shenai 			if (fl_starving(adap, fl)) {
3433f7917c00SJeff Kirsher 				rxq = container_of(fl, struct sge_eth_rxq, fl);
3434f7917c00SJeff Kirsher 				if (napi_reschedule(&rxq->rspq.napi))
3435f7917c00SJeff Kirsher 					fl->starving++;
3436f7917c00SJeff Kirsher 				else
3437f7917c00SJeff Kirsher 					set_bit(id, s->starving_fl);
3438f7917c00SJeff Kirsher 			}
3439f7917c00SJeff Kirsher 		}
3440a3bfb617SHariprasad Shenai 	/* The remainder of the SGE RX Timer Callback routine is dedicated to
3441a3bfb617SHariprasad Shenai 	 * global Master PF activities such as checking for chip ingress
3442a3bfb617SHariprasad Shenai 	 * stalls.
34430f4d201fSKumar Sanghvi 	 */
3444a3bfb617SHariprasad Shenai 	if (!(adap->flags & MASTER_PF))
3445a3bfb617SHariprasad Shenai 		goto done;
34460f4d201fSKumar Sanghvi 
3447a3bfb617SHariprasad Shenai 	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
34480f4d201fSKumar Sanghvi 
3449a3bfb617SHariprasad Shenai done:
3450f7917c00SJeff Kirsher 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
3451f7917c00SJeff Kirsher }
3452f7917c00SJeff Kirsher 
34530e23daebSKees Cook static void sge_tx_timer_cb(struct timer_list *t)
3454f7917c00SJeff Kirsher {
34550e23daebSKees Cook 	struct adapter *adap = from_timer(adap, t, sge.tx_timer);
3456f7917c00SJeff Kirsher 	struct sge *s = &adap->sge;
3457d429005fSVishal Kulkarni 	unsigned long m, period;
3458d429005fSVishal Kulkarni 	unsigned int i, budget;
3459f7917c00SJeff Kirsher 
34604b8e27a8SHariprasad Shenai 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
3461f7917c00SJeff Kirsher 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
3462f7917c00SJeff Kirsher 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
3463ab677ff4SHariprasad Shenai 			struct sge_uld_txq *txq = s->egr_map[id];
3464f7917c00SJeff Kirsher 
3465f7917c00SJeff Kirsher 			clear_bit(id, s->txq_maperr);
3466f7917c00SJeff Kirsher 			tasklet_schedule(&txq->qresume_tsk);
3467f7917c00SJeff Kirsher 		}
3468f7917c00SJeff Kirsher 
3469a4569504SAtul Gupta 	if (!is_t4(adap->params.chip)) {
3470a4569504SAtul Gupta 		struct sge_eth_txq *q = &s->ptptxq;
3471a4569504SAtul Gupta 		int avail;
3472a4569504SAtul Gupta 
3473a4569504SAtul Gupta 		spin_lock(&adap->ptp_lock);
3474a4569504SAtul Gupta 		avail = reclaimable(&q->q);
3475a4569504SAtul Gupta 
3476a4569504SAtul Gupta 		if (avail) {
3477a4569504SAtul Gupta 			free_tx_desc(adap, &q->q, avail, false);
3478a4569504SAtul Gupta 			q->q.in_use -= avail;
3479a4569504SAtul Gupta 		}
3480a4569504SAtul Gupta 		spin_unlock(&adap->ptp_lock);
3481a4569504SAtul Gupta 	}
3482a4569504SAtul Gupta 
3483f7917c00SJeff Kirsher 	budget = MAX_TIMER_TX_RECLAIM;
3484f7917c00SJeff Kirsher 	i = s->ethtxq_rover;
3485f7917c00SJeff Kirsher 	do {
3486d429005fSVishal Kulkarni 		budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
3487d429005fSVishal Kulkarni 						       budget);
3488d429005fSVishal Kulkarni 		if (!budget)
3489d429005fSVishal Kulkarni 			break;
3490f7917c00SJeff Kirsher 
3491f7917c00SJeff Kirsher 		if (++i >= s->ethqsets)
3492f7917c00SJeff Kirsher 			i = 0;
3493d429005fSVishal Kulkarni 	} while (i != s->ethtxq_rover);
3494f7917c00SJeff Kirsher 	s->ethtxq_rover = i;
3495d429005fSVishal Kulkarni 
3496d429005fSVishal Kulkarni 	if (budget == 0) {
3497d429005fSVishal Kulkarni 		/* If we found too many reclaimable packets, schedule a timer
3498d429005fSVishal Kulkarni 		 * in the near future to continue where we left off.
3499d429005fSVishal Kulkarni 		 */
3500d429005fSVishal Kulkarni 		period = 2;
3501d429005fSVishal Kulkarni 	} else {
3502d429005fSVishal Kulkarni 		/* We reclaimed all reclaimable TX Descriptors, so reschedule
3503d429005fSVishal Kulkarni 		 * at the normal period.
3504d429005fSVishal Kulkarni 		 */
3505d429005fSVishal Kulkarni 		period = TX_QCHECK_PERIOD;
3506d429005fSVishal Kulkarni 	}
3507d429005fSVishal Kulkarni 
3508d429005fSVishal Kulkarni 	mod_timer(&s->tx_timer, jiffies + period);
3509f7917c00SJeff Kirsher }
3510f7917c00SJeff Kirsher 
3511d63a6dcfSHariprasad Shenai /**
3512df64e4d3SHariprasad Shenai  *	bar2_address - return the BAR2 address for an SGE Queue's Registers
3513df64e4d3SHariprasad Shenai  *	@adapter: the adapter
3514df64e4d3SHariprasad Shenai  *	@qid: the SGE Queue ID
3515df64e4d3SHariprasad Shenai  *	@qtype: the SGE Queue Type (Egress or Ingress)
3516df64e4d3SHariprasad Shenai  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
3517d63a6dcfSHariprasad Shenai  *
3518df64e4d3SHariprasad Shenai  *	Returns the BAR2 address for the SGE Queue Registers associated with
3519df64e4d3SHariprasad Shenai  *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
3520df64e4d3SHariprasad Shenai  *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
3521df64e4d3SHariprasad Shenai  *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
3522df64e4d3SHariprasad Shenai  *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
3523d63a6dcfSHariprasad Shenai  */
3524df64e4d3SHariprasad Shenai static void __iomem *bar2_address(struct adapter *adapter,
3525df64e4d3SHariprasad Shenai 				  unsigned int qid,
3526df64e4d3SHariprasad Shenai 				  enum t4_bar2_qtype qtype,
3527df64e4d3SHariprasad Shenai 				  unsigned int *pbar2_qid)
3528d63a6dcfSHariprasad Shenai {
3529df64e4d3SHariprasad Shenai 	u64 bar2_qoffset;
3530df64e4d3SHariprasad Shenai 	int ret;
3531d63a6dcfSHariprasad Shenai 
3532e0456717SLinus Torvalds 	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
3533df64e4d3SHariprasad Shenai 				&bar2_qoffset, pbar2_qid);
3534df64e4d3SHariprasad Shenai 	if (ret)
3535df64e4d3SHariprasad Shenai 		return NULL;
3536d63a6dcfSHariprasad Shenai 
3537df64e4d3SHariprasad Shenai 	return adapter->bar2 + bar2_qoffset;
3538d63a6dcfSHariprasad Shenai }
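
/* Illustrative sketch, not driver code: a typical use of bar2_address() is to
 * cache the mapping at queue-init time and then ring the doorbell with one
 * register write, mirroring the GTS writes in napi_rx_handler() above:
 *
 *	q->bar2_addr = bar2_address(adap, q->cntxt_id,
 *				    T4_BAR2_QTYPE_INGRESS, &q->bar2_qid);
 *	...
 *	if (q->bar2_addr)
 *		writel(val | INGRESSQID_V(q->bar2_qid),
 *		       q->bar2_addr + SGE_UDB_GTS);
 */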
3539d63a6dcfSHariprasad Shenai 
3540145ef8a5SHariprasad Shenai /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
3541145ef8a5SHariprasad Shenai  * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
3542145ef8a5SHariprasad Shenai  */
3543f7917c00SJeff Kirsher int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
3544f7917c00SJeff Kirsher 		     struct net_device *dev, int intr_idx,
35452337ba42SVarun Prakash 		     struct sge_fl *fl, rspq_handler_t hnd,
35462337ba42SVarun Prakash 		     rspq_flush_handler_t flush_hnd, int cong)
3547f7917c00SJeff Kirsher {
3548f7917c00SJeff Kirsher 	int ret, flsz = 0;
3549f7917c00SJeff Kirsher 	struct fw_iq_cmd c;
355052367a76SVipul Pandya 	struct sge *s = &adap->sge;
3551f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
3552b0ba9d5fSCasey Leedom 	int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
3553f7917c00SJeff Kirsher 
3554f7917c00SJeff Kirsher 	/* Size needs to be a multiple of 16, including the status entry. */
3555f7917c00SJeff Kirsher 	iq->size = roundup(iq->size, 16);
3556f7917c00SJeff Kirsher 
3557f7917c00SJeff Kirsher 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
35580ac5b708SHariprasad Shenai 			      &iq->phys_addr, NULL, 0,
35590ac5b708SHariprasad Shenai 			      dev_to_node(adap->pdev_dev));
3560f7917c00SJeff Kirsher 	if (!iq->desc)
3561f7917c00SJeff Kirsher 		return -ENOMEM;
3562f7917c00SJeff Kirsher 
3563f7917c00SJeff Kirsher 	memset(&c, 0, sizeof(c));
3564e2ac9628SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
3565e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3566b2612722SHariprasad Shenai 			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
35676e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
3568f7917c00SJeff Kirsher 				 FW_LEN16(c));
35696e4b51a6SHariprasad Shenai 	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
35706e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
35711ecc7b7aSHariprasad Shenai 		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
35721ecc7b7aSHariprasad Shenai 		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
35736e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
3574f7917c00SJeff Kirsher 							-intr_idx - 1));
35756e4b51a6SHariprasad Shenai 	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
35766e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQGTSMODE_F |
35776e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
35786e4b51a6SHariprasad Shenai 		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
3579f7917c00SJeff Kirsher 	c.iqsize = htons(iq->size);
3580f7917c00SJeff Kirsher 	c.iqaddr = cpu_to_be64(iq->phys_addr);
3581145ef8a5SHariprasad Shenai 	if (cong >= 0)
35828dce04f1SArjun Vynipadath 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
35838dce04f1SArjun Vynipadath 				FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
35848dce04f1SArjun Vynipadath 							:  FW_IQ_IQTYPE_OFLD));
3585f7917c00SJeff Kirsher 
3586f7917c00SJeff Kirsher 	if (fl) {
3587d429005fSVishal Kulkarni 		unsigned int chip_ver =
3588d429005fSVishal Kulkarni 			CHELSIO_CHIP_VERSION(adap->params.chip);
35893ccc6cf7SHariprasad Shenai 
359013432997SHariprasad Shenai 		/* Allocate the ring for the hardware free list (with space
359113432997SHariprasad Shenai 		 * for its status page) along with the associated software
359213432997SHariprasad Shenai 		 * descriptor ring.  The free list size needs to be a multiple
359313432997SHariprasad Shenai 		 * of the Egress Queue Unit and at least 2 Egress Units larger
359413432997SHariprasad Shenai 		 * than the SGE's Egress Congestion Threshold
359513432997SHariprasad Shenai 		 * (fl_starve_thres - 1).
359613432997SHariprasad Shenai 		 */
359713432997SHariprasad Shenai 		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
359813432997SHariprasad Shenai 			fl->size = s->fl_starve_thres - 1 + 2 * 8;
3599f7917c00SJeff Kirsher 		fl->size = roundup(fl->size, 8);
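		/* Worked example with a hypothetical fl_starve_thres of 1023:
		 * the minimum is 1023 - 1 + 2 * 8 = 1038 entries, which
		 * roundup(fl->size, 8) then pads to 1040.
		 */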
3600f7917c00SJeff Kirsher 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
3601f7917c00SJeff Kirsher 				      sizeof(struct rx_sw_desc), &fl->addr,
36020ac5b708SHariprasad Shenai 				      &fl->sdesc, s->stat_len,
36030ac5b708SHariprasad Shenai 				      dev_to_node(adap->pdev_dev));
3604f7917c00SJeff Kirsher 		if (!fl->desc)
3605f7917c00SJeff Kirsher 			goto fl_nomem;
3606f7917c00SJeff Kirsher 
360752367a76SVipul Pandya 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
3608145ef8a5SHariprasad Shenai 		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
3609b0ba9d5fSCasey Leedom 					     FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
3610b0ba9d5fSCasey Leedom 					     FW_IQ_CMD_FL0DATARO_V(relaxed) |
36116e4b51a6SHariprasad Shenai 					     FW_IQ_CMD_FL0PADEN_F);
3612145ef8a5SHariprasad Shenai 		if (cong >= 0)
3613145ef8a5SHariprasad Shenai 			c.iqns_to_fl0congen |=
3614145ef8a5SHariprasad Shenai 				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
3615145ef8a5SHariprasad Shenai 				      FW_IQ_CMD_FL0CONGCIF_F |
3616145ef8a5SHariprasad Shenai 				      FW_IQ_CMD_FL0CONGEN_F);
3617edadad80SHariprasad Shenai 		/* In T6, for egress queue type FL there is internal overhead
3618edadad80SHariprasad Shenai 		 * of 16B for the header going into the FLM module.  Hence the maximum
3619edadad80SHariprasad Shenai 		 * allowed burst size is 448 bytes.  For T4/T5, the hardware
3620edadad80SHariprasad Shenai 		 * doesn't coalesce fetch requests if more than 64 bytes of
3621edadad80SHariprasad Shenai 		 * Free List pointers are provided, so we use a 128-byte Fetch
3622edadad80SHariprasad Shenai 		 * Burst Minimum there (T6 implements coalescing so we can use
3623edadad80SHariprasad Shenai 		 * the smaller 64-byte value there).
3624edadad80SHariprasad Shenai 		 */
36251ecc7b7aSHariprasad Shenai 		c.fl0dcaen_to_fl0cidxfthresh =
3626d429005fSVishal Kulkarni 			htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
3627edadad80SHariprasad Shenai 						   FETCHBURSTMIN_128B_X :
3628d429005fSVishal Kulkarni 						   FETCHBURSTMIN_64B_T6_X) |
3629d429005fSVishal Kulkarni 			      FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
36303ccc6cf7SHariprasad Shenai 						   FETCHBURSTMAX_512B_X :
36313ccc6cf7SHariprasad Shenai 						   FETCHBURSTMAX_256B_X));
3632f7917c00SJeff Kirsher 		c.fl0size = htons(flsz);
3633f7917c00SJeff Kirsher 		c.fl0addr = cpu_to_be64(fl->addr);
3634f7917c00SJeff Kirsher 	}
3635f7917c00SJeff Kirsher 
3636b2612722SHariprasad Shenai 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3637f7917c00SJeff Kirsher 	if (ret)
3638f7917c00SJeff Kirsher 		goto err;
3639f7917c00SJeff Kirsher 
3640f7917c00SJeff Kirsher 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
3641f7917c00SJeff Kirsher 	iq->cur_desc = iq->desc;
3642f7917c00SJeff Kirsher 	iq->cidx = 0;
3643f7917c00SJeff Kirsher 	iq->gen = 1;
3644f7917c00SJeff Kirsher 	iq->next_intr_params = iq->intr_params;
3645f7917c00SJeff Kirsher 	iq->cntxt_id = ntohs(c.iqid);
3646f7917c00SJeff Kirsher 	iq->abs_id = ntohs(c.physiqid);
3647df64e4d3SHariprasad Shenai 	iq->bar2_addr = bar2_address(adap,
3648df64e4d3SHariprasad Shenai 				     iq->cntxt_id,
3649df64e4d3SHariprasad Shenai 				     T4_BAR2_QTYPE_INGRESS,
3650df64e4d3SHariprasad Shenai 				     &iq->bar2_qid);
3651f7917c00SJeff Kirsher 	iq->size--;                           /* subtract status entry */
3652f7917c00SJeff Kirsher 	iq->netdev = dev;
3653f7917c00SJeff Kirsher 	iq->handler = hnd;
36542337ba42SVarun Prakash 	iq->flush_handler = flush_hnd;
36552337ba42SVarun Prakash 
36562337ba42SVarun Prakash 	memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
36572337ba42SVarun Prakash 	skb_queue_head_init(&iq->lro_mgr.lroq);
3658f7917c00SJeff Kirsher 
3659f7917c00SJeff Kirsher 	/* set offset to -1 to distinguish ingress queues without FL */
3660f7917c00SJeff Kirsher 	iq->offset = fl ? 0 : -1;
3661f7917c00SJeff Kirsher 
3662f7917c00SJeff Kirsher 	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
3663f7917c00SJeff Kirsher 
3664f7917c00SJeff Kirsher 	if (fl) {
3665f7917c00SJeff Kirsher 		fl->cntxt_id = ntohs(c.fl0id);
3666f7917c00SJeff Kirsher 		fl->avail = fl->pend_cred = 0;
3667f7917c00SJeff Kirsher 		fl->pidx = fl->cidx = 0;
3668f7917c00SJeff Kirsher 		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
3669f7917c00SJeff Kirsher 		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
3670d63a6dcfSHariprasad Shenai 
3671df64e4d3SHariprasad Shenai 		/* Note, we must initialize the BAR2 Free List User Doorbell
3672df64e4d3SHariprasad Shenai 		 * information before refilling the Free List!
3673d63a6dcfSHariprasad Shenai 		 */
3674df64e4d3SHariprasad Shenai 		fl->bar2_addr = bar2_address(adap,
3675df64e4d3SHariprasad Shenai 					     fl->cntxt_id,
3676df64e4d3SHariprasad Shenai 					     T4_BAR2_QTYPE_EGRESS,
3677df64e4d3SHariprasad Shenai 					     &fl->bar2_qid);
3678f7917c00SJeff Kirsher 		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
3679f7917c00SJeff Kirsher 	}
3680b8b1ae99SHariprasad Shenai 
3681b8b1ae99SHariprasad Shenai 	/* For T5 and later we attempt to set up the Congestion Manager values
3682b8b1ae99SHariprasad Shenai 	 * of the new RX Ethernet Queue.  This should really be handled by
3683b8b1ae99SHariprasad Shenai 	 * firmware because it's more complex than any host driver wants to
3684b8b1ae99SHariprasad Shenai 	 * get involved with; it's different per chip, and this is almost
3685b8b1ae99SHariprasad Shenai 	 * certainly wrong.  Firmware would be wrong as well, but it would be
3686b8b1ae99SHariprasad Shenai 	 * a lot easier to fix in one place ...  For now we do something very
3687b8b1ae99SHariprasad Shenai 	 * simple (and hopefully less wrong).
3688b8b1ae99SHariprasad Shenai 	 */
3689b8b1ae99SHariprasad Shenai 	if (!is_t4(adap->params.chip) && cong >= 0) {
36902216d014SHariprasad Shenai 		u32 param, val, ch_map = 0;
3691b8b1ae99SHariprasad Shenai 		int i;
36922216d014SHariprasad Shenai 		u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
3693b8b1ae99SHariprasad Shenai 
3694b8b1ae99SHariprasad Shenai 		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
3695b8b1ae99SHariprasad Shenai 			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
3696b8b1ae99SHariprasad Shenai 			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
3697b8b1ae99SHariprasad Shenai 		if (cong == 0) {
3698b8b1ae99SHariprasad Shenai 			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
3699b8b1ae99SHariprasad Shenai 		} else {
3700b8b1ae99SHariprasad Shenai 			val =
3701b8b1ae99SHariprasad Shenai 			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
3702b8b1ae99SHariprasad Shenai 			for (i = 0; i < 4; i++) {
3703b8b1ae99SHariprasad Shenai 				if (cong & (1 << i))
37042216d014SHariprasad Shenai 					ch_map |= 1 << (i << cng_ch_bits_log);
3705b8b1ae99SHariprasad Shenai 			}
37062216d014SHariprasad Shenai 			val |= CONMCTXT_CNGCHMAP_V(ch_map);
3707b8b1ae99SHariprasad Shenai 		}
3708b2612722SHariprasad Shenai 		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
3709b8b1ae99SHariprasad Shenai 				    &param, &val);
3710b8b1ae99SHariprasad Shenai 		if (ret)
3711b8b1ae99SHariprasad Shenai 			dev_warn(adap->pdev_dev, "Failed to set Congestion"
3712b8b1ae99SHariprasad Shenai 				 " Manager Context for Ingress Queue %d: %d\n",
3713b8b1ae99SHariprasad Shenai 				 iq->cntxt_id, -ret);
3714b8b1ae99SHariprasad Shenai 	}
3715b8b1ae99SHariprasad Shenai 
3716f7917c00SJeff Kirsher 	return 0;
3717f7917c00SJeff Kirsher 
3718f7917c00SJeff Kirsher fl_nomem:
3719f7917c00SJeff Kirsher 	ret = -ENOMEM;
3720f7917c00SJeff Kirsher err:
3721f7917c00SJeff Kirsher 	if (iq->desc) {
3722f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
3723f7917c00SJeff Kirsher 				  iq->desc, iq->phys_addr);
3724f7917c00SJeff Kirsher 		iq->desc = NULL;
3725f7917c00SJeff Kirsher 	}
3726f7917c00SJeff Kirsher 	if (fl && fl->desc) {
3727f7917c00SJeff Kirsher 		kfree(fl->sdesc);
3728f7917c00SJeff Kirsher 		fl->sdesc = NULL;
3729f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
3730f7917c00SJeff Kirsher 				  fl->desc, fl->addr);
3731f7917c00SJeff Kirsher 		fl->desc = NULL;
3732f7917c00SJeff Kirsher 	}
3733f7917c00SJeff Kirsher 	return ret;
3734f7917c00SJeff Kirsher }
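
/* Illustrative sketch, not driver code: allocating an Ethernet Rx queue with
 * an associated free list.  The queue sizes, MSI-X vector index and
 * congestion map below are hypothetical; the real calls live in
 * cxgb4_main.c.
 *
 *	struct sge_eth_rxq *q = &adap->sge.ethrxq[0];
 *
 *	q->rspq.size = 1024;
 *	q->fl.size = 72;
 *	err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msix_vec,
 *			       &q->fl, t4_ethrx_handler, NULL, 0);
 */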
3735f7917c00SJeff Kirsher 
3736f7917c00SJeff Kirsher static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
3737f7917c00SJeff Kirsher {
373822adfe0aSSantosh Rastapur 	q->cntxt_id = id;
3739df64e4d3SHariprasad Shenai 	q->bar2_addr = bar2_address(adap,
3740df64e4d3SHariprasad Shenai 				    q->cntxt_id,
3741df64e4d3SHariprasad Shenai 				    T4_BAR2_QTYPE_EGRESS,
3742df64e4d3SHariprasad Shenai 				    &q->bar2_qid);
3743f7917c00SJeff Kirsher 	q->in_use = 0;
3744f7917c00SJeff Kirsher 	q->cidx = q->pidx = 0;
3745f7917c00SJeff Kirsher 	q->stops = q->restarts = 0;
3746f7917c00SJeff Kirsher 	q->stat = (void *)&q->desc[q->size];
37473069ee9bSVipul Pandya 	spin_lock_init(&q->db_lock);
3748f7917c00SJeff Kirsher 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
3749f7917c00SJeff Kirsher }
3750f7917c00SJeff Kirsher 
3751d429005fSVishal Kulkarni /**
3752d429005fSVishal Kulkarni  *	t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
3753d429005fSVishal Kulkarni  *	@adap: the adapter
3754d429005fSVishal Kulkarni  *	@txq: the SGE Ethernet TX Queue to initialize
3755d429005fSVishal Kulkarni  *	@dev: the Linux Network Device
3756d429005fSVishal Kulkarni  *	@netdevq: the corresponding Linux TX Queue
3757d429005fSVishal Kulkarni  *	@iqid: the Ingress Queue to which to deliver CIDX Update messages
3758d429005fSVishal Kulkarni  *	@dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
3759d429005fSVishal Kulkarni  */
3760f7917c00SJeff Kirsher int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
3761f7917c00SJeff Kirsher 			 struct net_device *dev, struct netdev_queue *netdevq,
3762d429005fSVishal Kulkarni 			 unsigned int iqid, u8 dbqt)
3763f7917c00SJeff Kirsher {
3764d429005fSVishal Kulkarni 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
3765f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
3766d429005fSVishal Kulkarni 	struct sge *s = &adap->sge;
3767d429005fSVishal Kulkarni 	struct fw_eq_eth_cmd c;
3768d429005fSVishal Kulkarni 	int ret, nentries;
3769f7917c00SJeff Kirsher 
3770f7917c00SJeff Kirsher 	/* Add status entries */
377152367a76SVipul Pandya 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3772f7917c00SJeff Kirsher 
3773f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3774f7917c00SJeff Kirsher 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
377552367a76SVipul Pandya 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3776f7917c00SJeff Kirsher 			netdev_queue_numa_node_read(netdevq));
3777f7917c00SJeff Kirsher 	if (!txq->q.desc)
3778f7917c00SJeff Kirsher 		return -ENOMEM;
3779f7917c00SJeff Kirsher 
3780f7917c00SJeff Kirsher 	memset(&c, 0, sizeof(c));
3781e2ac9628SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
3782e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3783b2612722SHariprasad Shenai 			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
37846e4b51a6SHariprasad Shenai 			    FW_EQ_ETH_CMD_VFN_V(0));
37856e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
37866e4b51a6SHariprasad Shenai 				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
3787d429005fSVishal Kulkarni 
3788d429005fSVishal Kulkarni 	/* For TX Ethernet Queues using the SGE Doorbell Queue Timer
3789d429005fSVishal Kulkarni 	 * mechanism, we use Ingress Queue messages for Hardware Consumer
3790d429005fSVishal Kulkarni 	 * Index Updates on the TX Queue.  Otherwise we have the Hardware
3791d429005fSVishal Kulkarni 	 * write the CIDX Updates into the Status Page at the end of the
3792d429005fSVishal Kulkarni 	 * TX Queue.
3793d429005fSVishal Kulkarni 	 */
3794d429005fSVishal Kulkarni 	c.autoequiqe_to_viid = htonl((dbqt
3795d429005fSVishal Kulkarni 				      ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
3796d429005fSVishal Kulkarni 				      : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
37976e4b51a6SHariprasad Shenai 				     FW_EQ_ETH_CMD_VIID_V(pi->viid));
3798d429005fSVishal Kulkarni 
37991ecc7b7aSHariprasad Shenai 	c.fetchszm_to_iqid =
3800d429005fSVishal Kulkarni 		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
3801d429005fSVishal Kulkarni 						 ? HOSTFCMODE_INGRESS_QUEUE_X
3802d429005fSVishal Kulkarni 						 : HOSTFCMODE_STATUS_PAGE_X) |
38036e4b51a6SHariprasad Shenai 		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
38041ecc7b7aSHariprasad Shenai 		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
3805d429005fSVishal Kulkarni 
3806d429005fSVishal Kulkarni 	/* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
38071ecc7b7aSHariprasad Shenai 	c.dcaen_to_eqsize =
3808d429005fSVishal Kulkarni 		htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
3809d429005fSVishal Kulkarni 					    ? FETCHBURSTMIN_64B_X
3810d429005fSVishal Kulkarni 					    : FETCHBURSTMIN_64B_T6_X) |
38111ecc7b7aSHariprasad Shenai 		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
38121ecc7b7aSHariprasad Shenai 		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
38136e4b51a6SHariprasad Shenai 		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
3814d429005fSVishal Kulkarni 
3815f7917c00SJeff Kirsher 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3816f7917c00SJeff Kirsher 
3817d429005fSVishal Kulkarni 	/* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
3818d429005fSVishal Kulkarni 	 * currently configured Timer Index.  This can be changed later via an
3819d429005fSVishal Kulkarni 	 * ethtool -C tx-usecs {Timer Val} command.  Note that the SGE
3820d429005fSVishal Kulkarni 	 * Doorbell Queue mode is currently automatically enabled in the
3821d429005fSVishal Kulkarni 	 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
3822d429005fSVishal Kulkarni 	 */
3823d429005fSVishal Kulkarni 	if (dbqt)
3824d429005fSVishal Kulkarni 		c.timeren_timerix =
3825d429005fSVishal Kulkarni 			cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
3826d429005fSVishal Kulkarni 				    FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
3827d429005fSVishal Kulkarni 
3828b2612722SHariprasad Shenai 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3829f7917c00SJeff Kirsher 	if (ret) {
3830f7917c00SJeff Kirsher 		kfree(txq->q.sdesc);
3831f7917c00SJeff Kirsher 		txq->q.sdesc = NULL;
3832f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev,
3833f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
3834f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
3835f7917c00SJeff Kirsher 		txq->q.desc = NULL;
3836f7917c00SJeff Kirsher 		return ret;
3837f7917c00SJeff Kirsher 	}
3838f7917c00SJeff Kirsher 
3839ab677ff4SHariprasad Shenai 	txq->q.q_type = CXGB4_TXQ_ETH;
38406e4b51a6SHariprasad Shenai 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
3841f7917c00SJeff Kirsher 	txq->txq = netdevq;
3842f7917c00SJeff Kirsher 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
3843f7917c00SJeff Kirsher 	txq->mapping_err = 0;
3844d429005fSVishal Kulkarni 	txq->dbqt = dbqt;
3845d429005fSVishal Kulkarni 
3846f7917c00SJeff Kirsher 	return 0;
3847f7917c00SJeff Kirsher }
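
/* Illustrative sketch, not driver code: pairing a TX queue with its netdev
 * queue.  The size and indices are hypothetical, and dbqt is left disabled
 * here so CIDX updates are written to the status page:
 *
 *	struct sge_eth_txq *t = &adap->sge.ethtxq[0];
 *
 *	t->q.size = 1024;
 *	err = t4_sge_alloc_eth_txq(adap, t, dev, netdev_get_tx_queue(dev, 0),
 *				   adap->sge.fw_evtq.cntxt_id, 0);
 */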
3848f7917c00SJeff Kirsher 
3849f7917c00SJeff Kirsher int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
3850f7917c00SJeff Kirsher 			  struct net_device *dev, unsigned int iqid,
3851f7917c00SJeff Kirsher 			  unsigned int cmplqid)
3852f7917c00SJeff Kirsher {
3853d429005fSVishal Kulkarni 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
3854f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
3855d429005fSVishal Kulkarni 	struct sge *s = &adap->sge;
3856d429005fSVishal Kulkarni 	struct fw_eq_ctrl_cmd c;
3857d429005fSVishal Kulkarni 	int ret, nentries;
3858f7917c00SJeff Kirsher 
3859f7917c00SJeff Kirsher 	/* Add status entries */
386052367a76SVipul Pandya 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3861f7917c00SJeff Kirsher 
3862f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
3863f7917c00SJeff Kirsher 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
3864982b81ebSHariprasad Shenai 				 NULL, 0, dev_to_node(adap->pdev_dev));
3865f7917c00SJeff Kirsher 	if (!txq->q.desc)
3866f7917c00SJeff Kirsher 		return -ENOMEM;
3867f7917c00SJeff Kirsher 
3868e2ac9628SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
3869e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3870b2612722SHariprasad Shenai 			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
38716e4b51a6SHariprasad Shenai 			    FW_EQ_CTRL_CMD_VFN_V(0));
38726e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
38736e4b51a6SHariprasad Shenai 				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
38746e4b51a6SHariprasad Shenai 	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
3875f7917c00SJeff Kirsher 	c.physeqid_pkd = htonl(0);
38761ecc7b7aSHariprasad Shenai 	c.fetchszm_to_iqid =
38771ecc7b7aSHariprasad Shenai 		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
38786e4b51a6SHariprasad Shenai 		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
38791ecc7b7aSHariprasad Shenai 		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
38801ecc7b7aSHariprasad Shenai 	c.dcaen_to_eqsize =
3881d429005fSVishal Kulkarni 		htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
3882d429005fSVishal Kulkarni 					     ? FETCHBURSTMIN_64B_X
3883d429005fSVishal Kulkarni 					     : FETCHBURSTMIN_64B_T6_X) |
38841ecc7b7aSHariprasad Shenai 		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
38851ecc7b7aSHariprasad Shenai 		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
38866e4b51a6SHariprasad Shenai 		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
3887f7917c00SJeff Kirsher 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3888f7917c00SJeff Kirsher 
3889b2612722SHariprasad Shenai 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3890f7917c00SJeff Kirsher 	if (ret) {
3891f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev,
3892f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
3893f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
3894f7917c00SJeff Kirsher 		txq->q.desc = NULL;
3895f7917c00SJeff Kirsher 		return ret;
3896f7917c00SJeff Kirsher 	}
3897f7917c00SJeff Kirsher 
3898ab677ff4SHariprasad Shenai 	txq->q.q_type = CXGB4_TXQ_CTRL;
38996e4b51a6SHariprasad Shenai 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
3900f7917c00SJeff Kirsher 	txq->adap = adap;
3901f7917c00SJeff Kirsher 	skb_queue_head_init(&txq->sendq);
3902f7917c00SJeff Kirsher 	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
3903f7917c00SJeff Kirsher 	txq->full = 0;
3904f7917c00SJeff Kirsher 	return 0;
3905f7917c00SJeff Kirsher }
3906f7917c00SJeff Kirsher 
39070fbc81b3SHariprasad Shenai int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
39080fbc81b3SHariprasad Shenai 			unsigned int cmplqid)
39090fbc81b3SHariprasad Shenai {
39100fbc81b3SHariprasad Shenai 	u32 param, val;
39110fbc81b3SHariprasad Shenai 
39120fbc81b3SHariprasad Shenai 	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
39130fbc81b3SHariprasad Shenai 		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
39140fbc81b3SHariprasad Shenai 		 FW_PARAMS_PARAM_YZ_V(eqid));
39150fbc81b3SHariprasad Shenai 	val = cmplqid;
39160fbc81b3SHariprasad Shenai 	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
39170fbc81b3SHariprasad Shenai }
39180fbc81b3SHariprasad Shenai 
3919ab677ff4SHariprasad Shenai int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
3920ab677ff4SHariprasad Shenai 			 struct net_device *dev, unsigned int iqid,
3921ab677ff4SHariprasad Shenai 			 unsigned int uld_type)
3922f7917c00SJeff Kirsher {
3923d429005fSVishal Kulkarni 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
3924f7917c00SJeff Kirsher 	int ret, nentries;
3925f7917c00SJeff Kirsher 	struct fw_eq_ofld_cmd c;
392652367a76SVipul Pandya 	struct sge *s = &adap->sge;
3927f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
3928ab677ff4SHariprasad Shenai 	int cmd = FW_EQ_OFLD_CMD;
3929f7917c00SJeff Kirsher 
3930f7917c00SJeff Kirsher 	/* Add status entries */
393152367a76SVipul Pandya 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3932f7917c00SJeff Kirsher 
3933f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3934f7917c00SJeff Kirsher 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
393552367a76SVipul Pandya 			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3936f7917c00SJeff Kirsher 			NUMA_NO_NODE);
3937f7917c00SJeff Kirsher 	if (!txq->q.desc)
3938f7917c00SJeff Kirsher 		return -ENOMEM;
3939f7917c00SJeff Kirsher 
3940f7917c00SJeff Kirsher 	memset(&c, 0, sizeof(c));
3941ab677ff4SHariprasad Shenai 	if (unlikely(uld_type == CXGB4_TX_CRYPTO))
3942ab677ff4SHariprasad Shenai 		cmd = FW_EQ_CTRL_CMD;
3943ab677ff4SHariprasad Shenai 	c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
3944e2ac9628SHariprasad Shenai 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3945b2612722SHariprasad Shenai 			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
39466e4b51a6SHariprasad Shenai 			    FW_EQ_OFLD_CMD_VFN_V(0));
39476e4b51a6SHariprasad Shenai 	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
39486e4b51a6SHariprasad Shenai 				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
39491ecc7b7aSHariprasad Shenai 	c.fetchszm_to_iqid =
39501ecc7b7aSHariprasad Shenai 		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
39516e4b51a6SHariprasad Shenai 		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
39521ecc7b7aSHariprasad Shenai 		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
39531ecc7b7aSHariprasad Shenai 	c.dcaen_to_eqsize =
3954d429005fSVishal Kulkarni 		htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
3955d429005fSVishal Kulkarni 					     ? FETCHBURSTMIN_64B_X
3956d429005fSVishal Kulkarni 					     : FETCHBURSTMIN_64B_T6_X) |
39571ecc7b7aSHariprasad Shenai 		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
39581ecc7b7aSHariprasad Shenai 		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
39596e4b51a6SHariprasad Shenai 		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
3960f7917c00SJeff Kirsher 	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3961f7917c00SJeff Kirsher 
3962b2612722SHariprasad Shenai 	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3963f7917c00SJeff Kirsher 	if (ret) {
3964f7917c00SJeff Kirsher 		kfree(txq->q.sdesc);
3965f7917c00SJeff Kirsher 		txq->q.sdesc = NULL;
3966f7917c00SJeff Kirsher 		dma_free_coherent(adap->pdev_dev,
3967f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
3968f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
3969f7917c00SJeff Kirsher 		txq->q.desc = NULL;
3970f7917c00SJeff Kirsher 		return ret;
3971f7917c00SJeff Kirsher 	}
3972f7917c00SJeff Kirsher 
3973ab677ff4SHariprasad Shenai 	txq->q.q_type = CXGB4_TXQ_ULD;
39746e4b51a6SHariprasad Shenai 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
3975f7917c00SJeff Kirsher 	txq->adap = adap;
3976f7917c00SJeff Kirsher 	skb_queue_head_init(&txq->sendq);
3977f7917c00SJeff Kirsher 	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
3978f7917c00SJeff Kirsher 	txq->full = 0;
3979f7917c00SJeff Kirsher 	txq->mapping_err = 0;
3980f7917c00SJeff Kirsher 	return 0;
3981f7917c00SJeff Kirsher }
3982f7917c00SJeff Kirsher 
3983ab677ff4SHariprasad Shenai void free_txq(struct adapter *adap, struct sge_txq *q)
3984f7917c00SJeff Kirsher {
398552367a76SVipul Pandya 	struct sge *s = &adap->sge;
398652367a76SVipul Pandya 
3987f7917c00SJeff Kirsher 	dma_free_coherent(adap->pdev_dev,
398852367a76SVipul Pandya 			  q->size * sizeof(struct tx_desc) + s->stat_len,
3989f7917c00SJeff Kirsher 			  q->desc, q->phys_addr);
3990f7917c00SJeff Kirsher 	q->cntxt_id = 0;
3991f7917c00SJeff Kirsher 	q->sdesc = NULL;
3992f7917c00SJeff Kirsher 	q->desc = NULL;
3993f7917c00SJeff Kirsher }
3994f7917c00SJeff Kirsher 
399594cdb8bbSHariprasad Shenai void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
3996f7917c00SJeff Kirsher 		  struct sge_fl *fl)
3997f7917c00SJeff Kirsher {
399852367a76SVipul Pandya 	struct sge *s = &adap->sge;
3999f7917c00SJeff Kirsher 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
4000f7917c00SJeff Kirsher 
4001f7917c00SJeff Kirsher 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
4002b2612722SHariprasad Shenai 	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
4003f7917c00SJeff Kirsher 		   rq->cntxt_id, fl_id, 0xffff);
4004f7917c00SJeff Kirsher 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
4005f7917c00SJeff Kirsher 			  rq->desc, rq->phys_addr);
4006f7917c00SJeff Kirsher 	netif_napi_del(&rq->napi);
4007f7917c00SJeff Kirsher 	rq->netdev = NULL;
4008f7917c00SJeff Kirsher 	rq->cntxt_id = rq->abs_id = 0;
4009f7917c00SJeff Kirsher 	rq->desc = NULL;
4010f7917c00SJeff Kirsher 
4011f7917c00SJeff Kirsher 	if (fl) {
4012f7917c00SJeff Kirsher 		free_rx_bufs(adap, fl, fl->avail);
401352367a76SVipul Pandya 		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
4014f7917c00SJeff Kirsher 				  fl->desc, fl->addr);
4015f7917c00SJeff Kirsher 		kfree(fl->sdesc);
4016f7917c00SJeff Kirsher 		fl->sdesc = NULL;
4017f7917c00SJeff Kirsher 		fl->cntxt_id = 0;
4018f7917c00SJeff Kirsher 		fl->desc = NULL;
4019f7917c00SJeff Kirsher 	}
4020f7917c00SJeff Kirsher }
4021f7917c00SJeff Kirsher 
4022f7917c00SJeff Kirsher /**
40235fa76694SHariprasad Shenai  *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
40245fa76694SHariprasad Shenai  *      @adap: the adapter
40255fa76694SHariprasad Shenai  *      @n: number of queues
40265fa76694SHariprasad Shenai  *      @q: pointer to first queue
40275fa76694SHariprasad Shenai  *
40285fa76694SHariprasad Shenai  *      Release the resources of a consecutive block of offload Rx queues.
40295fa76694SHariprasad Shenai  */
40305fa76694SHariprasad Shenai void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
40315fa76694SHariprasad Shenai {
40325fa76694SHariprasad Shenai 	for ( ; n; n--, q++)
40335fa76694SHariprasad Shenai 		if (q->rspq.desc)
40345fa76694SHariprasad Shenai 			free_rspq_fl(adap, &q->rspq,
40355fa76694SHariprasad Shenai 				     q->fl.size ? &q->fl : NULL);
40365fa76694SHariprasad Shenai }
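
/*
 * Usage sketch (illustrative only; not a call site in this file): a ULD
 * teardown path could release its whole block of queues with
 *
 *	t4_free_ofld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
 *
 * where rxq_info, nrxq and uldrxq stand in for the caller's own queue
 * bookkeeping.
 */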
40375fa76694SHariprasad Shenai 
40385fa76694SHariprasad Shenai /**
4039f7917c00SJeff Kirsher  *	t4_free_sge_resources - free SGE resources
4040f7917c00SJeff Kirsher  *	@adap: the adapter
4041f7917c00SJeff Kirsher  *
4042f7917c00SJeff Kirsher  *	Frees resources used by the SGE queue sets.
4043f7917c00SJeff Kirsher  */
4044f7917c00SJeff Kirsher void t4_free_sge_resources(struct adapter *adap)
4045f7917c00SJeff Kirsher {
4046f7917c00SJeff Kirsher 	int i;
4047ebf4dc2bSHariprasad Shenai 	struct sge_eth_rxq *eq;
4048ebf4dc2bSHariprasad Shenai 	struct sge_eth_txq *etq;
4049ebf4dc2bSHariprasad Shenai 
4050ebf4dc2bSHariprasad Shenai 	/* stop all Rx queues so that they start draining */
4051ebf4dc2bSHariprasad Shenai 	for (i = 0; i < adap->sge.ethqsets; i++) {
4052ebf4dc2bSHariprasad Shenai 		eq = &adap->sge.ethrxq[i];
4053ebf4dc2bSHariprasad Shenai 		if (eq->rspq.desc)
4054ebf4dc2bSHariprasad Shenai 			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
4055ebf4dc2bSHariprasad Shenai 				   FW_IQ_TYPE_FL_INT_CAP,
4056ebf4dc2bSHariprasad Shenai 				   eq->rspq.cntxt_id,
4057ebf4dc2bSHariprasad Shenai 				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
4058ebf4dc2bSHariprasad Shenai 				   0xffff);
4059ebf4dc2bSHariprasad Shenai 	}
4060f7917c00SJeff Kirsher 
4061f7917c00SJeff Kirsher 	/* clean up Ethernet Tx/Rx queues */
4062ebf4dc2bSHariprasad Shenai 	for (i = 0; i < adap->sge.ethqsets; i++) {
4063ebf4dc2bSHariprasad Shenai 		eq = &adap->sge.ethrxq[i];
4064f7917c00SJeff Kirsher 		if (eq->rspq.desc)
40655fa76694SHariprasad Shenai 			free_rspq_fl(adap, &eq->rspq,
40665fa76694SHariprasad Shenai 				     eq->fl.size ? &eq->fl : NULL);
4067ebf4dc2bSHariprasad Shenai 
4068ebf4dc2bSHariprasad Shenai 		etq = &adap->sge.ethtxq[i];
4069f7917c00SJeff Kirsher 		if (etq->q.desc) {
4070b2612722SHariprasad Shenai 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4071f7917c00SJeff Kirsher 				       etq->q.cntxt_id);
4072fbe80776SHariprasad Shenai 			__netif_tx_lock_bh(etq->txq);
4073f7917c00SJeff Kirsher 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4074fbe80776SHariprasad Shenai 			__netif_tx_unlock_bh(etq->txq);
4075f7917c00SJeff Kirsher 			kfree(etq->q.sdesc);
4076f7917c00SJeff Kirsher 			free_txq(adap, &etq->q);
4077f7917c00SJeff Kirsher 		}
4078f7917c00SJeff Kirsher 	}
4079f7917c00SJeff Kirsher 
4080f7917c00SJeff Kirsher 	/* clean up control Tx queues */
4081f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
4082f7917c00SJeff Kirsher 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
4083f7917c00SJeff Kirsher 
4084f7917c00SJeff Kirsher 		if (cq->q.desc) {
4085f7917c00SJeff Kirsher 			tasklet_kill(&cq->qresume_tsk);
4086b2612722SHariprasad Shenai 			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
4087f7917c00SJeff Kirsher 					cq->q.cntxt_id);
4088f7917c00SJeff Kirsher 			__skb_queue_purge(&cq->sendq);
4089f7917c00SJeff Kirsher 			free_txq(adap, &cq->q);
4090f7917c00SJeff Kirsher 		}
4091f7917c00SJeff Kirsher 	}
4092f7917c00SJeff Kirsher 
4093f7917c00SJeff Kirsher 	if (adap->sge.fw_evtq.desc)
4094f7917c00SJeff Kirsher 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
4095f7917c00SJeff Kirsher 
4096f7917c00SJeff Kirsher 	if (adap->sge.intrq.desc)
4097f7917c00SJeff Kirsher 		free_rspq_fl(adap, &adap->sge.intrq, NULL);
4098f7917c00SJeff Kirsher 
4099a4569504SAtul Gupta 	if (!is_t4(adap->params.chip)) {
4100a4569504SAtul Gupta 		etq = &adap->sge.ptptxq;
4101a4569504SAtul Gupta 		if (etq->q.desc) {
4102a4569504SAtul Gupta 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4103a4569504SAtul Gupta 				       etq->q.cntxt_id);
4104a4569504SAtul Gupta 			spin_lock_bh(&adap->ptp_lock);
4105a4569504SAtul Gupta 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4106a4569504SAtul Gupta 			spin_unlock_bh(&adap->ptp_lock);
4107a4569504SAtul Gupta 			kfree(etq->q.sdesc);
4108a4569504SAtul Gupta 			free_txq(adap, &etq->q);
4109a4569504SAtul Gupta 		}
4110a4569504SAtul Gupta 	}
4111a4569504SAtul Gupta 
4112f7917c00SJeff Kirsher 	/* clear the reverse egress queue map */
41134b8e27a8SHariprasad Shenai 	memset(adap->sge.egr_map, 0,
41144b8e27a8SHariprasad Shenai 	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
4115f7917c00SJeff Kirsher }
4116f7917c00SJeff Kirsher 
4117f7917c00SJeff Kirsher void t4_sge_start(struct adapter *adap)
4118f7917c00SJeff Kirsher {
4119f7917c00SJeff Kirsher 	adap->sge.ethtxq_rover = 0;
4120f7917c00SJeff Kirsher 	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
4121f7917c00SJeff Kirsher 	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
4122f7917c00SJeff Kirsher }
4123f7917c00SJeff Kirsher 
4124f7917c00SJeff Kirsher /**
4125f7917c00SJeff Kirsher  *	t4_sge_stop - disable SGE operation
4126f7917c00SJeff Kirsher  *	@adap: the adapter
4127f7917c00SJeff Kirsher  *
4128f7917c00SJeff Kirsher  *	Stop tasklets and timers associated with the DMA engine.  Note that
4129f7917c00SJeff Kirsher  *	this is effective only if measures have been taken to disable any HW
4130f7917c00SJeff Kirsher  *	events that may restart them.
4131f7917c00SJeff Kirsher  */
4132f7917c00SJeff Kirsher void t4_sge_stop(struct adapter *adap)
4133f7917c00SJeff Kirsher {
4134f7917c00SJeff Kirsher 	int i;
4135f7917c00SJeff Kirsher 	struct sge *s = &adap->sge;
4136f7917c00SJeff Kirsher 
4137f7917c00SJeff Kirsher 	if (in_interrupt())  /* actions below require waiting */
4138f7917c00SJeff Kirsher 		return;
4139f7917c00SJeff Kirsher 
4140f7917c00SJeff Kirsher 	if (s->rx_timer.function)
4141f7917c00SJeff Kirsher 		del_timer_sync(&s->rx_timer);
4142f7917c00SJeff Kirsher 	if (s->tx_timer.function)
4143f7917c00SJeff Kirsher 		del_timer_sync(&s->tx_timer);
4144f7917c00SJeff Kirsher 
4145ab677ff4SHariprasad Shenai 	if (is_offload(adap)) {
4146ab677ff4SHariprasad Shenai 		struct sge_uld_txq_info *txq_info;
4147f7917c00SJeff Kirsher 
4148ab677ff4SHariprasad Shenai 		txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
4149ab677ff4SHariprasad Shenai 		if (txq_info) {
4150ab677ff4SHariprasad Shenai 			struct sge_uld_txq *txq = txq_info->uldtxq;
4151ab677ff4SHariprasad Shenai 
4152ab677ff4SHariprasad Shenai 			for_each_ofldtxq(&adap->sge, i) {
4153ab677ff4SHariprasad Shenai 				if (txq[i].q.desc)
4154ab677ff4SHariprasad Shenai 					tasklet_kill(&txq[i].qresume_tsk);
4155f7917c00SJeff Kirsher 			}
4156ab677ff4SHariprasad Shenai 		}
4157ab677ff4SHariprasad Shenai 	}
4158ab677ff4SHariprasad Shenai 
4159ab677ff4SHariprasad Shenai 	if (is_pci_uld(adap)) {
4160ab677ff4SHariprasad Shenai 		struct sge_uld_txq_info *txq_info;
4161ab677ff4SHariprasad Shenai 
4162ab677ff4SHariprasad Shenai 		txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
4163ab677ff4SHariprasad Shenai 		if (txq_info) {
4164ab677ff4SHariprasad Shenai 			struct sge_uld_txq *txq = txq_info->uldtxq;
4165ab677ff4SHariprasad Shenai 
4166ab677ff4SHariprasad Shenai 			for_each_ofldtxq(&adap->sge, i) {
4167ab677ff4SHariprasad Shenai 				if (txq[i].q.desc)
4168ab677ff4SHariprasad Shenai 					tasklet_kill(&txq[i].qresume_tsk);
4169ab677ff4SHariprasad Shenai 			}
4170ab677ff4SHariprasad Shenai 		}
4171ab677ff4SHariprasad Shenai 	}
4172ab677ff4SHariprasad Shenai 
4173f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
4174f7917c00SJeff Kirsher 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
4175f7917c00SJeff Kirsher 
4176f7917c00SJeff Kirsher 		if (cq->q.desc)
4177f7917c00SJeff Kirsher 			tasklet_kill(&cq->qresume_tsk);
4178f7917c00SJeff Kirsher 	}
4179f7917c00SJeff Kirsher }
4180f7917c00SJeff Kirsher 
4181f7917c00SJeff Kirsher /**
418206640310SHariprasad Shenai  *	t4_sge_init_soft - grab core SGE values needed by SGE code
4183f7917c00SJeff Kirsher  *	@adap: the adapter
4184f7917c00SJeff Kirsher  *
418506640310SHariprasad Shenai  *	Grab the SGE operating parameters this driver depends on and
418606640310SHariprasad Shenai  *	verify that we can live with them.
4187f7917c00SJeff Kirsher  */
418952367a76SVipul Pandya static int t4_sge_init_soft(struct adapter *adap)
419052367a76SVipul Pandya {
419152367a76SVipul Pandya 	struct sge *s = &adap->sge;
419252367a76SVipul Pandya 	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
419352367a76SVipul Pandya 	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
419452367a76SVipul Pandya 	u32 ingress_rx_threshold;
419552367a76SVipul Pandya 
419652367a76SVipul Pandya 	/*
419752367a76SVipul Pandya 	 * Verify that CPL messages are going to the Ingress Queue for
419852367a76SVipul Pandya 	 * process_responses() and that only packet data is going to the
419952367a76SVipul Pandya 	 * Free Lists.
420052367a76SVipul Pandya 	 */
4201f612b815SHariprasad Shenai 	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
4202f612b815SHariprasad Shenai 	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
420352367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
420452367a76SVipul Pandya 		return -EINVAL;
420552367a76SVipul Pandya 	}
420652367a76SVipul Pandya 
420752367a76SVipul Pandya 	/*
420852367a76SVipul Pandya 	 * Validate the Host Buffer Register Array indices that we want to
420952367a76SVipul Pandya 	 * use ...
421052367a76SVipul Pandya 	 *
421152367a76SVipul Pandya 	 * XXX Note that we should really read through the Host Buffer Size
421252367a76SVipul Pandya 	 * XXX register array and find the indices of the Buffer Sizes which
421352367a76SVipul Pandya 	 * XXX meet our needs!
421452367a76SVipul Pandya 	 */
421552367a76SVipul Pandya 	#define READ_FL_BUF(x) \
4216f612b815SHariprasad Shenai 		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A + (x) * sizeof(u32))
421752367a76SVipul Pandya 
421852367a76SVipul Pandya 	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
421952367a76SVipul Pandya 	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
422052367a76SVipul Pandya 	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
422152367a76SVipul Pandya 	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
422252367a76SVipul Pandya 
422392ddcc7bSKumar Sanghvi 	/* We only bother using the Large Page logic if the Large Page Buffer
422492ddcc7bSKumar Sanghvi 	 * is larger than our Page Size Buffer.
422592ddcc7bSKumar Sanghvi 	 */
422692ddcc7bSKumar Sanghvi 	if (fl_large_pg <= fl_small_pg)
422792ddcc7bSKumar Sanghvi 		fl_large_pg = 0;
422892ddcc7bSKumar Sanghvi 
422952367a76SVipul Pandya 	#undef READ_FL_BUF
423052367a76SVipul Pandya 
423192ddcc7bSKumar Sanghvi 	/* The Page Size Buffer must be exactly equal to our Page Size and the
423292ddcc7bSKumar Sanghvi 	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
423392ddcc7bSKumar Sanghvi 	 */
423452367a76SVipul Pandya 	if (fl_small_pg != PAGE_SIZE ||
423592ddcc7bSKumar Sanghvi 	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
423652367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
423752367a76SVipul Pandya 			fl_small_pg, fl_large_pg);
423852367a76SVipul Pandya 		return -EINVAL;
423952367a76SVipul Pandya 	}
424052367a76SVipul Pandya 	if (fl_large_pg)
424152367a76SVipul Pandya 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
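	/* Worked example (illustrative): with 4KB pages (PAGE_SHIFT = 12)
	 * and a 64KB Large Page Buffer, fl_pg_order = ilog2(65536) - 12 = 4,
	 * i.e. each large Free List buffer spans 2^4 = 16 pages.
	 */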
424252367a76SVipul Pandya 
424352367a76SVipul Pandya 	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
424452367a76SVipul Pandya 	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
424552367a76SVipul Pandya 		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
424652367a76SVipul Pandya 			fl_small_mtu, fl_large_mtu);
424752367a76SVipul Pandya 		return -EINVAL;
424852367a76SVipul Pandya 	}
424952367a76SVipul Pandya 
425052367a76SVipul Pandya 	/*
425152367a76SVipul Pandya 	 * Retrieve our RX interrupt holdoff timer values and counter
425252367a76SVipul Pandya 	 * threshold values from the SGE parameters.
425352367a76SVipul Pandya 	 */
4254f061de42SHariprasad Shenai 	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
4255f061de42SHariprasad Shenai 	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
4256f061de42SHariprasad Shenai 	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
425752367a76SVipul Pandya 	s->timer_val[0] = core_ticks_to_us(adap,
4258f061de42SHariprasad Shenai 		TIMERVALUE0_G(timer_value_0_and_1));
425952367a76SVipul Pandya 	s->timer_val[1] = core_ticks_to_us(adap,
4260f061de42SHariprasad Shenai 		TIMERVALUE1_G(timer_value_0_and_1));
426152367a76SVipul Pandya 	s->timer_val[2] = core_ticks_to_us(adap,
4262f061de42SHariprasad Shenai 		TIMERVALUE2_G(timer_value_2_and_3));
426352367a76SVipul Pandya 	s->timer_val[3] = core_ticks_to_us(adap,
4264f061de42SHariprasad Shenai 		TIMERVALUE3_G(timer_value_2_and_3));
426552367a76SVipul Pandya 	s->timer_val[4] = core_ticks_to_us(adap,
4266f061de42SHariprasad Shenai 		TIMERVALUE4_G(timer_value_4_and_5));
426752367a76SVipul Pandya 	s->timer_val[5] = core_ticks_to_us(adap,
4268f061de42SHariprasad Shenai 		TIMERVALUE5_G(timer_value_4_and_5));
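	/* core_ticks_to_us() converts raw core-clock ticks to microseconds.
	 * Illustrative arithmetic only: at a 200 MHz core clock, a
	 * TIMERVALUE of 1000 ticks works out to a 5 us holdoff.
	 */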
426952367a76SVipul Pandya 
4270f612b815SHariprasad Shenai 	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
4271f612b815SHariprasad Shenai 	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
4272f612b815SHariprasad Shenai 	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
4273f612b815SHariprasad Shenai 	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
4274f612b815SHariprasad Shenai 	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
427552367a76SVipul Pandya 
427652367a76SVipul Pandya 	return 0;
427752367a76SVipul Pandya }
427852367a76SVipul Pandya 
427906640310SHariprasad Shenai /**
428006640310SHariprasad Shenai  *     t4_sge_init - initialize SGE
428106640310SHariprasad Shenai  *     @adap: the adapter
428206640310SHariprasad Shenai  *
428306640310SHariprasad Shenai  *     Perform low-level SGE code initialization needed every time after a
428406640310SHariprasad Shenai  *     chip reset.
428552367a76SVipul Pandya  */
428652367a76SVipul Pandya int t4_sge_init(struct adapter *adap)
428752367a76SVipul Pandya {
428852367a76SVipul Pandya 	struct sge *s = &adap->sge;
4289acac5962SHariprasad Shenai 	u32 sge_control, sge_conm_ctrl;
4290c2b955e0SKumar Sanghvi 	int ret, egress_threshold;
429152367a76SVipul Pandya 
429252367a76SVipul Pandya 	/*
429352367a76SVipul Pandya 	 * Ingress Padding Boundary and Egress Status Page Size are set up by
429452367a76SVipul Pandya 	 * t4_fixup_host_params().
429552367a76SVipul Pandya 	 */
4296f612b815SHariprasad Shenai 	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
4297f612b815SHariprasad Shenai 	s->pktshift = PKTSHIFT_G(sge_control);
4298f612b815SHariprasad Shenai 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
4299ce8f407aSHariprasad Shenai 
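	/* t4_fl_pkt_align() derives the Free List buffer alignment from the
	 * chip's ingress padding/packing boundary settings.
	 */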
4300acac5962SHariprasad Shenai 	s->fl_align = t4_fl_pkt_align(adap);
430152367a76SVipul Pandya 	ret = t4_sge_init_soft(adap);
430252367a76SVipul Pandya 	if (ret < 0)
430352367a76SVipul Pandya 		return ret;
430452367a76SVipul Pandya 
430552367a76SVipul Pandya 	/*
430652367a76SVipul Pandya 	 * A FL with <= fl_starve_thres buffers is starving and a periodic
430752367a76SVipul Pandya 	 * timer will attempt to refill it.  This needs to be larger than the
430852367a76SVipul Pandya 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
430952367a76SVipul Pandya 	 * stuck waiting for new packets while the SGE is waiting for us to
431052367a76SVipul Pandya 	 * give it more Free List entries.  (Note that the SGE's Egress
4311c2b955e0SKumar Sanghvi 	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
4312c2b955e0SKumar Sanghvi 	 * there was only a single field to control this.  For T5 there's the
4313c2b955e0SKumar Sanghvi 	 * original field which now only applies to Unpacked Mode Free List
4314c2b955e0SKumar Sanghvi 	 * buffers and a new field which only applies to Packed Mode Free List
4315c2b955e0SKumar Sanghvi 	 * buffers.
431652367a76SVipul Pandya 	 */
4317f612b815SHariprasad Shenai 	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
4318676d6a75SHariprasad Shenai 	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
4319676d6a75SHariprasad Shenai 	case CHELSIO_T4:
4320f612b815SHariprasad Shenai 		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
4321676d6a75SHariprasad Shenai 		break;
4322676d6a75SHariprasad Shenai 	case CHELSIO_T5:
4323f612b815SHariprasad Shenai 		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
4324676d6a75SHariprasad Shenai 		break;
4325676d6a75SHariprasad Shenai 	case CHELSIO_T6:
4326676d6a75SHariprasad Shenai 		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
4327676d6a75SHariprasad Shenai 		break;
4328676d6a75SHariprasad Shenai 	default:
4329676d6a75SHariprasad Shenai 		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
4330676d6a75SHariprasad Shenai 			CHELSIO_CHIP_VERSION(adap->params.chip));
4331676d6a75SHariprasad Shenai 		return -EINVAL;
4332676d6a75SHariprasad Shenai 	}
4333c2b955e0SKumar Sanghvi 	s->fl_starve_thres = 2 * egress_threshold + 1;
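	/* E.g. (illustrative): a raw egress threshold of 64 covers
	 * 2 * 64 = 128 Free List pointers, giving fl_starve_thres = 129,
	 * so a Free List holding 129 or fewer buffers is treated as
	 * starving.
	 */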
433452367a76SVipul Pandya 
4335a3bfb617SHariprasad Shenai 	t4_idma_monitor_init(adap, &s->idma_monitor);
4336a3bfb617SHariprasad Shenai 
43371ecc7b7aSHariprasad Shenai 	/* Set up timers used for recurring callbacks to process RX and TX
43381ecc7b7aSHariprasad Shenai 	 * administrative tasks.
43391ecc7b7aSHariprasad Shenai 	 */
43400e23daebSKees Cook 	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
43410e23daebSKees Cook 	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
4342a3bfb617SHariprasad Shenai 
4343f7917c00SJeff Kirsher 	spin_lock_init(&s->intrq_lock);
434452367a76SVipul Pandya 
434552367a76SVipul Pandya 	return 0;
4346f7917c00SJeff Kirsher }
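
/*
 * Typical lifecycle (illustrative): t4_sge_init() runs once after each
 * chip reset, queue allocation follows, t4_sge_start() arms the RX/TX
 * service timers, and t4_sge_stop() plus t4_free_sge_resources()
 * reverse the process on teardown.
 */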
4347