/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"

/*
 * Constants ...
 */
enum {
	/*
	 * Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units (EQ_UNIT bytes).  Note that as far as the
	 * hardware is concerned, the free list is an Egress Queue (the host
	 * produces free buffers which the hardware consumes) and free list
	 * entries are 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	/*
	 * Max number of TX descriptors we clean up at a time.  Should be
	 * modest as freeing skbs isn't cheap and it happens while holding
	 * locks.  We just need to free packets faster than they arrive, we
	 * eventually catch up and keep the amortized cost reasonable.
	 */
	MAX_TX_RECLAIM = 16,

	/*
	 * Max number of Rx buffers we replenish at a time.  Again keep this
	 * modest, allocating buffers isn't cheap either.
	 */
	MAX_RX_REFILL = 16,

	/*
	 * Period of the Rx queue check timer.  This timer is infrequent as it
	 * has something to do only when the system experiences severe memory
	 * shortage.
	 */
	RX_QCHECK_PERIOD = (HZ / 2),

	/*
	 * Period of the TX queue check timer and the maximum number of TX
	 * descriptors to be reclaimed by the TX timer.
	 */
	TX_QCHECK_PERIOD = (HZ / 2),
	MAX_TIMER_TX_RECLAIM = 100,

	/*
	 * Suspend an Ethernet TX queue with fewer available descriptors than
	 * this.  We always want to have room for a maximum sized packet:
	 * inline immediate data + MAX_SKB_FRAGS. This is the same as
	 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
	 * (see that function and its helpers for a description of the
	 * calculation).
	 */
	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
				   ((ETHTXQ_MAX_FRAGS-1) & 1) +
				   2),
	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,

	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),

	/*
	 * Max TX descriptor space we allow for an Ethernet packet to be
	 * inlined into a WR.  This is limited by the maximum value which
	 * we can specify for immediate data in the firmware Ethernet TX
	 * Work Request.
	 */
	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,

	/*
	 * Max size of a WR sent through a control TX queue.
	 */
	MAX_CTRL_WR_LEN = 256,

	/*
	 * Maximum amount of data which we'll ever need to inline into a
	 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
	 */
	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
			  ? MAX_IMM_TX_PKT_LEN
			  : MAX_CTRL_WR_LEN),

	/*
	 * For incoming packets less than RX_COPY_THRES, we copy the data into
	 * an skb rather than referencing the data.  We allocate enough
	 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
	 * of the data (header).
	 */
	RX_COPY_THRES = 256,
	RX_PULL_LEN = 128,

	/*
	 * Main body length for sk_buffs used for RX Ethernet packets with
	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
	 * pskb_may_pull() some room.
	 */
	RX_SKB_LEN = 512,
};
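
/*
 * Worked example of the ETHTXQ_* arithmetic above (a sketch only; the real
 * values are configuration dependent).  Assuming MAX_SKB_FRAGS == 17,
 * EQ_UNIT == 64 (so TXD_PER_EQ_UNIT == 8), and fw_eth_tx_pkt_vm_wr,
 * cpl_tx_pkt_lso_core and cpl_tx_pkt_core sizes of 32, 16 and 16 bytes
 * respectively (hypothetical values for illustration):
 *
 *	ETHTXQ_MAX_FRAGS   = 17 + 1                    = 18
 *	ETHTXQ_MAX_SGL_LEN = (3*17)/2 + (17 & 1) + 2   = 28 flits
 *	ETHTXQ_MAX_HDR     = (32 + 16 + 16) / 8        =  8 flits
 *	ETHTXQ_MAX_FLITS   = 28 + 8                    = 36 flits
 *	ETHTXQ_STOP_THRES  = 1 + DIV_ROUND_UP(36, 8)   =  6 descriptors
 *
 * i.e. we stop the queue while there is still room for one more maximum
 * sized TSO packet.
 */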

/*
 * Software state per TX descriptor.
 */
struct tx_sw_desc {
	struct sk_buff *skb;		/* socket buffer of TX data source */
	struct ulptx_sgl *sgl;		/* scatter/gather list in TX Queue */
};

/*
 * Software state per RX Free List descriptor.  We keep track of the allocated
 * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
 * page size and its PCI DMA mapped state are stored in the low bits of the
 * PCI DMA address as per below.
 */
struct rx_sw_desc {
	struct page *page;		/* Free List page buffer */
	dma_addr_t dma_addr;		/* PCI DMA address (if mapped) */
					/*   and flags (see below) */
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
 * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
 * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
 * maintained in an inverse sense so the hardware never sees that bit high.
 */
enum {
	RX_LARGE_BUF    = 1 << 0,	/* buffer is SGE_FL_BUFFER_SIZE[1] */
	RX_UNMAPPED_BUF = 1 << 1,	/* buffer is not mapped */
};
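
/*
 * For illustration (hypothetical addresses): a DMA-mapped large-buffer page
 * at bus address 0x12345000 is stored as 0x12345000 | RX_LARGE_BUF ==
 * 0x12345001, while an unmapped small-buffer page keeps bit 0 clear and has
 * RX_UNMAPPED_BUF (bit 1) set.  Page addresses are page aligned, so both low
 * bits are free for this use.  get_buf_addr() below masks both bits back out
 * and is_buf_mapped() tests RX_UNMAPPED_BUF in the inverse sense described
 * above.
 */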

/**
 *	get_buf_addr - return DMA buffer address of software descriptor
 *	@sdesc: pointer to the software buffer descriptor
 *
 *	Return the DMA buffer address of a software descriptor (stripping out
 *	our low-order flag bits).
 */
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
{
	return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}

/**
 *	is_buf_mapped - is buffer mapped for DMA?
 *	@sdesc: pointer to the software buffer descriptor
 *
 *	Determine whether the buffer associated with a software descriptor is
 *	mapped for DMA or not.
 */
static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
{
	return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The compiler
 *	optimizes away the unnecessary unmapping code if this returns false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	txq_avail - return the number of available slots in a TX queue
 *	@tq: the TX queue
 *
 *	Returns the number of available descriptors in a TX queue.
 */
static inline unsigned int txq_avail(const struct sge_txq *tq)
{
	return tq->size - 1 - tq->in_use;
}

/**
 *	fl_cap - return the capacity of a Free List
 *	@fl: the Free List
 *
 *	Returns the capacity of a Free List.  The capacity is less than the
 *	size because an Egress Queue Index Unit worth of descriptors needs to
 *	be left unpopulated, otherwise the Producer and Consumer indices PIDX
 *	and CIDX will match and the hardware will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - FL_PER_EQ_UNIT;
}
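
/*
 * For example (assuming the usual EQ_UNIT of 64 bytes, so FL_PER_EQ_UNIT ==
 * 8): a Free List with fl->size == 1024 entries has a usable capacity of
 * 1024 - 8 == 1016 buffers, with the final Egress Queue Unit's worth of
 * entries deliberately left unpopulated.
 */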

/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

/**
 *	map_skb - map an skb for DMA to the device
 *	@dev: the egress net device
 *	@skb: the packet to map
 *	@addr: a pointer to the base of the DMA mapping array
 *
 *	Map an skb for DMA to the device and return an array of DMA addresses.
 */
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);

out_err:
	return -ENOMEM;
}
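
/*
 * Usage sketch (a hypothetical caller, for illustration only): the @addr
 * array must provide one slot for the skb main body plus one per page
 * fragment; addr[0] receives the head mapping and addr[1..nr_frags] the
 * fragment mappings.  On failure everything already mapped has been unwound,
 * so the caller can simply drop the packet:
 *
 *	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 *
 *	if (unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0))
 *		goto out_free;
 */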

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
				 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
			       be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
unmap:
			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)tq->stat) {
			p = (const struct ulptx_sge_pair *)tq->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)tq->stat)
			p = (const struct ulptx_sge_pair *)tq->desc;
		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
			? p->addr[0]
			: *(const __be64 *)tq->desc);
		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 *	free_tx_desc - reclaims TX descriptors and their buffers
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims TX descriptors from an SGE TX queue and frees the associated
 *	TX buffers.  Called with the TX queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *sdesc;
	unsigned int cidx = tq->cidx;
	struct device *dev = adapter->pdev_dev;

	const int need_unmap = need_skb_unmap() && unmap;

	sdesc = &tq->sdesc[cidx];
	while (n--) {
		/*
		 * If we kept a reference to the original TX skb, we need to
		 * unmap it from PCI DMA space (if required) and free it.
		 */
		if (sdesc->skb) {
			if (need_unmap)
				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
			dev_consume_skb_any(sdesc->skb);
			sdesc->skb = NULL;
		}

		sdesc++;
		if (++cidx == tq->size) {
			cidx = 0;
			sdesc = tq->sdesc;
		}
	}
	tq->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a TX queue.
 */
static inline int reclaimable(const struct sge_txq *tq)
{
	int hw_cidx = be16_to_cpu(tq->stat->cidx);
	int reclaimable = hw_cidx - tq->cidx;
	if (reclaimable < 0)
		reclaimable += tq->size;
	return reclaimable;
}
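
/*
 * Worked example of the wrap-around handling above (hypothetical numbers):
 * with tq->size == 1024, a hardware CIDX of 5 and a software CIDX of 1020,
 * the raw difference is 5 - 1020 == -1015, which wraps to -1015 + 1024 == 9
 * reclaimable descriptors.
 */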

/**
 *	reclaim_completed_tx - reclaims completed TX descriptors
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims TX descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the TX
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *tq,
					bool unmap)
{
	int avail = reclaimable(tq);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the TX lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adapter, tq, avail, unmap);
		tq->in_use -= avail;
	}
}

/**
 *	get_buf_size - return the size of an RX Free List buffer.
 *	@adapter: pointer to the associated adapter
 *	@sdesc: pointer to the software buffer descriptor
 */
static inline int get_buf_size(const struct adapter *adapter,
			       const struct rx_sw_desc *sdesc)
{
	const struct sge *s = &adapter->sge;

	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
}
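
/*
 * For example (a hypothetical configuration): with 4KB pages and
 * s->fl_pg_order == 4, a descriptor tagged RX_LARGE_BUF describes a
 * 4KB << 4 == 64KB buffer, while an untagged descriptor describes a
 * single 4KB page.
 */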

/**
 *	free_rx_bufs - free RX buffers on an SGE Free List
 *	@adapter: the adapter
 *	@fl: the SGE Free List to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE Free List RX queue.  The
 *	buffers must be made inaccessible to hardware before calling this
 *	function.
 */
static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
{
	while (n--) {
		struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

		if (is_buf_mapped(sdesc))
			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
				       get_buf_size(adapter, sdesc),
				       DMA_FROM_DEVICE);
		put_page(sdesc->page);
		sdesc->page = NULL;
		if (++fl->cidx == fl->size)
			fl->cidx = 0;
		fl->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current RX buffer on an SGE Free List
 *	@adapter: the adapter
 *	@fl: the SGE Free List
 *
 *	Unmap the current buffer on an SGE Free List RX queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 *	This is used predominantly to "transfer ownership" of an FL buffer
 *	to another entity (typically an skb's fragment list).
 */
static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
{
	struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

	if (is_buf_mapped(sdesc))
		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
			       get_buf_size(adapter, sdesc),
			       DMA_FROM_DEVICE);
	sdesc->page = NULL;
	if (++fl->cidx == fl->size)
		fl->cidx = 0;
	fl->avail--;
}

/**
 *	ring_fl_db - ring doorbell on free list
 *	@adapter: the adapter
 *	@fl: the Free List whose doorbell should be rung ...
 *
 *	Tell the Scatter Gather Engine that there are new free list entries
 *	available.
 */
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
	u32 val = adapter->params.arch.sge_fl_db;

	/* The SGE keeps track of its Producer and Consumer Indices in terms
	 * of Egress Queue Units so we can only tell it about integral numbers
	 * of multiples of Free List Entries per Egress Queue Units ...
	 */
	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
		if (is_t4(adapter->params.chip))
			val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
		else
			val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(fl->bar2_addr == NULL)) {
			t4_write_reg(adapter,
				     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
				     QID_V(fl->cntxt_id) | val);
		} else {
			writel(val | QID_V(fl->bar2_qid),
			       fl->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		fl->pend_cred %= FL_PER_EQ_UNIT;
	}
}
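
/*
 * Credit arithmetic example (assuming EQ_UNIT == 64, so FL_PER_EQ_UNIT ==
 * 8): with fl->pend_cred == 21 pending buffers, the doorbell advances PIDX
 * by 21 / 8 == 2 Egress Queue Units, and 21 % 8 == 5 buffers remain pending
 * until enough further credits accumulate to fill another full unit.
 */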

/**
 *	set_rx_sw_desc - initialize software RX buffer descriptor
 *	@sdesc: pointer to the software RX buffer descriptor
 *	@page: pointer to the page data structure backing the RX buffer
 *	@dma_addr: PCI DMA address (possibly with low-bit flags)
 */
static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
				  dma_addr_t dma_addr)
{
	sdesc->page = page;
	sdesc->dma_addr = dma_addr;
}

/*
 * Support for poisoning RX buffers ...
 */
#define POISON_BUF_VAL -1

static inline void poison_buf(struct page *page, size_t sz)
{
#if POISON_BUF_VAL >= 0
	memset(page_address(page), POISON_BUF_VAL, sz);
#endif
}

/**
 *	refill_fl - refill an SGE RX buffer ring
 *	@adapter: the adapter
 *	@fl: the Free List ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
 *	EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
 *	of buffers allocated.  If afterwards the queue is found critically low,
 *	mark it as starving in the bitmap of starving FLs.
 */
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
			      int n, gfp_t gfp)
{
	struct sge *s = &adapter->sge;
	struct page *page;
	dma_addr_t dma_addr;
	unsigned int cred = fl->avail;
	__be64 *d = &fl->desc[fl->pidx];
	struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];

	/*
	 * Sanity: ensure that the result of adding n Free List buffers
	 * won't result in wrapping the SGE's Producer Index around to
	 * its Consumer Index thereby indicating an empty Free List ...
	 */
	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);

	gfp |= __GFP_NOWARN;

	/*
	 * If we support large pages, prefer large buffers and fail over to
	 * small pages if we can't allocate large pages to satisfy the refill.
	 * If we don't support large pages, drop directly into the small page
	 * allocation code.
	 */
	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	while (n) {
		page = __dev_alloc_pages(gfp, s->fl_pg_order);
		if (unlikely(!page)) {
			/*
			 * We've failed in our attempt to allocate a "large
			 * page".  Fail over to the "small page" allocation
			 * below.
			 */
			fl->large_alloc_failed++;
			break;
		}
		poison_buf(page, PAGE_SIZE << s->fl_pg_order);

		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
					PAGE_SIZE << s->fl_pg_order,
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
			/*
			 * We've run out of DMA mapping space.  Free up the
			 * buffer and return with what we've managed to put
			 * into the free list.  We don't want to fail over to
			 * the small page allocation below in this case
			 * because DMA mapping resources are typically
			 * critical resources once they become scarce.
			 */
			__free_pages(page, s->fl_pg_order);
			goto out;
		}
		dma_addr |= RX_LARGE_BUF;
		*d++ = cpu_to_be64(dma_addr);

		set_rx_sw_desc(sdesc, page, dma_addr);
		sdesc++;

		fl->avail++;
		if (++fl->pidx == fl->size) {
			fl->pidx = 0;
			sdesc = fl->sdesc;
			d = fl->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		page = __dev_alloc_page(gfp);
		if (unlikely(!page)) {
			fl->alloc_failed++;
			break;
		}
		poison_buf(page, PAGE_SIZE);

		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
			put_page(page);
			break;
		}
		*d++ = cpu_to_be64(dma_addr);

		set_rx_sw_desc(sdesc, page, dma_addr);
		sdesc++;

		fl->avail++;
		if (++fl->pidx == fl->size) {
			fl->pidx = 0;
			sdesc = fl->sdesc;
			d = fl->desc;
		}
	}

out:
	/*
	 * Update our accounting state to incorporate the new Free List
	 * buffers, tell the hardware about them and return the number of
	 * buffers which we were able to allocate.
	 */
	cred = fl->avail - cred;
	fl->pend_cred += cred;
	ring_fl_db(adapter, fl);

	if (unlikely(fl_starving(adapter, fl))) {
		smp_wmb();
		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
	}

	return cred;
}

/*
 * Refill a Free List to its capacity or the Maximum Refill Increment,
 * whichever is smaller ...
 */
static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
{
	refill_fl(adapter, fl,
		  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@hwsize: the size of each hardware descriptor
 *	@swsize: the size of each software descriptor
 *	@busaddrp: the physical PCI bus address of the allocated ring
 *	@swringp: return address pointer for software ring
 *	@stat_size: extra space in hardware ring for status information
 *
 *	Allocates resources for an SGE descriptor ring, such as TX queues,
 *	free buffer lists, response queues, etc.  Each SGE ring requires
 *	space for its hardware descriptors plus, optionally, space for software
 *	state associated with each hardware entry (the metadata).  The function
 *	returns three values: the virtual address for the hardware ring (the
 *	return value of the function), the PCI bus address of the hardware
 *	ring (in *busaddrp), and the address of the software ring (in swringp).
 *	Both the hardware and software rings are returned zeroed out.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
			size_t swsize, dma_addr_t *busaddrp, void *swringp,
			size_t stat_size)
{
	/*
	 * Allocate the hardware ring and PCI DMA bus address space for said.
	 */
	size_t hwlen = nelem * hwsize + stat_size;
	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);

	if (!hwring)
		return NULL;

	/*
	 * If the caller wants a software ring, allocate it and return a
	 * pointer to it in *swringp.
	 */
	BUG_ON((swsize != 0) != (swringp != NULL));
	if (swsize) {
		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);

		if (!swring) {
			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
			return NULL;
		}
		*(void **)swringp = swring;
	}

	return hwring;
}
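
/*
 * Usage sketch (a hypothetical Free List allocation, for illustration only):
 * each FL hardware descriptor is a single __be64 DMA address and each entry
 * carries a struct rx_sw_desc of software state, so queue setup code would
 * call something along the lines of:
 *
 *	fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
 *			      sizeof(__be64), sizeof(struct rx_sw_desc),
 *			      &fl->addr, &fl->sdesc, stat_size);
 *	if (!fl->desc)
 *		return -ENOMEM;
 */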

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits (8-byte units) needed for a Direct
 *	Scatter/Gather List that can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/*
	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
	 * (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
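
/*
 * For example: sgl_len(3) computes (3 * 2) / 2 + (2 & 1) + 2 == 5 flits,
 * which matches the layout described above: 2 flits for the DSGL header,
 * Length0 and Address0, plus a { Length1, Length2 } flit and the Address1
 * and Address2 flits for the two remaining entries.
 */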

/**
 *	flits_to_desc - returns the num of TX descriptors for the given flits
 *	@flits: the number of flits
 *
 *	Returns the number of TX descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int flits)
{
	BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
	return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
}

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely as
 *	immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	/*
	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}

/**
 *	calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a TX Work Request for the
 *	given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/*
	 * If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/*
	 * Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
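
/*
 * Worked example (hypothetical sizes, for illustration only): a non-TSO
 * packet with linear data plus two page fragments needs sgl_len(2 + 1) == 5
 * flits for the SGL.  Assuming a 32-byte fw_eth_tx_pkt_vm_wr and a 16-byte
 * cpl_tx_pkt_core, the WR/CPL headers add (32 + 16) / 8 == 6 flits, for a
 * total of 11 flits, which flits_to_desc() rounds up to 2 TX descriptors
 * with the usual 8 flits per Egress Queue Unit.
 */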

/**
 *	write_sgl - populate a Scatter/Gather List for a packet
 *	@skb: the packet
 *	@tq: the TX queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of DMA bus addresses for the SGL elements
 *
 *	Generates a Scatter/Gather List for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a TX descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @tq->stat.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)tq->stat)) {
		unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)tq->stat;
		memcpy(tq->desc, (u8 *)buf + part0, part1);
		end = (void *)tq->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}

/**
 *	ring_tx_db - check and potentially ring a TX queue's doorbell
 *	@adapter: the adapter
 *	@tq: the TX queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a TX queue.
 */
static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
			      int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(tq->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);

		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
			     QID_V(tq->cntxt_id) | val);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single Egress Unit and the BAR2
		 * Queue ID is 0, we can use the Write Combining Doorbell
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && tq->bar2_qid == 0) {
			unsigned int index = (tq->pidx
					      ? (tq->pidx - 1)
					      : (tq->size - 1));
			__be64 *src = (__be64 *)&tq->desc[index];
			__be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
							 SGE_UDB_WCDOORBELL);
			unsigned int count = EQ_UNIT / sizeof(__be64);

			/* Copy the TX Descriptor in a tight loop in order to
			 * try to get it to the adapter in a single Write
			 * Combined transfer on the PCI-E Bus.  If the Write
			 * Combine fails (say because of an interrupt, etc.)
			 * the hardware will simply take the last write as a
			 * simple doorbell write with a PIDX Increment of 1
			 * and will fetch the TX Descriptor from memory via
			 * DMA.
			 */
			while (count) {
				/* the (__force u64) is because the compiler
				 * doesn't understand the endian swizzling
				 * going on
				 */
				writeq((__force u64)*src, dst);
				src++;
				dst++;
				count--;
			}
		} else
			writel(val | QID_V(tq->bar2_qid),
			       tq->bar2_addr + SGE_UDB_KDOORBELL);

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
1034df64e4d3SHariprasad Shenai 		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1035df64e4d3SHariprasad Shenai 		 * hardware DMA read the actual Work Request.
1036df64e4d3SHariprasad Shenai 		 */
1037df64e4d3SHariprasad Shenai 		wmb();
1038df64e4d3SHariprasad Shenai 	}
1039f7917c00SJeff Kirsher }
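
/*
 * Editor's sketch: the Write Combining path above in isolation.  One
 * EQ_UNIT (a full TX descriptor) is pushed as a burst of 64-bit MMIO
 * writes in the hope that the CPU emits a single write-combined PCI-E
 * transaction; if the combine breaks up, the hardware treats the last
 * write as a plain doorbell with a PIDX Increment of 1 and DMAs the
 * descriptor from memory instead.  The helper name is hypothetical.
 */
static inline void example_wc_push(const __be64 *src, __be64 __iomem *dst)
{
	unsigned int count = EQ_UNIT / sizeof(__be64);

	while (count--)
		writeq((__force u64)*src++, dst++);
}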
1040f7917c00SJeff Kirsher 
1041f7917c00SJeff Kirsher /**
1042f7917c00SJeff Kirsher  *	inline_tx_skb - inline a packet's data into TX descriptors
1043f7917c00SJeff Kirsher  *	@skb: the packet
1044f7917c00SJeff Kirsher  *	@tq: the TX queue where the packet will be inlined
1045f7917c00SJeff Kirsher  *	@pos: starting position in the TX queue to inline the packet
1046f7917c00SJeff Kirsher  *
1047f7917c00SJeff Kirsher  *	Inline a packet's contents directly into TX descriptors, starting at
1048f7917c00SJeff Kirsher  *	the given position within the TX DMA ring.
1049f7917c00SJeff Kirsher  *	Most of the complexity of this operation is dealing with wrap arounds
1050f7917c00SJeff Kirsher  *	in the middle of the packet we want to inline.
1051f7917c00SJeff Kirsher  */
1052f7917c00SJeff Kirsher static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
1053f7917c00SJeff Kirsher 			  void *pos)
1054f7917c00SJeff Kirsher {
1055f7917c00SJeff Kirsher 	u64 *p;
1056f7917c00SJeff Kirsher 	int left = (void *)tq->stat - pos;
1057f7917c00SJeff Kirsher 
1058f7917c00SJeff Kirsher 	if (likely(skb->len <= left)) {
1059f7917c00SJeff Kirsher 		if (likely(!skb->data_len))
1060f7917c00SJeff Kirsher 			skb_copy_from_linear_data(skb, pos, skb->len);
1061f7917c00SJeff Kirsher 		else
1062f7917c00SJeff Kirsher 			skb_copy_bits(skb, 0, pos, skb->len);
1063f7917c00SJeff Kirsher 		pos += skb->len;
1064f7917c00SJeff Kirsher 	} else {
1065f7917c00SJeff Kirsher 		skb_copy_bits(skb, 0, pos, left);
1066f7917c00SJeff Kirsher 		skb_copy_bits(skb, left, tq->desc, skb->len - left);
1067f7917c00SJeff Kirsher 		pos = (void *)tq->desc + (skb->len - left);
1068f7917c00SJeff Kirsher 	}
1069f7917c00SJeff Kirsher 
1070f7917c00SJeff Kirsher 	/* 0-pad to multiple of 16 */
1071f7917c00SJeff Kirsher 	p = PTR_ALIGN(pos, 8);
1072f7917c00SJeff Kirsher 	if ((uintptr_t)p & 8)
1073f7917c00SJeff Kirsher 		*p = 0;
1074f7917c00SJeff Kirsher }
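
/*
 * Editor's sketch: the 0-padding rule shared by write_sgl() and
 * inline_tx_skb().  The hardware consumes Work Requests in 16-byte
 * units, so after rounding the write position up to 8 bytes we zero
 * one further u64 whenever that position is 8 (mod 16).  The helper
 * name is hypothetical.
 */
static inline void example_pad_to_16(void *pos)
{
	u64 *p = PTR_ALIGN(pos, 8);

	if ((uintptr_t)p & 8)		/* 8 mod 16: one u64 of zeros */
		*p = 0;
}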
1075f7917c00SJeff Kirsher 
1076f7917c00SJeff Kirsher /*
1077f7917c00SJeff Kirsher  * Figure out what HW csum a packet wants and return the appropriate control
1078f7917c00SJeff Kirsher  * bits.
1079f7917c00SJeff Kirsher  */
108041fc2e41SHariprasad Shenai static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1081f7917c00SJeff Kirsher {
1082f7917c00SJeff Kirsher 	int csum_type;
1083f7917c00SJeff Kirsher 	const struct iphdr *iph = ip_hdr(skb);
1084f7917c00SJeff Kirsher 
1085f7917c00SJeff Kirsher 	if (iph->version == 4) {
1086f7917c00SJeff Kirsher 		if (iph->protocol == IPPROTO_TCP)
1087f7917c00SJeff Kirsher 			csum_type = TX_CSUM_TCPIP;
1088f7917c00SJeff Kirsher 		else if (iph->protocol == IPPROTO_UDP)
1089f7917c00SJeff Kirsher 			csum_type = TX_CSUM_UDPIP;
1090f7917c00SJeff Kirsher 		else {
1091f7917c00SJeff Kirsher nocsum:
1092f7917c00SJeff Kirsher 			/*
1093f7917c00SJeff Kirsher 			 * unknown protocol, disable HW csum
1094f7917c00SJeff Kirsher 			 * and hope a bad packet is detected
1095f7917c00SJeff Kirsher 			 */
10961ecc7b7aSHariprasad Shenai 			return TXPKT_L4CSUM_DIS_F;
1097f7917c00SJeff Kirsher 		}
1098f7917c00SJeff Kirsher 	} else {
1099f7917c00SJeff Kirsher 		/*
1100f7917c00SJeff Kirsher 		 * this doesn't work with extension headers
1101f7917c00SJeff Kirsher 		 */
1102f7917c00SJeff Kirsher 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1103f7917c00SJeff Kirsher 
1104f7917c00SJeff Kirsher 		if (ip6h->nexthdr == IPPROTO_TCP)
1105f7917c00SJeff Kirsher 			csum_type = TX_CSUM_TCPIP6;
1106f7917c00SJeff Kirsher 		else if (ip6h->nexthdr == IPPROTO_UDP)
1107f7917c00SJeff Kirsher 			csum_type = TX_CSUM_UDPIP6;
1108f7917c00SJeff Kirsher 		else
1109f7917c00SJeff Kirsher 			goto nocsum;
1110f7917c00SJeff Kirsher 	}
1111f7917c00SJeff Kirsher 
111241fc2e41SHariprasad Shenai 	if (likely(csum_type >= TX_CSUM_TCPIP)) {
111341fc2e41SHariprasad Shenai 		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
111441fc2e41SHariprasad Shenai 		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
111541fc2e41SHariprasad Shenai 
111641fc2e41SHariprasad Shenai 		if (chip <= CHELSIO_T5)
111741fc2e41SHariprasad Shenai 			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
111841fc2e41SHariprasad Shenai 		else
111941fc2e41SHariprasad Shenai 			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
112041fc2e41SHariprasad Shenai 		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
112141fc2e41SHariprasad Shenai 	} else {
1122f7917c00SJeff Kirsher 		int start = skb_transport_offset(skb);
1123f7917c00SJeff Kirsher 
11241ecc7b7aSHariprasad Shenai 		return TXPKT_CSUM_TYPE_V(csum_type) |
11251ecc7b7aSHariprasad Shenai 			TXPKT_CSUM_START_V(start) |
11261ecc7b7aSHariprasad Shenai 			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1127f7917c00SJeff Kirsher 	}
1128f7917c00SJeff Kirsher }
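
/*
 * Editor's usage sketch: how the non-LSO transmit path below folds the
 * result of hwcsum() into the CPL_TX_PKT_XT control word.  Checksum
 * offload is requested only for CHECKSUM_PARTIAL skbs; everything else
 * disables both IP and L4 checksum insertion.  Hypothetical helper.
 */
static inline u64 example_tx_csum_ctrl(struct adapter *adapter,
				       const struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		return hwcsum(adapter->params.chip, skb) | TXPKT_IPCSUM_DIS_F;
	return TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
}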
1129f7917c00SJeff Kirsher 
1130f7917c00SJeff Kirsher /*
1131f7917c00SJeff Kirsher  * Stop an Ethernet TX queue and record that state change.
1132f7917c00SJeff Kirsher  */
1133f7917c00SJeff Kirsher static void txq_stop(struct sge_eth_txq *txq)
1134f7917c00SJeff Kirsher {
1135f7917c00SJeff Kirsher 	netif_tx_stop_queue(txq->txq);
1136f7917c00SJeff Kirsher 	txq->q.stops++;
1137f7917c00SJeff Kirsher }
1138f7917c00SJeff Kirsher 
1139f7917c00SJeff Kirsher /*
1140f7917c00SJeff Kirsher  * Advance our software state for a TX queue by adding n in use descriptors.
1141f7917c00SJeff Kirsher  */
1142f7917c00SJeff Kirsher static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1143f7917c00SJeff Kirsher {
1144f7917c00SJeff Kirsher 	tq->in_use += n;
1145f7917c00SJeff Kirsher 	tq->pidx += n;
1146f7917c00SJeff Kirsher 	if (tq->pidx >= tq->size)
1147f7917c00SJeff Kirsher 		tq->pidx -= tq->size;
1148f7917c00SJeff Kirsher }
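
/*
 * Editor's sketch: txq_advance() keeps pidx inside [0, size) with a
 * conditional subtract rather than a modulo, which is safe because a
 * single Work Request never adds more than the ring size.  Equivalent,
 * more explicit form (hypothetical):
 */
static inline unsigned int example_ring_advance(unsigned int pidx,
						unsigned int n,
						unsigned int size)
{
	pidx += n;
	return pidx >= size ? pidx - size : pidx;
}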
1149f7917c00SJeff Kirsher 
1150f7917c00SJeff Kirsher /**
1151f7917c00SJeff Kirsher  *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
1152f7917c00SJeff Kirsher  *	@skb: the packet
1153f7917c00SJeff Kirsher  *	@dev: the egress net device
1154f7917c00SJeff Kirsher  *
1155f7917c00SJeff Kirsher  *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
1156f7917c00SJeff Kirsher  */
11572a784784SLuc Van Oostenryck netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1158f7917c00SJeff Kirsher {
1159f7917c00SJeff Kirsher 	u32 wr_mid;
1160f7917c00SJeff Kirsher 	u64 cntrl, *end;
1161637d3e99SHariprasad Shenai 	int qidx, credits, max_pkt_len;
1162f7917c00SJeff Kirsher 	unsigned int flits, ndesc;
1163f7917c00SJeff Kirsher 	struct adapter *adapter;
1164f7917c00SJeff Kirsher 	struct sge_eth_txq *txq;
1165f7917c00SJeff Kirsher 	const struct port_info *pi;
1166f7917c00SJeff Kirsher 	struct fw_eth_tx_pkt_vm_wr *wr;
1167f7917c00SJeff Kirsher 	struct cpl_tx_pkt_core *cpl;
1168f7917c00SJeff Kirsher 	const struct skb_shared_info *ssi;
1169f7917c00SJeff Kirsher 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1170641d3ef0SKees Cook 	const size_t fw_hdr_copy_len = sizeof(wr->firmware);
1171f7917c00SJeff Kirsher 
1172f7917c00SJeff Kirsher 	/*
1173f7917c00SJeff Kirsher 	 * The chip minimum packet length is 10 octets but the firmware
1174f7917c00SJeff Kirsher 	 * command that we are using requires that we copy the Ethernet header
1175f7917c00SJeff Kirsher 	 * (including the VLAN tag) into the Work Request header, so we
1176f7917c00SJeff Kirsher 	 * reject anything smaller than that ...
1177f7917c00SJeff Kirsher 	 */
1178f7917c00SJeff Kirsher 	if (unlikely(skb->len < fw_hdr_copy_len))
1179f7917c00SJeff Kirsher 		goto out_free;
1180f7917c00SJeff Kirsher 
1181637d3e99SHariprasad Shenai 	/* Discard the packet if the length is greater than mtu */
1182637d3e99SHariprasad Shenai 	max_pkt_len = ETH_HLEN + dev->mtu;
11838d09e6b8SHariprasad Shenai 	if (skb_vlan_tagged(skb))
1184637d3e99SHariprasad Shenai 		max_pkt_len += VLAN_HLEN;
1185637d3e99SHariprasad Shenai 	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1186637d3e99SHariprasad Shenai 		goto out_free;
1187637d3e99SHariprasad Shenai 
1188f7917c00SJeff Kirsher 	/*
1189f7917c00SJeff Kirsher 	 * Figure out which TX Queue we're going to use.
1190f7917c00SJeff Kirsher 	 */
1191f7917c00SJeff Kirsher 	pi = netdev_priv(dev);
1192f7917c00SJeff Kirsher 	adapter = pi->adapter;
1193f7917c00SJeff Kirsher 	qidx = skb_get_queue_mapping(skb);
1194f7917c00SJeff Kirsher 	BUG_ON(qidx >= pi->nqsets);
1195f7917c00SJeff Kirsher 	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1196f7917c00SJeff Kirsher 
11979d5fd927SGanesh Goudar 	if (pi->vlan_id && !skb_vlan_tag_present(skb))
11989d5fd927SGanesh Goudar 		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
11999d5fd927SGanesh Goudar 				       pi->vlan_id);
12009d5fd927SGanesh Goudar 
1201f7917c00SJeff Kirsher 	/*
1202f7917c00SJeff Kirsher 	 * Take this opportunity to reclaim any TX Descriptors whose DMA
1203f7917c00SJeff Kirsher 	 * transfers have completed.
1204f7917c00SJeff Kirsher 	 */
1205f7917c00SJeff Kirsher 	reclaim_completed_tx(adapter, &txq->q, true);
1206f7917c00SJeff Kirsher 
1207f7917c00SJeff Kirsher 	/*
1208f7917c00SJeff Kirsher 	 * Calculate the number of flits and TX Descriptors we're going to
1209f7917c00SJeff Kirsher 	 * need along with how many TX Descriptors will be left over after
1210f7917c00SJeff Kirsher 	 * we inject our Work Request.
1211f7917c00SJeff Kirsher 	 */
1212f7917c00SJeff Kirsher 	flits = calc_tx_flits(skb);
1213f7917c00SJeff Kirsher 	ndesc = flits_to_desc(flits);
1214f7917c00SJeff Kirsher 	credits = txq_avail(&txq->q) - ndesc;
1215f7917c00SJeff Kirsher 
1216f7917c00SJeff Kirsher 	if (unlikely(credits < 0)) {
1217f7917c00SJeff Kirsher 		/*
1218f7917c00SJeff Kirsher 		 * Not enough room for this packet's Work Request.  Stop the
1219f7917c00SJeff Kirsher 		 * TX Queue and return a "busy" condition.  The queue will get
1220f7917c00SJeff Kirsher 		 * started later on when the firmware informs us that space
1221f7917c00SJeff Kirsher 		 * has opened up.
1222f7917c00SJeff Kirsher 		 */
1223f7917c00SJeff Kirsher 		txq_stop(txq);
1224f7917c00SJeff Kirsher 		dev_err(adapter->pdev_dev,
1225f7917c00SJeff Kirsher 			"%s: TX ring %u full while queue awake!\n",
1226f7917c00SJeff Kirsher 			dev->name, qidx);
1227f7917c00SJeff Kirsher 		return NETDEV_TX_BUSY;
1228f7917c00SJeff Kirsher 	}
1229f7917c00SJeff Kirsher 
1230f7917c00SJeff Kirsher 	if (!is_eth_imm(skb) &&
1231f7917c00SJeff Kirsher 	    unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1232f7917c00SJeff Kirsher 		/*
1233f7917c00SJeff Kirsher 		 * We need to map the skb into PCI DMA space (because it can't
1234f7917c00SJeff Kirsher 		 * be in-lined directly into the Work Request) and the mapping
1235f7917c00SJeff Kirsher 		 * operation failed.  Record the error and drop the packet.
1236f7917c00SJeff Kirsher 		 */
1237f7917c00SJeff Kirsher 		txq->mapping_err++;
1238f7917c00SJeff Kirsher 		goto out_free;
1239f7917c00SJeff Kirsher 	}
1240f7917c00SJeff Kirsher 
1241e2ac9628SHariprasad Shenai 	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1242f7917c00SJeff Kirsher 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1243f7917c00SJeff Kirsher 		/*
1244f7917c00SJeff Kirsher 		 * After we're done injecting the Work Request for this
1245f7917c00SJeff Kirsher 		 * packet, we'll be below our "stop threshold" so stop the TX
1246f7917c00SJeff Kirsher 		 * Queue now and schedule a request for an SGE Egress Queue
1247f7917c00SJeff Kirsher 		 * Update message.  The queue will get started later on when
1248f7917c00SJeff Kirsher 		 * the firmware processes this Work Request and sends us an
1249f7917c00SJeff Kirsher 		 * Egress Queue Status Update message indicating that space
1250f7917c00SJeff Kirsher 		 * has opened up.
1251f7917c00SJeff Kirsher 		 */
1252f7917c00SJeff Kirsher 		txq_stop(txq);
1253e2ac9628SHariprasad Shenai 		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1254f7917c00SJeff Kirsher 	}
1255f7917c00SJeff Kirsher 
1256f7917c00SJeff Kirsher 	/*
1257f7917c00SJeff Kirsher 	 * Start filling in our Work Request.  Note that we do _not_ handle
1258f7917c00SJeff Kirsher 	 * the WR Header wrapping around the TX Descriptor Ring.  If our
1259f7917c00SJeff Kirsher 	 * maximum header size ever exceeds one TX Descriptor, we'll need to
1260f7917c00SJeff Kirsher 	 * do something else here.
1261f7917c00SJeff Kirsher 	 */
1262f7917c00SJeff Kirsher 	BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1263f7917c00SJeff Kirsher 	wr = (void *)&txq->q.desc[txq->q.pidx];
1264f7917c00SJeff Kirsher 	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
12652ff2acf1SHariprasad Shenai 	wr->r3[0] = cpu_to_be32(0);
12662ff2acf1SHariprasad Shenai 	wr->r3[1] = cpu_to_be32(0);
1267641d3ef0SKees Cook 	skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
1268f7917c00SJeff Kirsher 	end = (u64 *)wr + flits;
1269f7917c00SJeff Kirsher 
1270f7917c00SJeff Kirsher 	/*
1271f7917c00SJeff Kirsher 	 * If this is a Large Send Offload packet we'll put in an LSO CPL
1272f7917c00SJeff Kirsher 	 * message with an encapsulated TX Packet CPL message.  Otherwise we
1273f7917c00SJeff Kirsher 	 * just use a TX Packet CPL message.
1274f7917c00SJeff Kirsher 	 */
1275f7917c00SJeff Kirsher 	ssi = skb_shinfo(skb);
1276f7917c00SJeff Kirsher 	if (ssi->gso_size) {
1277f7917c00SJeff Kirsher 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1278f7917c00SJeff Kirsher 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1279f7917c00SJeff Kirsher 		int l3hdr_len = skb_network_header_len(skb);
1280f7917c00SJeff Kirsher 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1281f7917c00SJeff Kirsher 
1282f7917c00SJeff Kirsher 		wr->op_immdlen =
1283e2ac9628SHariprasad Shenai 			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1284e2ac9628SHariprasad Shenai 				    FW_WR_IMMDLEN_V(sizeof(*lso) +
1285f7917c00SJeff Kirsher 						    sizeof(*cpl)));
1286f7917c00SJeff Kirsher 		/*
1287f7917c00SJeff Kirsher 		 * Fill in the LSO CPL message.
1288f7917c00SJeff Kirsher 		 */
1289f7917c00SJeff Kirsher 		lso->lso_ctrl =
12901ecc7b7aSHariprasad Shenai 			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
12911ecc7b7aSHariprasad Shenai 				    LSO_FIRST_SLICE_F |
12921ecc7b7aSHariprasad Shenai 				    LSO_LAST_SLICE_F |
12931ecc7b7aSHariprasad Shenai 				    LSO_IPV6_V(v6) |
12941ecc7b7aSHariprasad Shenai 				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
12951ecc7b7aSHariprasad Shenai 				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
12961ecc7b7aSHariprasad Shenai 				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1297f7917c00SJeff Kirsher 		lso->ipid_ofst = cpu_to_be16(0);
1298f7917c00SJeff Kirsher 		lso->mss = cpu_to_be16(ssi->gso_size);
1299f7917c00SJeff Kirsher 		lso->seqno_offset = cpu_to_be32(0);
13007207c0d1SHariprasad Shenai 		if (is_t4(adapter->params.chip))
1301f7917c00SJeff Kirsher 			lso->len = cpu_to_be32(skb->len);
13027207c0d1SHariprasad Shenai 		else
13031ecc7b7aSHariprasad Shenai 			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1304f7917c00SJeff Kirsher 
1305f7917c00SJeff Kirsher 		/*
1306f7917c00SJeff Kirsher 		 * Set up TX Packet CPL pointer, control word and perform
1307f7917c00SJeff Kirsher 		 * accounting.
1308f7917c00SJeff Kirsher 		 */
1309f7917c00SJeff Kirsher 		cpl = (void *)(lso + 1);
131041fc2e41SHariprasad Shenai 
131141fc2e41SHariprasad Shenai 		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
131241fc2e41SHariprasad Shenai 			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
131341fc2e41SHariprasad Shenai 		else
131441fc2e41SHariprasad Shenai 			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
131541fc2e41SHariprasad Shenai 
131641fc2e41SHariprasad Shenai 		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
13171ecc7b7aSHariprasad Shenai 					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
131841fc2e41SHariprasad Shenai 			 TXPKT_IPHDR_LEN_V(l3hdr_len);
1319f7917c00SJeff Kirsher 		txq->tso++;
1320f7917c00SJeff Kirsher 		txq->tx_cso += ssi->gso_segs;
1321f7917c00SJeff Kirsher 	} else {
1322f7917c00SJeff Kirsher 		int len;
1323f7917c00SJeff Kirsher 
1324f7917c00SJeff Kirsher 		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1325f7917c00SJeff Kirsher 		wr->op_immdlen =
1326e2ac9628SHariprasad Shenai 			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1327e2ac9628SHariprasad Shenai 				    FW_WR_IMMDLEN_V(len));
1328f7917c00SJeff Kirsher 
1329f7917c00SJeff Kirsher 		/*
1330f7917c00SJeff Kirsher 		 * Set up TX Packet CPL pointer, control word and perform
1331f7917c00SJeff Kirsher 		 * accounting.
1332f7917c00SJeff Kirsher 		 */
1333f7917c00SJeff Kirsher 		cpl = (void *)(wr + 1);
1334f7917c00SJeff Kirsher 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
133541fc2e41SHariprasad Shenai 			cntrl = hwcsum(adapter->params.chip, skb) |
133641fc2e41SHariprasad Shenai 				TXPKT_IPCSUM_DIS_F;
1337f7917c00SJeff Kirsher 			txq->tx_cso++;
1338f7917c00SJeff Kirsher 		} else
13391ecc7b7aSHariprasad Shenai 			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1340f7917c00SJeff Kirsher 	}
1341f7917c00SJeff Kirsher 
1342f7917c00SJeff Kirsher 	/*
1343f7917c00SJeff Kirsher 	 * If there's a VLAN tag present, add that to the list of things to
1344f7917c00SJeff Kirsher 	 * do in this Work Request.
1345f7917c00SJeff Kirsher 	 */
1346df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
1347f7917c00SJeff Kirsher 		txq->vlan_ins++;
13481ecc7b7aSHariprasad Shenai 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1349f7917c00SJeff Kirsher 	}
1350f7917c00SJeff Kirsher 
1351f7917c00SJeff Kirsher 	/*
1352f7917c00SJeff Kirsher 	 * Fill in the TX Packet CPL message header.
1353f7917c00SJeff Kirsher 	 */
13541ecc7b7aSHariprasad Shenai 	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
13551ecc7b7aSHariprasad Shenai 				 TXPKT_INTF_V(pi->port_id) |
13561ecc7b7aSHariprasad Shenai 				 TXPKT_PF_V(0));
1357f7917c00SJeff Kirsher 	cpl->pack = cpu_to_be16(0);
1358f7917c00SJeff Kirsher 	cpl->len = cpu_to_be16(skb->len);
1359f7917c00SJeff Kirsher 	cpl->ctrl1 = cpu_to_be64(cntrl);
1360f7917c00SJeff Kirsher 
1361f7917c00SJeff Kirsher #ifdef T4_TRACE
1362f7917c00SJeff Kirsher 	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1363f7917c00SJeff Kirsher 		  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1364f7917c00SJeff Kirsher 		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1365f7917c00SJeff Kirsher #endif
1366f7917c00SJeff Kirsher 
1367f7917c00SJeff Kirsher 	/*
1368f7917c00SJeff Kirsher 	 * Fill in the body of the TX Packet CPL message with either in-lined
1369f7917c00SJeff Kirsher 	 * data or a Scatter/Gather List.
1370f7917c00SJeff Kirsher 	 */
1371f7917c00SJeff Kirsher 	if (is_eth_imm(skb)) {
1372f7917c00SJeff Kirsher 		/*
1373f7917c00SJeff Kirsher 		 * In-line the packet's data and free the skb since we don't
1374f7917c00SJeff Kirsher 		 * need it any longer.
1375f7917c00SJeff Kirsher 		 */
1376f7917c00SJeff Kirsher 		inline_tx_skb(skb, &txq->q, cpl + 1);
137742ffda5fSEric W. Biederman 		dev_consume_skb_any(skb);
1378f7917c00SJeff Kirsher 	} else {
1379f7917c00SJeff Kirsher 		/*
1380f7917c00SJeff Kirsher 		 * Write the skb's Scatter/Gather list into the TX Packet CPL
1381f7917c00SJeff Kirsher 		 * message and retain a pointer to the skb so we can free it
1382f7917c00SJeff Kirsher 		 * later when its DMA completes.  (We store the skb pointer
1383f7917c00SJeff Kirsher 		 * in the Software Descriptor corresponding to the last TX
1384f7917c00SJeff Kirsher 		 * Descriptor used by the Work Request.)
1385f7917c00SJeff Kirsher 		 *
1386f7917c00SJeff Kirsher 		 * The retained skb will be freed when the corresponding TX
1387f7917c00SJeff Kirsher 		 * Descriptors are reclaimed after their DMAs complete.
1388f7917c00SJeff Kirsher 		 * However, this could take quite a while since, in general,
1389f7917c00SJeff Kirsher 		 * the hardware is set up to be lazy about sending DMA
1390f7917c00SJeff Kirsher 		 * completion notifications to us and we mostly perform TX
1391f7917c00SJeff Kirsher 		 * reclaims in the transmit routine.
1392f7917c00SJeff Kirsher 		 *
1393f7917c00SJeff Kirsher 		 * This is good for performance but means that we rely on new
1394f7917c00SJeff Kirsher 		 * TX packets arriving to run the destructors of completed
1395f7917c00SJeff Kirsher 		 * packets, which open up space in their sockets' send queues.
1396f7917c00SJeff Kirsher 		 * Sometimes we do not get such new packets causing TX to
1397f7917c00SJeff Kirsher 		 * stall.  A single UDP transmitter is a good example of this
1398f7917c00SJeff Kirsher 		 * situation.  We have a clean up timer that periodically
1399f7917c00SJeff Kirsher 		 * reclaims completed packets but it doesn't run often enough
1400f7917c00SJeff Kirsher 		 * (nor do we want it to) to prevent lengthy stalls.  A
1401f7917c00SJeff Kirsher 		 * solution to this problem is to run the destructor early,
1402f7917c00SJeff Kirsher 		 * after the packet is queued but before it's DMAd.  A con is
1403f7917c00SJeff Kirsher 		 * that we lie to socket memory accounting, but the amount of
1404f7917c00SJeff Kirsher 		 * extra memory is reasonable (limited by the number of TX
1405f7917c00SJeff Kirsher 		 * descriptors), the packets do actually get freed quickly by
1406f7917c00SJeff Kirsher 		 * new packets almost always, and for protocols like TCP that
1407f7917c00SJeff Kirsher 		 * wait for acks to really free up the data the extra memory
1408f7917c00SJeff Kirsher 		 * is even less.  On the positive side we run the destructors
1409f7917c00SJeff Kirsher 		 * on the sending CPU rather than on a potentially different
1410f7917c00SJeff Kirsher 		 * completing CPU, usually a good thing.
1411f7917c00SJeff Kirsher 		 *
1412f7917c00SJeff Kirsher 		 * Run the destructor before telling the DMA engine about the
1413f7917c00SJeff Kirsher 		 * packet to make sure it doesn't complete and get freed
1414f7917c00SJeff Kirsher 		 * prematurely.
1415f7917c00SJeff Kirsher 		 */
1416f7917c00SJeff Kirsher 		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1417f7917c00SJeff Kirsher 		struct sge_txq *tq = &txq->q;
1418f7917c00SJeff Kirsher 		int last_desc;
1419f7917c00SJeff Kirsher 
1420f7917c00SJeff Kirsher 		/*
1421f7917c00SJeff Kirsher 		 * If the Work Request header was an exact multiple of our TX
1422f7917c00SJeff Kirsher 		 * Descriptor length, then it's possible that the starting SGL
1423f7917c00SJeff Kirsher 		 * pointer lines up exactly with the end of our TX Descriptor
1424f7917c00SJeff Kirsher 		 * ring.  If that's the case, wrap around to the beginning
1425f7917c00SJeff Kirsher 		 * here ...
1426f7917c00SJeff Kirsher 		 */
1427f7917c00SJeff Kirsher 		if (unlikely((void *)sgl == (void *)tq->stat)) {
1428f7917c00SJeff Kirsher 			sgl = (void *)tq->desc;
142964699336SJoe Perches 			end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1430f7917c00SJeff Kirsher 		}
1431f7917c00SJeff Kirsher 
1432f7917c00SJeff Kirsher 		write_sgl(skb, tq, sgl, end, 0, addr);
1433f7917c00SJeff Kirsher 		skb_orphan(skb);
1434f7917c00SJeff Kirsher 
1435f7917c00SJeff Kirsher 		last_desc = tq->pidx + ndesc - 1;
1436f7917c00SJeff Kirsher 		if (last_desc >= tq->size)
1437f7917c00SJeff Kirsher 			last_desc -= tq->size;
1438f7917c00SJeff Kirsher 		tq->sdesc[last_desc].skb = skb;
1439f7917c00SJeff Kirsher 		tq->sdesc[last_desc].sgl = sgl;
1440f7917c00SJeff Kirsher 	}
1441f7917c00SJeff Kirsher 
1442f7917c00SJeff Kirsher 	/*
1443f7917c00SJeff Kirsher 	 * Advance our internal TX Queue state, tell the hardware about
1444f7917c00SJeff Kirsher 	 * the new TX descriptors and return success.
1445f7917c00SJeff Kirsher 	 */
1446f7917c00SJeff Kirsher 	txq_advance(&txq->q, ndesc);
1447860e9538SFlorian Westphal 	netif_trans_update(dev);
1448f7917c00SJeff Kirsher 	ring_tx_db(adapter, &txq->q, ndesc);
1449f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1450f7917c00SJeff Kirsher 
1451f7917c00SJeff Kirsher out_free:
1452f7917c00SJeff Kirsher 	/*
1453f7917c00SJeff Kirsher 	 * An error of some sort happened.  Free the TX skb and tell the
1454f7917c00SJeff Kirsher 	 * OS that we've "dealt" with the packet ...
1455f7917c00SJeff Kirsher 	 */
145642ffda5fSEric W. Biederman 	dev_kfree_skb_any(skb);
1457f7917c00SJeff Kirsher 	return NETDEV_TX_OK;
1458f7917c00SJeff Kirsher }
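
/*
 * Editor's sketch: the two queue-stop decisions in t4vf_eth_xmit()
 * above collapsed into one predicate.  A single comparison covers both
 * cases: a Work Request that doesn't fit yields negative credits, and
 * anything below ETHTXQ_STOP_THRES leaves too little headroom for a
 * worst-case Ethernet WR.  Hypothetical helper, shown for clarity only.
 */
static inline bool example_txq_should_stop(const struct sge_txq *tq,
					   unsigned int ndesc)
{
	int credits = txq_avail(tq) - ndesc;

	return credits < ETHTXQ_STOP_THRES;
}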
1459f7917c00SJeff Kirsher 
1460f7917c00SJeff Kirsher /**
1461a0006a86SIan Campbell  *	copy_frags - copy fragments from gather list into skb_shared_info
1462a0006a86SIan Campbell  *	@skb: destination skb
1463a0006a86SIan Campbell  *	@gl: source internal packet gather list
1464a0006a86SIan Campbell  *	@offset: packet start offset in first page
1465a0006a86SIan Campbell  *
1466a0006a86SIan Campbell  *	Copy an internal packet gather list into a Linux skb_shared_info
1467a0006a86SIan Campbell  *	structure.
1468a0006a86SIan Campbell  */
1469a0006a86SIan Campbell static inline void copy_frags(struct sk_buff *skb,
1470a0006a86SIan Campbell 			      const struct pkt_gl *gl,
1471a0006a86SIan Campbell 			      unsigned int offset)
1472a0006a86SIan Campbell {
1473a0006a86SIan Campbell 	int i;
1474a0006a86SIan Campbell 
1475a0006a86SIan Campbell 	/* usually there's just one frag */
1476a0006a86SIan Campbell 	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1477a0006a86SIan Campbell 			     gl->frags[0].offset + offset,
1478a0006a86SIan Campbell 			     gl->frags[0].size - offset);
1479a0006a86SIan Campbell 	skb_shinfo(skb)->nr_frags = gl->nfrags;
1480a0006a86SIan Campbell 	for (i = 1; i < gl->nfrags; i++)
1481a0006a86SIan Campbell 		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1482a0006a86SIan Campbell 				     gl->frags[i].offset,
1483a0006a86SIan Campbell 				     gl->frags[i].size);
1484a0006a86SIan Campbell 
1485a0006a86SIan Campbell 	/* get a reference to the last page, we don't own it */
1486a0006a86SIan Campbell 	get_page(gl->frags[gl->nfrags - 1].page);
1487a0006a86SIan Campbell }
1488a0006a86SIan Campbell 
1489a0006a86SIan Campbell /**
1490f7917c00SJeff Kirsher  *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1491f7917c00SJeff Kirsher  *	@gl: the gather list
1492f7917c00SJeff Kirsher  *	@skb_len: size of sk_buff main body if it carries fragments
1493f7917c00SJeff Kirsher  *	@pull_len: amount of data to move to the sk_buff's main body
1494f7917c00SJeff Kirsher  *
1495f7917c00SJeff Kirsher  *	Builds an sk_buff from the given packet gather list.  Returns the
1496f7917c00SJeff Kirsher  *	sk_buff or %NULL if sk_buff allocation failed.
1497f7917c00SJeff Kirsher  */
14988a67d1c6SSachin Kamat static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
14998a67d1c6SSachin Kamat 					 unsigned int skb_len,
15008a67d1c6SSachin Kamat 					 unsigned int pull_len)
1501f7917c00SJeff Kirsher {
1502f7917c00SJeff Kirsher 	struct sk_buff *skb;
1503f7917c00SJeff Kirsher 
1504f7917c00SJeff Kirsher 	/*
1505f7917c00SJeff Kirsher 	 * If the ingress packet is small enough, allocate an skb large enough
1506f7917c00SJeff Kirsher 	 * for all of the data and copy it inline.  Otherwise, allocate an skb
1507f7917c00SJeff Kirsher 	 * with enough room to pull in the header and reference the rest of
1508f7917c00SJeff Kirsher 	 * the data via the skb fragment list.
1509f7917c00SJeff Kirsher 	 *
1510f7917c00SJeff Kirsher 	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
1511f7917c00SJeff Kirsher 	 * buffer size, which is expected since buffers are at least
1512f7917c00SJeff Kirsher 	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
1513f7917c00SJeff Kirsher 	 * fragment.
1514f7917c00SJeff Kirsher 	 */
1515f7917c00SJeff Kirsher 	if (gl->tot_len <= RX_COPY_THRES) {
1516f7917c00SJeff Kirsher 		/* small packets have only one fragment */
1517f7917c00SJeff Kirsher 		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1518f7917c00SJeff Kirsher 		if (unlikely(!skb))
1519f7917c00SJeff Kirsher 			goto out;
1520f7917c00SJeff Kirsher 		__skb_put(skb, gl->tot_len);
1521f7917c00SJeff Kirsher 		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1522f7917c00SJeff Kirsher 	} else {
1523f7917c00SJeff Kirsher 		skb = alloc_skb(skb_len, GFP_ATOMIC);
1524f7917c00SJeff Kirsher 		if (unlikely(!skb))
1525f7917c00SJeff Kirsher 			goto out;
1526f7917c00SJeff Kirsher 		__skb_put(skb, pull_len);
1527f7917c00SJeff Kirsher 		skb_copy_to_linear_data(skb, gl->va, pull_len);
1528f7917c00SJeff Kirsher 
1529a0006a86SIan Campbell 		copy_frags(skb, gl, pull_len);
1530f7917c00SJeff Kirsher 		skb->len = gl->tot_len;
1531f7917c00SJeff Kirsher 		skb->data_len = skb->len - pull_len;
1532f7917c00SJeff Kirsher 		skb->truesize += skb->data_len;
1533f7917c00SJeff Kirsher 	}
1534f7917c00SJeff Kirsher 
1535f7917c00SJeff Kirsher out:
1536f7917c00SJeff Kirsher 	return skb;
1537f7917c00SJeff Kirsher }
1538f7917c00SJeff Kirsher 
1539f7917c00SJeff Kirsher /**
1540f7917c00SJeff Kirsher  *	t4vf_pktgl_free - free a packet gather list
1541f7917c00SJeff Kirsher  *	@gl: the gather list
1542f7917c00SJeff Kirsher  *
1543f7917c00SJeff Kirsher  *	Releases the pages of a packet gather list.  We do not own the last
1544f7917c00SJeff Kirsher  *	page on the list and do not free it.
1545f7917c00SJeff Kirsher  */
15468a67d1c6SSachin Kamat static void t4vf_pktgl_free(const struct pkt_gl *gl)
1547f7917c00SJeff Kirsher {
1548f7917c00SJeff Kirsher 	int frag;
1549f7917c00SJeff Kirsher 
1550f7917c00SJeff Kirsher 	frag = gl->nfrags - 1;
1551f7917c00SJeff Kirsher 	while (frag--)
1552f7917c00SJeff Kirsher 		put_page(gl->frags[frag].page);
1553f7917c00SJeff Kirsher }
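
/*
 * Editor's sketch: the slightly subtle loop in t4vf_pktgl_free() made
 * explicit.  Starting at nfrags - 1 and testing frag-- releases pages
 * 0 .. nfrags-2 and deliberately skips the last page, which we do not
 * own.  Hypothetical equivalent:
 */
static inline void example_put_all_but_last(struct page **pages, int nfrags)
{
	int i;

	for (i = 0; i < nfrags - 1; i++)	/* the last page isn't ours */
		put_page(pages[i]);
}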
1554f7917c00SJeff Kirsher 
1555f7917c00SJeff Kirsher /**
1556f7917c00SJeff Kirsher  *	do_gro - perform Generic Receive Offload ingress packet processing
1557f7917c00SJeff Kirsher  *	@rxq: ingress RX Ethernet Queue
1558f7917c00SJeff Kirsher  *	@gl: gather list for ingress packet
1559f7917c00SJeff Kirsher  *	@pkt: CPL header for last packet fragment
1560f7917c00SJeff Kirsher  *
1561f7917c00SJeff Kirsher  *	Perform Generic Receive Offload (GRO) ingress packet processing.
1562f7917c00SJeff Kirsher  *	We use the standard Linux GRO interfaces for this.
1563f7917c00SJeff Kirsher  */
1564f7917c00SJeff Kirsher static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1565f7917c00SJeff Kirsher 		   const struct cpl_rx_pkt *pkt)
1566f7917c00SJeff Kirsher {
156765f6ecc9SHariprasad Shenai 	struct adapter *adapter = rxq->rspq.adapter;
156865f6ecc9SHariprasad Shenai 	struct sge *s = &adapter->sge;
15699d5fd927SGanesh Goudar 	struct port_info *pi;
1570f7917c00SJeff Kirsher 	int ret;
1571f7917c00SJeff Kirsher 	struct sk_buff *skb;
1572f7917c00SJeff Kirsher 
1573f7917c00SJeff Kirsher 	skb = napi_get_frags(&rxq->rspq.napi);
1574f7917c00SJeff Kirsher 	if (unlikely(!skb)) {
1575f7917c00SJeff Kirsher 		t4vf_pktgl_free(gl);
1576f7917c00SJeff Kirsher 		rxq->stats.rx_drops++;
1577f7917c00SJeff Kirsher 		return;
1578f7917c00SJeff Kirsher 	}
1579f7917c00SJeff Kirsher 
158065f6ecc9SHariprasad Shenai 	copy_frags(skb, gl, s->pktshift);
158165f6ecc9SHariprasad Shenai 	skb->len = gl->tot_len - s->pktshift;
1582f7917c00SJeff Kirsher 	skb->data_len = skb->len;
1583f7917c00SJeff Kirsher 	skb->truesize += skb->data_len;
1584f7917c00SJeff Kirsher 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1585f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, rxq->rspq.idx);
15869d5fd927SGanesh Goudar 	pi = netdev_priv(skb->dev);
1587f7917c00SJeff Kirsher 
15889d5fd927SGanesh Goudar 	if (pkt->vlan_ex && !pi->vlan_id) {
158986a9bad3SPatrick McHardy 		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
159086a9bad3SPatrick McHardy 					be16_to_cpu(pkt->vlan));
1591af32de0eSVipul Pandya 		rxq->stats.vlan_ex++;
1592af32de0eSVipul Pandya 	}
1593f7917c00SJeff Kirsher 	ret = napi_gro_frags(&rxq->rspq.napi);
1594f7917c00SJeff Kirsher 
1595f7917c00SJeff Kirsher 	if (ret == GRO_HELD)
1596f7917c00SJeff Kirsher 		rxq->stats.lro_pkts++;
1597f7917c00SJeff Kirsher 	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1598f7917c00SJeff Kirsher 		rxq->stats.lro_merged++;
1599f7917c00SJeff Kirsher 	rxq->stats.pkts++;
1600f7917c00SJeff Kirsher 	rxq->stats.rx_cso++;
1601f7917c00SJeff Kirsher }
1602f7917c00SJeff Kirsher 
1603f7917c00SJeff Kirsher /**
1604f7917c00SJeff Kirsher  *	t4vf_ethrx_handler - process an ingress ethernet packet
1605f7917c00SJeff Kirsher  *	@rspq: the response queue that received the packet
1606f7917c00SJeff Kirsher  *	@rsp: the response queue descriptor holding the RX_PKT message
1607f7917c00SJeff Kirsher  *	@gl: the gather list of packet fragments
1608f7917c00SJeff Kirsher  *
1609f7917c00SJeff Kirsher  *	Process an ingress ethernet packet and deliver it to the stack.
1610f7917c00SJeff Kirsher  */
1611f7917c00SJeff Kirsher int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1612f7917c00SJeff Kirsher 		       const struct pkt_gl *gl)
1613f7917c00SJeff Kirsher {
1614f7917c00SJeff Kirsher 	struct sk_buff *skb;
16158b9a4d56SVipul Pandya 	const struct cpl_rx_pkt *pkt = (void *)rsp;
1616c3136f55SHariprasad Shenai 	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1617c3136f55SHariprasad Shenai 		       (rspq->netdev->features & NETIF_F_RXCSUM);
1618f7917c00SJeff Kirsher 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
161965f6ecc9SHariprasad Shenai 	struct adapter *adapter = rspq->adapter;
162065f6ecc9SHariprasad Shenai 	struct sge *s = &adapter->sge;
16219d5fd927SGanesh Goudar 	struct port_info *pi;
1622f7917c00SJeff Kirsher 
1623f7917c00SJeff Kirsher 	/*
1624f7917c00SJeff Kirsher 	 * If this is a good TCP packet and we have Generic Receive Offload
1625f7917c00SJeff Kirsher 	 * enabled, handle the packet in the GRO path.
1626f7917c00SJeff Kirsher 	 */
1627bdc590b9SHariprasad Shenai 	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1628f7917c00SJeff Kirsher 	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1629f7917c00SJeff Kirsher 	    !pkt->ip_frag) {
1630f7917c00SJeff Kirsher 		do_gro(rxq, gl, pkt);
1631f7917c00SJeff Kirsher 		return 0;
1632f7917c00SJeff Kirsher 	}
1633f7917c00SJeff Kirsher 
1634f7917c00SJeff Kirsher 	/*
1635f7917c00SJeff Kirsher 	 * Convert the Packet Gather List into an skb.
1636f7917c00SJeff Kirsher 	 */
1637f7917c00SJeff Kirsher 	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1638f7917c00SJeff Kirsher 	if (unlikely(!skb)) {
1639f7917c00SJeff Kirsher 		t4vf_pktgl_free(gl);
1640f7917c00SJeff Kirsher 		rxq->stats.rx_drops++;
1641f7917c00SJeff Kirsher 		return 0;
1642f7917c00SJeff Kirsher 	}
164365f6ecc9SHariprasad Shenai 	__skb_pull(skb, s->pktshift);
1644f7917c00SJeff Kirsher 	skb->protocol = eth_type_trans(skb, rspq->netdev);
1645f7917c00SJeff Kirsher 	skb_record_rx_queue(skb, rspq->idx);
16469d5fd927SGanesh Goudar 	pi = netdev_priv(skb->dev);
1647f7917c00SJeff Kirsher 	rxq->stats.pkts++;
1648f7917c00SJeff Kirsher 
1649c3136f55SHariprasad Shenai 	if (csum_ok && !pkt->err_vec &&
1650bdc590b9SHariprasad Shenai 	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
16515400e54aSHariprasad Shenai 		if (!pkt->ip_frag) {
1652f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_UNNECESSARY;
16535400e54aSHariprasad Shenai 			rxq->stats.rx_cso++;
16545400e54aSHariprasad Shenai 		} else if (pkt->l2info & htonl(RXF_IP_F)) {
1655f7917c00SJeff Kirsher 			__sum16 c = (__force __sum16)pkt->csum;
1656f7917c00SJeff Kirsher 			skb->csum = csum_unfold(c);
1657f7917c00SJeff Kirsher 			skb->ip_summed = CHECKSUM_COMPLETE;
1658f7917c00SJeff Kirsher 			rxq->stats.rx_cso++;
16595400e54aSHariprasad Shenai 		}
1660f7917c00SJeff Kirsher 	} else
1661f7917c00SJeff Kirsher 		skb_checksum_none_assert(skb);
1662f7917c00SJeff Kirsher 
16639d5fd927SGanesh Goudar 	if (pkt->vlan_ex && !pi->vlan_id) {
1664f7917c00SJeff Kirsher 		rxq->stats.vlan_ex++;
16659d5fd927SGanesh Goudar 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
16669d5fd927SGanesh Goudar 				       be16_to_cpu(pkt->vlan));
1667f7917c00SJeff Kirsher 	}
1668f7917c00SJeff Kirsher 
1669f7917c00SJeff Kirsher 	netif_receive_skb(skb);
1670f7917c00SJeff Kirsher 
1671f7917c00SJeff Kirsher 	return 0;
1672f7917c00SJeff Kirsher }
1673f7917c00SJeff Kirsher 
1674f7917c00SJeff Kirsher /**
1675f7917c00SJeff Kirsher  *	is_new_response - check if a response is newly written
1676f7917c00SJeff Kirsher  *	@rc: the response control descriptor
1677f7917c00SJeff Kirsher  *	@rspq: the response queue
1678f7917c00SJeff Kirsher  *
1679f7917c00SJeff Kirsher  *	Returns true if a response descriptor contains a yet unprocessed
1680f7917c00SJeff Kirsher  *	response.
1681f7917c00SJeff Kirsher  */
1682f7917c00SJeff Kirsher static inline bool is_new_response(const struct rsp_ctrl *rc,
1683f7917c00SJeff Kirsher 				   const struct sge_rspq *rspq)
1684f7917c00SJeff Kirsher {
16851ecc7b7aSHariprasad Shenai 	return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1686f7917c00SJeff Kirsher }
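
/*
 * Editor's sketch: the polling idiom built on is_new_response(), as
 * used by process_responses() and process_intrq() below.  The hardware
 * flips the GEN bit it writes on each pass around the ring and
 * rspq_next() flips rspq->gen on each software wrap, so equality means
 * "written since we last looked".  The dma_rmb() orders the flag read
 * before any read of the descriptor payload.  Hypothetical helper.
 */
static inline bool example_response_ready(struct sge_rspq *rspq)
{
	const struct rsp_ctrl *rc = (void *)rspq->cur_desc +
				    (rspq->iqe_len - sizeof(*rc));

	if (!is_new_response(rc, rspq))
		return false;
	dma_rmb();
	return true;
}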
1687f7917c00SJeff Kirsher 
1688f7917c00SJeff Kirsher /**
1689f7917c00SJeff Kirsher  *	restore_rx_bufs - put back a packet's RX buffers
1690f7917c00SJeff Kirsher  *	@gl: the packet gather list
1691f7917c00SJeff Kirsher  *	@fl: the SGE Free List
169220bb0c8fSRahul Lakkireddy  *	@frags: how many fragments in @gl
1693f7917c00SJeff Kirsher  *
1694f7917c00SJeff Kirsher  *	Called when we find out that the current packet, @gl, can't be
1695f7917c00SJeff Kirsher  *	processed right away for some reason.  This is a very rare event and
1696f7917c00SJeff Kirsher  *	there's no effort to make this suspension/resumption process
1697f7917c00SJeff Kirsher  *	particularly efficient.
1698f7917c00SJeff Kirsher  *
1699f7917c00SJeff Kirsher  *	We implement the suspension by putting all of the RX buffers associated
1700f7917c00SJeff Kirsher  *	with the current packet back on the original Free List.  The buffers
1701f7917c00SJeff Kirsher  *	have already been unmapped and are left unmapped, we mark them as
1702f7917c00SJeff Kirsher  *	unmapped in order to prevent further unmapping attempts.  (Effectively
1703f7917c00SJeff Kirsher  *	this function undoes the series of @unmap_rx_buf calls which were done
1704f7917c00SJeff Kirsher  *	to create the current packet's gather list.)  This leaves us ready to
1705f7917c00SJeff Kirsher  *	restart processing of the packet the next time we start processing the
1706f7917c00SJeff Kirsher  *	RX Queue ...
1707f7917c00SJeff Kirsher  */
1708f7917c00SJeff Kirsher static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1709f7917c00SJeff Kirsher 			    int frags)
1710f7917c00SJeff Kirsher {
1711f7917c00SJeff Kirsher 	struct rx_sw_desc *sdesc;
1712f7917c00SJeff Kirsher 
1713f7917c00SJeff Kirsher 	while (frags--) {
1714f7917c00SJeff Kirsher 		if (fl->cidx == 0)
1715f7917c00SJeff Kirsher 			fl->cidx = fl->size - 1;
1716f7917c00SJeff Kirsher 		else
1717f7917c00SJeff Kirsher 			fl->cidx--;
1718f7917c00SJeff Kirsher 		sdesc = &fl->sdesc[fl->cidx];
1719f7917c00SJeff Kirsher 		sdesc->page = gl->frags[frags].page;
1720f7917c00SJeff Kirsher 		sdesc->dma_addr |= RX_UNMAPPED_BUF;
1721f7917c00SJeff Kirsher 		fl->avail++;
1722f7917c00SJeff Kirsher 	}
1723f7917c00SJeff Kirsher }
1724f7917c00SJeff Kirsher 
1725f7917c00SJeff Kirsher /**
1726f7917c00SJeff Kirsher  *	rspq_next - advance to the next entry in a response queue
1727f7917c00SJeff Kirsher  *	@rspq: the queue
1728f7917c00SJeff Kirsher  *
1729f7917c00SJeff Kirsher  *	Updates the state of a response queue to advance it to the next entry.
1730f7917c00SJeff Kirsher  */
1731f7917c00SJeff Kirsher static inline void rspq_next(struct sge_rspq *rspq)
1732f7917c00SJeff Kirsher {
1733f7917c00SJeff Kirsher 	rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1734f7917c00SJeff Kirsher 	if (unlikely(++rspq->cidx == rspq->size)) {
1735f7917c00SJeff Kirsher 		rspq->cidx = 0;
1736f7917c00SJeff Kirsher 		rspq->gen ^= 1;
1737f7917c00SJeff Kirsher 		rspq->cur_desc = rspq->desc;
1738f7917c00SJeff Kirsher 	}
1739f7917c00SJeff Kirsher }
1740f7917c00SJeff Kirsher 
1741f7917c00SJeff Kirsher /**
1742f7917c00SJeff Kirsher  *	process_responses - process responses from an SGE response queue
1743f7917c00SJeff Kirsher  *	@rspq: the ingress response queue to process
1744f7917c00SJeff Kirsher  *	@budget: how many responses can be processed in this round
1745f7917c00SJeff Kirsher  *
1746f7917c00SJeff Kirsher  *	Process responses from a Scatter Gather Engine response queue up to
1747f7917c00SJeff Kirsher  *	the supplied budget.  Responses include received packets as well as
1748f7917c00SJeff Kirsher  *	control messages from firmware or hardware.
1749f7917c00SJeff Kirsher  *
1750f7917c00SJeff Kirsher  *	Additionally choose the interrupt holdoff time for the next interrupt
1751f7917c00SJeff Kirsher  *	on this queue.  If the system is under memory shortage use a fairly
1752f7917c00SJeff Kirsher  *	long delay to help recovery.
1753f7917c00SJeff Kirsher  */
17548a67d1c6SSachin Kamat static int process_responses(struct sge_rspq *rspq, int budget)
1755f7917c00SJeff Kirsher {
1756f7917c00SJeff Kirsher 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
175765f6ecc9SHariprasad Shenai 	struct adapter *adapter = rspq->adapter;
175865f6ecc9SHariprasad Shenai 	struct sge *s = &adapter->sge;
1759f7917c00SJeff Kirsher 	int budget_left = budget;
1760f7917c00SJeff Kirsher 
1761f7917c00SJeff Kirsher 	while (likely(budget_left)) {
1762f7917c00SJeff Kirsher 		int ret, rsp_type;
1763f7917c00SJeff Kirsher 		const struct rsp_ctrl *rc;
1764f7917c00SJeff Kirsher 
1765f7917c00SJeff Kirsher 		rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1766f7917c00SJeff Kirsher 		if (!is_new_response(rc, rspq))
1767f7917c00SJeff Kirsher 			break;
1768f7917c00SJeff Kirsher 
1769f7917c00SJeff Kirsher 		/*
1770f7917c00SJeff Kirsher 		 * Figure out what kind of response we've received from the
1771f7917c00SJeff Kirsher 		 * SGE.
1772f7917c00SJeff Kirsher 		 */
1773019be1cfSAlexander Duyck 		dma_rmb();
17741ecc7b7aSHariprasad Shenai 		rsp_type = RSPD_TYPE_G(rc->type_gen);
17751ecc7b7aSHariprasad Shenai 		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1776a0006a86SIan Campbell 			struct page_frag *fp;
1777f7917c00SJeff Kirsher 			struct pkt_gl gl;
1778f7917c00SJeff Kirsher 			const struct rx_sw_desc *sdesc;
1779f7917c00SJeff Kirsher 			u32 bufsz, frag;
1780f7917c00SJeff Kirsher 			u32 len = be32_to_cpu(rc->pldbuflen_qid);
1781f7917c00SJeff Kirsher 
1782f7917c00SJeff Kirsher 			/*
1783f7917c00SJeff Kirsher 			 * If we get a "new buffer" message from the SGE we
1784f7917c00SJeff Kirsher 			 * need to move on to the next Free List buffer.
1785f7917c00SJeff Kirsher 			 */
17861ecc7b7aSHariprasad Shenai 			if (len & RSPD_NEWBUF_F) {
1787f7917c00SJeff Kirsher 				/*
1788f7917c00SJeff Kirsher 				 * We get one "new buffer" message when we
1789f7917c00SJeff Kirsher 				 * first start up a queue so we need to ignore
1790f7917c00SJeff Kirsher 				 * it when our offset into the buffer is 0.
1791f7917c00SJeff Kirsher 				 */
1792f7917c00SJeff Kirsher 				if (likely(rspq->offset > 0)) {
1793f7917c00SJeff Kirsher 					free_rx_bufs(rspq->adapter, &rxq->fl,
1794f7917c00SJeff Kirsher 						     1);
1795f7917c00SJeff Kirsher 					rspq->offset = 0;
1796f7917c00SJeff Kirsher 				}
17971ecc7b7aSHariprasad Shenai 				len = RSPD_LEN_G(len);
1798f7917c00SJeff Kirsher 			}
1799f7917c00SJeff Kirsher 			gl.tot_len = len;
1800f7917c00SJeff Kirsher 
1801f7917c00SJeff Kirsher 			/*
1802f7917c00SJeff Kirsher 			 * Gather packet fragments.
1803f7917c00SJeff Kirsher 			 */
1804f7917c00SJeff Kirsher 			for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1805f7917c00SJeff Kirsher 				BUG_ON(frag >= MAX_SKB_FRAGS);
1806f7917c00SJeff Kirsher 				BUG_ON(rxq->fl.avail == 0);
1807f7917c00SJeff Kirsher 				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
180865f6ecc9SHariprasad Shenai 				bufsz = get_buf_size(adapter, sdesc);
1809f7917c00SJeff Kirsher 				fp->page = sdesc->page;
1810a0006a86SIan Campbell 				fp->offset = rspq->offset;
1811a0006a86SIan Campbell 				fp->size = min(bufsz, len);
1812a0006a86SIan Campbell 				len -= fp->size;
1813f7917c00SJeff Kirsher 				if (!len)
1814f7917c00SJeff Kirsher 					break;
1815f7917c00SJeff Kirsher 				unmap_rx_buf(rspq->adapter, &rxq->fl);
1816f7917c00SJeff Kirsher 			}
1817f7917c00SJeff Kirsher 			gl.nfrags = frag+1;
1818f7917c00SJeff Kirsher 
1819f7917c00SJeff Kirsher 			/*
1820f7917c00SJeff Kirsher 			 * Last buffer remains mapped so explicitly make it
1821f7917c00SJeff Kirsher 			 * coherent for CPU access and start preloading first
1822f7917c00SJeff Kirsher 			 * cache line ...
1823f7917c00SJeff Kirsher 			 */
1824f7917c00SJeff Kirsher 			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1825f7917c00SJeff Kirsher 						get_buf_addr(sdesc),
1826a0006a86SIan Campbell 						fp->size, DMA_FROM_DEVICE);
1827f7917c00SJeff Kirsher 			gl.va = (page_address(gl.frags[0].page) +
1828a0006a86SIan Campbell 				 gl.frags[0].offset);
1829f7917c00SJeff Kirsher 			prefetch(gl.va);
1830f7917c00SJeff Kirsher 
1831f7917c00SJeff Kirsher 			/*
1832f7917c00SJeff Kirsher 			 * Hand the new ingress packet to the handler for
1833f7917c00SJeff Kirsher 			 * this Response Queue.
1834f7917c00SJeff Kirsher 			 */
1835f7917c00SJeff Kirsher 			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1836f7917c00SJeff Kirsher 			if (likely(ret == 0))
183765f6ecc9SHariprasad Shenai 				rspq->offset += ALIGN(fp->size, s->fl_align);
1838f7917c00SJeff Kirsher 			else
1839f7917c00SJeff Kirsher 				restore_rx_bufs(&gl, &rxq->fl, frag);
18401ecc7b7aSHariprasad Shenai 		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1841f7917c00SJeff Kirsher 			ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1842f7917c00SJeff Kirsher 		} else {
18431ecc7b7aSHariprasad Shenai 			WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1844f7917c00SJeff Kirsher 			ret = 0;
1845f7917c00SJeff Kirsher 		}
1846f7917c00SJeff Kirsher 
1847f7917c00SJeff Kirsher 		if (unlikely(ret)) {
1848f7917c00SJeff Kirsher 			/*
1849f7917c00SJeff Kirsher 			 * Couldn't process descriptor, back off for recovery.
1850f7917c00SJeff Kirsher 			 * We use the SGE's last timer which has the longest
1851f7917c00SJeff Kirsher 			 * interrupt coalescing value ...
1852f7917c00SJeff Kirsher 			 */
1853f7917c00SJeff Kirsher 			const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1854f7917c00SJeff Kirsher 			rspq->next_intr_params =
18551ecc7b7aSHariprasad Shenai 				QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1856f7917c00SJeff Kirsher 			break;
1857f7917c00SJeff Kirsher 		}
1858f7917c00SJeff Kirsher 
1859f7917c00SJeff Kirsher 		rspq_next(rspq);
1860f7917c00SJeff Kirsher 		budget_left--;
1861f7917c00SJeff Kirsher 	}
1862f7917c00SJeff Kirsher 
1863f7917c00SJeff Kirsher 	/*
1864f7917c00SJeff Kirsher 	 * If this is a Response Queue with an associated Free List and
1865f7917c00SJeff Kirsher 	 * at least two Egress Queue units available in the Free List
1866f7917c00SJeff Kirsher 	 * for new buffer pointers, refill the Free List.
1867f7917c00SJeff Kirsher 	 */
1868f7917c00SJeff Kirsher 	if (rspq->offset >= 0 &&
1869da08e425SHariprasad Shenai 	    fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1870f7917c00SJeff Kirsher 		__refill_fl(rspq->adapter, &rxq->fl);
1871f7917c00SJeff Kirsher 	return budget - budget_left;
1872f7917c00SJeff Kirsher }
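
/*
 * Editor's sketch: the Free List refill condition at the bottom of
 * process_responses() as a stand-alone predicate.  fl_cap() is the
 * list's usable capacity in buffer pointers; we only bother refilling
 * once at least two Egress Queue units' worth have been consumed.
 * Hypothetical helper.
 */
static inline bool example_fl_should_refill(struct sge_fl *fl)
{
	return fl_cap(fl) - fl->avail >= 2 * FL_PER_EQ_UNIT;
}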
1873f7917c00SJeff Kirsher 
1874f7917c00SJeff Kirsher /**
1875f7917c00SJeff Kirsher  *	napi_rx_handler - the NAPI handler for RX processing
1876f7917c00SJeff Kirsher  *	@napi: the napi instance
1877f7917c00SJeff Kirsher  *	@budget: how many packets we can process in this round
1878f7917c00SJeff Kirsher  *
1879f7917c00SJeff Kirsher  *	Handler for new data events when using NAPI.  This does not need any
1880f7917c00SJeff Kirsher  *	locking or protection from interrupts as data interrupts are off at
1881f7917c00SJeff Kirsher  *	this point and other adapter interrupts do not interfere (the latter
1882f7917c00SJeff Kirsher  *	is not a concern at all with MSI-X, as non-data interrupts then have
1883f7917c00SJeff Kirsher  *	a separate handler).
1884f7917c00SJeff Kirsher  */
1885f7917c00SJeff Kirsher static int napi_rx_handler(struct napi_struct *napi, int budget)
1886f7917c00SJeff Kirsher {
1887f7917c00SJeff Kirsher 	unsigned int intr_params;
1888f7917c00SJeff Kirsher 	struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1889f7917c00SJeff Kirsher 	int work_done = process_responses(rspq, budget);
1890df64e4d3SHariprasad Shenai 	u32 val;
1891f7917c00SJeff Kirsher 
1892f7917c00SJeff Kirsher 	if (likely(work_done < budget)) {
18936ad20165SEric Dumazet 		napi_complete_done(napi, work_done);
1894f7917c00SJeff Kirsher 		intr_params = rspq->next_intr_params;
1895f7917c00SJeff Kirsher 		rspq->next_intr_params = rspq->intr_params;
1896f7917c00SJeff Kirsher 	} else
18971ecc7b7aSHariprasad Shenai 		intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1898f7917c00SJeff Kirsher 
1899f7917c00SJeff Kirsher 	if (unlikely(work_done == 0))
1900f7917c00SJeff Kirsher 		rspq->unhandled_irqs++;
1901f7917c00SJeff Kirsher 
1902f612b815SHariprasad Shenai 	val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
190371d3c0b4SHariprasad Shenai 	/* If we don't have access to the new User GTS (T5+), use the old
190471d3c0b4SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
190571d3c0b4SHariprasad Shenai 	 */
190671d3c0b4SHariprasad Shenai 	if (unlikely(!rspq->bar2_addr)) {
1907f7917c00SJeff Kirsher 		t4_write_reg(rspq->adapter,
1908f7917c00SJeff Kirsher 			     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1909f612b815SHariprasad Shenai 			     val | INGRESSQID_V((u32)rspq->cntxt_id));
1910df64e4d3SHariprasad Shenai 	} else {
1911f612b815SHariprasad Shenai 		writel(val | INGRESSQID_V(rspq->bar2_qid),
1912df64e4d3SHariprasad Shenai 		       rspq->bar2_addr + SGE_UDB_GTS);
1913df64e4d3SHariprasad Shenai 		wmb();
1914df64e4d3SHariprasad Shenai 	}
1915f7917c00SJeff Kirsher 	return work_done;
1916f7917c00SJeff Kirsher }
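
/*
 * Editor's sketch: the GTS doorbell update shared by napi_rx_handler()
 * above and process_intrq() below, factored out.  CIDXINC reports how
 * many entries we consumed and SEINTARM re-arms the interrupt with the
 * chosen holdoff; without a BAR2 mapping we fall back to the legacy
 * T4VF register.  This helper is hypothetical, shown only to highlight
 * the shared pattern.
 */
static inline void example_gts_update(struct sge_rspq *rspq,
				      unsigned int cidxinc,
				      unsigned int intr_params)
{
	u32 val = CIDXINC_V(cidxinc) | SEINTARM_V(intr_params);

	if (unlikely(!rspq->bar2_addr)) {
		t4_write_reg(rspq->adapter,
			     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     val | INGRESSQID_V((u32)rspq->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(rspq->bar2_qid),
		       rspq->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
}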
1917f7917c00SJeff Kirsher 
1918f7917c00SJeff Kirsher /*
1919f7917c00SJeff Kirsher  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1920f7917c00SJeff Kirsher  * (i.e., response queue serviced by NAPI polling).
1921f7917c00SJeff Kirsher  */
1922f7917c00SJeff Kirsher irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1923f7917c00SJeff Kirsher {
1924f7917c00SJeff Kirsher 	struct sge_rspq *rspq = cookie;
1925f7917c00SJeff Kirsher 
1926f7917c00SJeff Kirsher 	napi_schedule(&rspq->napi);
1927f7917c00SJeff Kirsher 	return IRQ_HANDLED;
1928f7917c00SJeff Kirsher }
1929f7917c00SJeff Kirsher 
1930f7917c00SJeff Kirsher /*
1931f7917c00SJeff Kirsher  * Process the indirect interrupt entries in the interrupt queue and kick off
1932f7917c00SJeff Kirsher  * NAPI for each queue that has generated an entry.
1933f7917c00SJeff Kirsher  */
1934f7917c00SJeff Kirsher static unsigned int process_intrq(struct adapter *adapter)
1935f7917c00SJeff Kirsher {
1936f7917c00SJeff Kirsher 	struct sge *s = &adapter->sge;
1937f7917c00SJeff Kirsher 	struct sge_rspq *intrq = &s->intrq;
1938f7917c00SJeff Kirsher 	unsigned int work_done;
1939df64e4d3SHariprasad Shenai 	u32 val;
1940f7917c00SJeff Kirsher 
1941f7917c00SJeff Kirsher 	spin_lock(&adapter->sge.intrq_lock);
1942f7917c00SJeff Kirsher 	for (work_done = 0; ; work_done++) {
1943f7917c00SJeff Kirsher 		const struct rsp_ctrl *rc;
1944f7917c00SJeff Kirsher 		unsigned int qid, iq_idx;
1945f7917c00SJeff Kirsher 		struct sge_rspq *rspq;
1946f7917c00SJeff Kirsher 
1947f7917c00SJeff Kirsher 		/*
1948f7917c00SJeff Kirsher 		 * Grab the next response from the interrupt queue and bail
1949f7917c00SJeff Kirsher 		 * out if it's not a new response.
1950f7917c00SJeff Kirsher 		 */
1951f7917c00SJeff Kirsher 		rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1952f7917c00SJeff Kirsher 		if (!is_new_response(rc, intrq))
1953f7917c00SJeff Kirsher 			break;
1954f7917c00SJeff Kirsher 
1955f7917c00SJeff Kirsher 		/*
1956f7917c00SJeff Kirsher 		 * If the response isn't a forwarded interrupt message, issue an
1957f7917c00SJeff Kirsher 		 * error and go on to the next response message.  This should
1958f7917c00SJeff Kirsher 		 * never happen ...
1959f7917c00SJeff Kirsher 		 */
1960019be1cfSAlexander Duyck 		dma_rmb();
19611ecc7b7aSHariprasad Shenai 		if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1962f7917c00SJeff Kirsher 			dev_err(adapter->pdev_dev,
1963f7917c00SJeff Kirsher 				"Unexpected INTRQ response type %d\n",
19641ecc7b7aSHariprasad Shenai 				RSPD_TYPE_G(rc->type_gen));
1965f7917c00SJeff Kirsher 			continue;
1966f7917c00SJeff Kirsher 		}
1967f7917c00SJeff Kirsher 
1968f7917c00SJeff Kirsher 		/*
1969f7917c00SJeff Kirsher 		 * Extract the Queue ID from the interrupt message and perform
1970f7917c00SJeff Kirsher 		 * sanity checking to make sure it really refers to one of our
1971f7917c00SJeff Kirsher 		 * Ingress Queues which is active and matches the queue's ID.
1972f7917c00SJeff Kirsher 		 * None of these error conditions should ever happen so we may
1973f7917c00SJeff Kirsher 		 * want to make them fatal and/or conditionalize them under
1974f7917c00SJeff Kirsher 		 * DEBUG.
1975f7917c00SJeff Kirsher 		 */
19761ecc7b7aSHariprasad Shenai 		qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1977f7917c00SJeff Kirsher 		iq_idx = IQ_IDX(s, qid);
1978f7917c00SJeff Kirsher 		if (unlikely(iq_idx >= MAX_INGQ)) {
1979f7917c00SJeff Kirsher 			dev_err(adapter->pdev_dev,
1980f7917c00SJeff Kirsher 				"Ingress QID %d out of range\n", qid);
1981f7917c00SJeff Kirsher 			continue;
1982f7917c00SJeff Kirsher 		}
1983f7917c00SJeff Kirsher 		rspq = s->ingr_map[iq_idx];
1984f7917c00SJeff Kirsher 		if (unlikely(rspq == NULL)) {
1985f7917c00SJeff Kirsher 			dev_err(adapter->pdev_dev,
1986f7917c00SJeff Kirsher 				"Ingress QID %d RSPQ=NULL\n", qid);
1987f7917c00SJeff Kirsher 			continue;
1988f7917c00SJeff Kirsher 		}
1989f7917c00SJeff Kirsher 		if (unlikely(rspq->abs_id != qid)) {
1990f7917c00SJeff Kirsher 			dev_err(adapter->pdev_dev,
1991f7917c00SJeff Kirsher 				"Ingress QID %d refers to RSPQ %d\n",
1992f7917c00SJeff Kirsher 				qid, rspq->abs_id);
1993f7917c00SJeff Kirsher 			continue;
1994f7917c00SJeff Kirsher 		}
1995f7917c00SJeff Kirsher 
1996f7917c00SJeff Kirsher 		/*
1997f7917c00SJeff Kirsher 		 * Schedule NAPI processing on the indicated Response Queue
1998f7917c00SJeff Kirsher 		 * and move on to the next entry in the Forwarded Interrupt
1999f7917c00SJeff Kirsher 		 * Queue.
2000f7917c00SJeff Kirsher 		 */
2001f7917c00SJeff Kirsher 		napi_schedule(&rspq->napi);
2002f7917c00SJeff Kirsher 		rspq_next(intrq);
2003f7917c00SJeff Kirsher 	}
2004f7917c00SJeff Kirsher 
2005f612b815SHariprasad Shenai 	val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
200671d3c0b4SHariprasad Shenai 	/* If we don't have access to the new User GTS (T5+), use the old
200771d3c0b4SHariprasad Shenai 	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
200871d3c0b4SHariprasad Shenai 	 */
200971d3c0b4SHariprasad Shenai 	if (unlikely(!intrq->bar2_addr)) {
2010f7917c00SJeff Kirsher 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2011f612b815SHariprasad Shenai 			     val | INGRESSQID_V(intrq->cntxt_id));
201271d3c0b4SHariprasad Shenai 	} else {
2013f612b815SHariprasad Shenai 		writel(val | INGRESSQID_V(intrq->bar2_qid),
2014df64e4d3SHariprasad Shenai 		       intrq->bar2_addr + SGE_UDB_GTS);
2015df64e4d3SHariprasad Shenai 		wmb();
2016df64e4d3SHariprasad Shenai 	}
2017f7917c00SJeff Kirsher 
2018f7917c00SJeff Kirsher 	spin_unlock(&adapter->sge.intrq_lock);
2019f7917c00SJeff Kirsher 
2020f7917c00SJeff Kirsher 	return work_done;
2021f7917c00SJeff Kirsher }
2022f7917c00SJeff Kirsher 
2023f7917c00SJeff Kirsher /*
2024f7917c00SJeff Kirsher  * The MSI interrupt handler handles data events from SGE response queues as
2025f7917c00SJeff Kirsher  * well as error and other async events as they all use the same MSI vector.
2026f7917c00SJeff Kirsher  */
20278a67d1c6SSachin Kamat static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2028f7917c00SJeff Kirsher {
2029f7917c00SJeff Kirsher 	struct adapter *adapter = cookie;
2030f7917c00SJeff Kirsher 
2031f7917c00SJeff Kirsher 	process_intrq(adapter);
2032f7917c00SJeff Kirsher 	return IRQ_HANDLED;
2033f7917c00SJeff Kirsher }
2034f7917c00SJeff Kirsher 
2035f7917c00SJeff Kirsher /**
2036f7917c00SJeff Kirsher  *	t4vf_intr_handler - select the top-level interrupt handler
2037f7917c00SJeff Kirsher  *	@adapter: the adapter
2038f7917c00SJeff Kirsher  *
2039f7917c00SJeff Kirsher  *	Selects the top-level interrupt handler based on the type of interrupts
2040f7917c00SJeff Kirsher  *	(MSI-X or MSI).
2041f7917c00SJeff Kirsher  */
2042f7917c00SJeff Kirsher irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2043f7917c00SJeff Kirsher {
20443d78bfaaSArjun Vynipadath 	BUG_ON((adapter->flags &
20453d78bfaaSArjun Vynipadath 	       (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
20463d78bfaaSArjun Vynipadath 	if (adapter->flags & CXGB4VF_USING_MSIX)
2047f7917c00SJeff Kirsher 		return t4vf_sge_intr_msix;
2048f7917c00SJeff Kirsher 	else
2049f7917c00SJeff Kirsher 		return t4vf_intr_msi;
2050f7917c00SJeff Kirsher }
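/* Illustrative sketch (not code from this file): for MSI the main
 * driver can hand the selected handler straight to request_irq(),
 * roughly as below; "adapter->name" as the IRQ name is an assumption
 * for the example.
 *
 *	err = request_irq(adapter->pdev->irq, t4vf_intr_handler(adapter),
 *			  0, adapter->name, adapter);
 *	if (err)
 *		dev_err(adapter->pdev_dev, "request_irq failed: %d\n", err);
 *
 * In MSI-X mode each vector is instead registered with its own
 * Response Queue as the cookie, matching t4vf_sge_intr_msix() above.
 */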
2051f7917c00SJeff Kirsher 
2052f7917c00SJeff Kirsher /**
2053f7917c00SJeff Kirsher  *	sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
205420bb0c8fSRahul Lakkireddy  *	@t: Rx timer
2055f7917c00SJeff Kirsher  *
2056f7917c00SJeff Kirsher  *	Runs periodically from a timer to perform maintenance of SGE RX queues.
2057f7917c00SJeff Kirsher  *
2058f7917c00SJeff Kirsher  *	Replenishes RX queues that have run out due to memory shortage.
2059f7917c00SJeff Kirsher  *	Normally new RX buffers are added when existing ones are consumed,
2060f7917c00SJeff Kirsher  *	but when memory is short a queue can become empty.  We schedule NAPI
2061f7917c00SJeff Kirsher  *	to do the actual refill.
2062f7917c00SJeff Kirsher  */
20630e23daebSKees Cook static void sge_rx_timer_cb(struct timer_list *t)
2064f7917c00SJeff Kirsher {
20650e23daebSKees Cook 	struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
2066f7917c00SJeff Kirsher 	struct sge *s = &adapter->sge;
2067f7917c00SJeff Kirsher 	unsigned int i;
2068f7917c00SJeff Kirsher 
2069f7917c00SJeff Kirsher 	/*
2070f7917c00SJeff Kirsher 	 * Scan the "Starving Free Lists" flag array looking for any Free
2071f7917c00SJeff Kirsher 	 * Lists in need of more free buffers.  If we find one and it's not
2072f7917c00SJeff Kirsher 	 * being actively polled, then bump its "starving" counter and attempt
2073f7917c00SJeff Kirsher 	 * to refill it.  If we're successful in adding enough buffers to push
2074f7917c00SJeff Kirsher 	 * the Free List over the starving threshold, then we can clear its
2075f7917c00SJeff Kirsher 	 * "starving" status.
2076f7917c00SJeff Kirsher 	 */
2077f7917c00SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2078f7917c00SJeff Kirsher 		unsigned long m;
2079f7917c00SJeff Kirsher 
2080f7917c00SJeff Kirsher 		for (m = s->starving_fl[i]; m; m &= m - 1) {
2081f7917c00SJeff Kirsher 			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2082f7917c00SJeff Kirsher 			struct sge_fl *fl = s->egr_map[id];
2083f7917c00SJeff Kirsher 
2084f7917c00SJeff Kirsher 			clear_bit(id, s->starving_fl);
20854e857c58SPeter Zijlstra 			smp_mb__after_atomic();
2086f7917c00SJeff Kirsher 
2087f7917c00SJeff Kirsher 			/*
2088f7917c00SJeff Kirsher 			 * Since we are accessing fl without a lock there's a
2089f7917c00SJeff Kirsher 			 * small probability of a false positive where we
2090f7917c00SJeff Kirsher 			 * schedule napi but the FL is no longer starving.
2091f7917c00SJeff Kirsher 			 * No biggie.
2092f7917c00SJeff Kirsher 			 */
209365f6ecc9SHariprasad Shenai 			if (fl_starving(adapter, fl)) {
2094f7917c00SJeff Kirsher 				struct sge_eth_rxq *rxq;
2095f7917c00SJeff Kirsher 
2096f7917c00SJeff Kirsher 				rxq = container_of(fl, struct sge_eth_rxq, fl);
2097f7917c00SJeff Kirsher 				if (napi_reschedule(&rxq->rspq.napi))
2098f7917c00SJeff Kirsher 					fl->starving++;
2099f7917c00SJeff Kirsher 				else
2100f7917c00SJeff Kirsher 					set_bit(id, s->starving_fl);
2101f7917c00SJeff Kirsher 			}
2102f7917c00SJeff Kirsher 		}
2103f7917c00SJeff Kirsher 	}
2104f7917c00SJeff Kirsher 
2105f7917c00SJeff Kirsher 	/*
2106f7917c00SJeff Kirsher 	 * Reschedule the next scan for starving Free Lists ...
2107f7917c00SJeff Kirsher 	 */
2108f7917c00SJeff Kirsher 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2109f7917c00SJeff Kirsher }
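/* The starving-FL scan above uses the standard "m &= m - 1" idiom,
 * which clears the lowest set bit on each iteration while __ffs()
 * reports that bit's position.  A worked example: for m = 0b10100 the
 * loop visits bit 2 (m becomes 0b10000) and then bit 4 (m becomes 0),
 * so every set bit is handled exactly once without rescanning the
 * word.
 */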
2110f7917c00SJeff Kirsher 
2111f7917c00SJeff Kirsher /**
2112f7917c00SJeff Kirsher  *	sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
211320bb0c8fSRahul Lakkireddy  *	@t: Tx timer
2114f7917c00SJeff Kirsher  *
2115f7917c00SJeff Kirsher  *	Runs periodically from a timer to perform maintenance of SGE TX queues.
2116f7917c00SJeff Kirsher  *
2117f7917c00SJeff Kirsher  *	Reclaims completed Tx packets for the Ethernet queues.  Normally
2118f7917c00SJeff Kirsher  *	packets are cleaned up by new Tx packets; this timer cleans them up
2119f7917c00SJeff Kirsher  *	when no new packets are being submitted.  This is essential for
2120f7917c00SJeff Kirsher  *	pktgen, at least.
2121f7917c00SJeff Kirsher  */
21220e23daebSKees Cook static void sge_tx_timer_cb(struct timer_list *t)
2123f7917c00SJeff Kirsher {
21240e23daebSKees Cook 	struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
2125f7917c00SJeff Kirsher 	struct sge *s = &adapter->sge;
2126f7917c00SJeff Kirsher 	unsigned int i, budget;
2127f7917c00SJeff Kirsher 
2128f7917c00SJeff Kirsher 	budget = MAX_TIMER_TX_RECLAIM;
2129f7917c00SJeff Kirsher 	i = s->ethtxq_rover;
2130f7917c00SJeff Kirsher 	do {
2131f7917c00SJeff Kirsher 		struct sge_eth_txq *txq = &s->ethtxq[i];
2132f7917c00SJeff Kirsher 
2133f7917c00SJeff Kirsher 		if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2134f7917c00SJeff Kirsher 			int avail = reclaimable(&txq->q);
2135f7917c00SJeff Kirsher 
2136f7917c00SJeff Kirsher 			if (avail > budget)
2137f7917c00SJeff Kirsher 				avail = budget;
2138f7917c00SJeff Kirsher 
2139f7917c00SJeff Kirsher 			free_tx_desc(adapter, &txq->q, avail, true);
2140f7917c00SJeff Kirsher 			txq->q.in_use -= avail;
2141f7917c00SJeff Kirsher 			__netif_tx_unlock(txq->txq);
2142f7917c00SJeff Kirsher 
2143f7917c00SJeff Kirsher 			budget -= avail;
2144f7917c00SJeff Kirsher 			if (!budget)
2145f7917c00SJeff Kirsher 				break;
2146f7917c00SJeff Kirsher 		}
2147f7917c00SJeff Kirsher 
2148f7917c00SJeff Kirsher 		i++;
2149f7917c00SJeff Kirsher 		if (i >= s->ethqsets)
2150f7917c00SJeff Kirsher 			i = 0;
2151f7917c00SJeff Kirsher 	} while (i != s->ethtxq_rover);
2152f7917c00SJeff Kirsher 	s->ethtxq_rover = i;
2153f7917c00SJeff Kirsher 
2154f7917c00SJeff Kirsher 	/*
2155f7917c00SJeff Kirsher 	 * If we found too many reclaimable packets schedule a timer in the
2156f7917c00SJeff Kirsher 	 * near future to continue where we left off.  Otherwise the next timer
2157f7917c00SJeff Kirsher 	 * will be at its normal interval.
2158f7917c00SJeff Kirsher 	 */
2159f7917c00SJeff Kirsher 	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2160f7917c00SJeff Kirsher }
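/* A worked example of the rover: with 8 TX queues and ethtxq_rover
 * left at 3, the sweep visits queues 3,4,...,7,0,1,2.  If the reclaim
 * budget runs out at queue 5, the rover parks there and the 2-jiffy
 * re-arm above resumes the sweep at that queue almost immediately.
 */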
2161f7917c00SJeff Kirsher 
2162f7917c00SJeff Kirsher /**
2163df64e4d3SHariprasad Shenai  *	bar2_address - return the BAR2 address for an SGE Queue's Registers
2164df64e4d3SHariprasad Shenai  *	@adapter: the adapter
2165df64e4d3SHariprasad Shenai  *	@qid: the SGE Queue ID
2166df64e4d3SHariprasad Shenai  *	@qtype: the SGE Queue Type (Egress or Ingress)
2167df64e4d3SHariprasad Shenai  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2168df64e4d3SHariprasad Shenai  *
2169df64e4d3SHariprasad Shenai  *	Returns the BAR2 address for the SGE Queue Registers associated with
2170df64e4d3SHariprasad Shenai  *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
2171df64e4d3SHariprasad Shenai  *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2172df64e4d3SHariprasad Shenai  *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2173df64e4d3SHariprasad Shenai  *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
2174df64e4d3SHariprasad Shenai  */
2175df64e4d3SHariprasad Shenai static void __iomem *bar2_address(struct adapter *adapter,
2176df64e4d3SHariprasad Shenai 				  unsigned int qid,
2177df64e4d3SHariprasad Shenai 				  enum t4_bar2_qtype qtype,
2178df64e4d3SHariprasad Shenai 				  unsigned int *pbar2_qid)
2179df64e4d3SHariprasad Shenai {
2180df64e4d3SHariprasad Shenai 	u64 bar2_qoffset;
2181df64e4d3SHariprasad Shenai 	int ret;
2182df64e4d3SHariprasad Shenai 
2183b2612722SHariprasad Shenai 	ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2184df64e4d3SHariprasad Shenai 				  &bar2_qoffset, pbar2_qid);
2185df64e4d3SHariprasad Shenai 	if (ret)
2186df64e4d3SHariprasad Shenai 		return NULL;
2187df64e4d3SHariprasad Shenai 
2188df64e4d3SHariprasad Shenai 	return adapter->bar2 + bar2_qoffset;
2189df64e4d3SHariprasad Shenai }
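/* Illustrative use (the real calls appear later in this file): the
 * address and BAR2 Queue ID cached here are exactly what the doorbell
 * writes combine, e.g.
 *
 *	rspq->bar2_addr = bar2_address(adapter, rspq->cntxt_id,
 *				       T4_BAR2_QTYPE_INGRESS,
 *				       &rspq->bar2_qid);
 *	...
 *	writel(val | INGRESSQID_V(rspq->bar2_qid),
 *	       rspq->bar2_addr + SGE_UDB_GTS);
 */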
2190df64e4d3SHariprasad Shenai 
2191df64e4d3SHariprasad Shenai /**
2192f7917c00SJeff Kirsher  *	t4vf_sge_alloc_rxq - allocate an SGE RX Queue
2193f7917c00SJeff Kirsher  *	@adapter: the adapter
2194f7917c00SJeff Kirsher  *	@rspq: pointer to the new rxq's Response Queue to be filled in
2195f7917c00SJeff Kirsher  *	@iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
2196f7917c00SJeff Kirsher  *	@dev: the network device associated with the new rspq
2197f7917c00SJeff Kirsher  *	@intr_dest: MSI-X vector index (overridden in MSI mode)
2198f7917c00SJeff Kirsher  *	@fl: pointer to the new rxq's Free List to be filled in
2199f7917c00SJeff Kirsher  *	@hnd: the interrupt handler to invoke for the rspq
2200f7917c00SJeff Kirsher  */
2201f7917c00SJeff Kirsher int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2202f7917c00SJeff Kirsher 		       bool iqasynch, struct net_device *dev,
2203f7917c00SJeff Kirsher 		       int intr_dest,
2204f7917c00SJeff Kirsher 		       struct sge_fl *fl, rspq_handler_t hnd)
2205f7917c00SJeff Kirsher {
220665f6ecc9SHariprasad Shenai 	struct sge *s = &adapter->sge;
2207f7917c00SJeff Kirsher 	struct port_info *pi = netdev_priv(dev);
2208f7917c00SJeff Kirsher 	struct fw_iq_cmd cmd, rpl;
2209f7917c00SJeff Kirsher 	int ret, iqandst, flsz = 0;
22103d78bfaaSArjun Vynipadath 	int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING);
2211f7917c00SJeff Kirsher 
2212f7917c00SJeff Kirsher 	/*
2213f7917c00SJeff Kirsher 	 * If we're using MSI interrupts and we're not initializing the
2214f7917c00SJeff Kirsher 	 * Forwarded Interrupt Queue itself, then set up this queue for
2215f7917c00SJeff Kirsher 	 * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
2216f7917c00SJeff Kirsher 	 * the Forwarded Interrupt Queue must be set up before any other
2217f7917c00SJeff Kirsher 	 * ingress queue ...
2218f7917c00SJeff Kirsher 	 */
22193d78bfaaSArjun Vynipadath 	if ((adapter->flags & CXGB4VF_USING_MSI) &&
22203d78bfaaSArjun Vynipadath 	    rspq != &adapter->sge.intrq) {
2221f7917c00SJeff Kirsher 		iqandst = SGE_INTRDST_IQ;
2222f7917c00SJeff Kirsher 		intr_dest = adapter->sge.intrq.abs_id;
2223f7917c00SJeff Kirsher 	} else
2224f7917c00SJeff Kirsher 		iqandst = SGE_INTRDST_PCI;
2225f7917c00SJeff Kirsher 
2226f7917c00SJeff Kirsher 	/*
2227f7917c00SJeff Kirsher 	 * Allocate the hardware ring for the Response Queue.  The size needs
2228f7917c00SJeff Kirsher 	 * to be a multiple of 16, which includes the mandatory status entry
2229f7917c00SJeff Kirsher 	 * (regardless of whether the Status Page capabilities are enabled or
2230f7917c00SJeff Kirsher 	 * not).
2231f7917c00SJeff Kirsher 	 */
2232f7917c00SJeff Kirsher 	rspq->size = roundup(rspq->size, 16);
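	/* e.g. a requested size of 1000 entries becomes 1008 here */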
2233f7917c00SJeff Kirsher 	rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2234f7917c00SJeff Kirsher 				0, &rspq->phys_addr, NULL, 0);
2235f7917c00SJeff Kirsher 	if (!rspq->desc)
2236f7917c00SJeff Kirsher 		return -ENOMEM;
2237f7917c00SJeff Kirsher 
2238f7917c00SJeff Kirsher 	/*
2239f7917c00SJeff Kirsher 	 * Fill in the Ingress Queue Command.  Note: Ideally this code would
2240f7917c00SJeff Kirsher 	 * be in t4vf_hw.c but there are so many parameters and dependencies
2241f7917c00SJeff Kirsher 	 * on our Linux SGE state that we would end up having to pass tons
2242f7917c00SJeff Kirsher 	 * of arguments.  We'll have to think about how this might be migrated
2243f7917c00SJeff Kirsher 	 * into OS-independent common code ...
2244f7917c00SJeff Kirsher 	 */
2245f7917c00SJeff Kirsher 	memset(&cmd, 0, sizeof(cmd));
2246e2ac9628SHariprasad Shenai 	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2247e2ac9628SHariprasad Shenai 				    FW_CMD_REQUEST_F |
2248e2ac9628SHariprasad Shenai 				    FW_CMD_WRITE_F |
2249e2ac9628SHariprasad Shenai 				    FW_CMD_EXEC_F);
22506e4b51a6SHariprasad Shenai 	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
22516e4b51a6SHariprasad Shenai 					 FW_IQ_CMD_IQSTART_F |
2252f7917c00SJeff Kirsher 					 FW_LEN16(cmd));
2253f7917c00SJeff Kirsher 	cmd.type_to_iqandstindex =
22546e4b51a6SHariprasad Shenai 		cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
22556e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_IQASYNCH_V(iqasynch) |
22566e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_VIID_V(pi->viid) |
22576e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_IQANDST_V(iqandst) |
22586e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_IQANUS_V(1) |
22596e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
22606e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2261f7917c00SJeff Kirsher 	cmd.iqdroprss_to_iqesize =
22626e4b51a6SHariprasad Shenai 		cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
22636e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_IQGTSMODE_F |
22646e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
22656e4b51a6SHariprasad Shenai 			    FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2266f7917c00SJeff Kirsher 	cmd.iqsize = cpu_to_be16(rspq->size);
2267f7917c00SJeff Kirsher 	cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2268f7917c00SJeff Kirsher 
2269f7917c00SJeff Kirsher 	if (fl) {
2270d429005fSVishal Kulkarni 		unsigned int chip_ver =
227141fc2e41SHariprasad Shenai 			CHELSIO_CHIP_VERSION(adapter->params.chip);
2272f7917c00SJeff Kirsher 		/*
2273f7917c00SJeff Kirsher 		 * Allocate the ring for the hardware free list (with space
2274f7917c00SJeff Kirsher 		 * for its status page) along with the associated software
2275f7917c00SJeff Kirsher 		 * descriptor ring.  The free list size needs to be a multiple
227613432997SHariprasad Shenai 		 * of the Egress Queue Unit and at least 2 Egress Units larger
227813432997SHariprasad Shenai 		 * than the SGE's Egress Congestion Threshold
227813432997SHariprasad Shenai 		 * (fl_starve_thres - 1).
2279f7917c00SJeff Kirsher 		 */
228013432997SHariprasad Shenai 		if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
228113432997SHariprasad Shenai 			fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
2282f7917c00SJeff Kirsher 		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
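		/* Worked example, assuming the usual 64-byte Egress Queue
		 * Unit (FL_PER_EQ_UNIT == 8 pointers): with fl_starve_thres
		 * == 129 the floor above is 128 + 16 == 144 buffers, which
		 * is already a multiple of FL_PER_EQ_UNIT so the roundup()
		 * leaves it unchanged.
		 */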
2283f7917c00SJeff Kirsher 		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2284f7917c00SJeff Kirsher 				      sizeof(__be64), sizeof(struct rx_sw_desc),
228565f6ecc9SHariprasad Shenai 				      &fl->addr, &fl->sdesc, s->stat_len);
2286f7917c00SJeff Kirsher 		if (!fl->desc) {
2287f7917c00SJeff Kirsher 			ret = -ENOMEM;
2288f7917c00SJeff Kirsher 			goto err;
2289f7917c00SJeff Kirsher 		}
2290f7917c00SJeff Kirsher 
2291f7917c00SJeff Kirsher 		/*
2292f7917c00SJeff Kirsher 		 * Calculate the size of the hardware free list ring plus
2293f7917c00SJeff Kirsher 		 * Status Page (which the SGE will place after the end of the
2294f7917c00SJeff Kirsher 		 * free list ring) in Egress Queue Units.
2295f7917c00SJeff Kirsher 		 */
2296f7917c00SJeff Kirsher 		flsz = (fl->size / FL_PER_EQ_UNIT +
229765f6ecc9SHariprasad Shenai 			s->stat_len / EQ_UNIT);
2298f7917c00SJeff Kirsher 
2299f7917c00SJeff Kirsher 		/*
2300f7917c00SJeff Kirsher 		 * Fill in all the relevant firmware Ingress Queue Command
2301f7917c00SJeff Kirsher 		 * fields for the free list.
2302f7917c00SJeff Kirsher 		 */
2303f7917c00SJeff Kirsher 		cmd.iqns_to_fl0congen =
2304f7917c00SJeff Kirsher 			cpu_to_be32(
23056e4b51a6SHariprasad Shenai 				FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
23066e4b51a6SHariprasad Shenai 				FW_IQ_CMD_FL0PACKEN_F |
2307b629276dSCasey Leedom 				FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2308b629276dSCasey Leedom 				FW_IQ_CMD_FL0DATARO_V(relaxed) |
23096e4b51a6SHariprasad Shenai 				FW_IQ_CMD_FL0PADEN_F);
2310edadad80SHariprasad Shenai 
2311edadad80SHariprasad Shenai 		/* In T6, egress queues of type FL carry an internal overhead
2312edadad80SHariprasad Shenai 		 * of 16B for the header going into the FLM module, so the maximum
2313edadad80SHariprasad Shenai 		 * allowed burst size is 448 bytes.  For T4/T5, the hardware
2314edadad80SHariprasad Shenai 		 * doesn't coalesce fetch requests if more than 64 bytes of
2315edadad80SHariprasad Shenai 		 * Free List pointers are provided, so we use a 128-byte Fetch
2316edadad80SHariprasad Shenai 		 * Burst Minimum there (T6 implements coalescing so we can use
2317edadad80SHariprasad Shenai 		 * the smaller 64-byte value there).
2318edadad80SHariprasad Shenai 		 */
2319f7917c00SJeff Kirsher 		cmd.fl0dcaen_to_fl0cidxfthresh =
2320f7917c00SJeff Kirsher 			cpu_to_be16(
2321d429005fSVishal Kulkarni 				FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5
2322d429005fSVishal Kulkarni 						     ? FETCHBURSTMIN_128B_X
2323d429005fSVishal Kulkarni 						     : FETCHBURSTMIN_64B_T6_X) |
2324d429005fSVishal Kulkarni 				FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
232541fc2e41SHariprasad Shenai 						     FETCHBURSTMAX_512B_X :
232641fc2e41SHariprasad Shenai 						     FETCHBURSTMAX_256B_X));
2327f7917c00SJeff Kirsher 		cmd.fl0size = cpu_to_be16(flsz);
2328f7917c00SJeff Kirsher 		cmd.fl0addr = cpu_to_be64(fl->addr);
2329f7917c00SJeff Kirsher 	}
2330f7917c00SJeff Kirsher 
2331f7917c00SJeff Kirsher 	/*
2332f7917c00SJeff Kirsher 	 * Issue the firmware Ingress Queue Command and extract the results if
2333f7917c00SJeff Kirsher 	 * it completes successfully.
2334f7917c00SJeff Kirsher 	 */
2335f7917c00SJeff Kirsher 	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2336f7917c00SJeff Kirsher 	if (ret)
2337f7917c00SJeff Kirsher 		goto err;
2338f7917c00SJeff Kirsher 
2339*b48b89f9SJakub Kicinski 	netif_napi_add(dev, &rspq->napi, napi_rx_handler);
2340f7917c00SJeff Kirsher 	rspq->cur_desc = rspq->desc;
2341f7917c00SJeff Kirsher 	rspq->cidx = 0;
2342f7917c00SJeff Kirsher 	rspq->gen = 1;
2343f7917c00SJeff Kirsher 	rspq->next_intr_params = rspq->intr_params;
2344f7917c00SJeff Kirsher 	rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2345df64e4d3SHariprasad Shenai 	rspq->bar2_addr = bar2_address(adapter,
2346df64e4d3SHariprasad Shenai 				       rspq->cntxt_id,
2347df64e4d3SHariprasad Shenai 				       T4_BAR2_QTYPE_INGRESS,
2348df64e4d3SHariprasad Shenai 				       &rspq->bar2_qid);
2349f7917c00SJeff Kirsher 	rspq->abs_id = be16_to_cpu(rpl.physiqid);
2350f7917c00SJeff Kirsher 	rspq->size--;			/* subtract status entry */
2351f7917c00SJeff Kirsher 	rspq->adapter = adapter;
2352f7917c00SJeff Kirsher 	rspq->netdev = dev;
2353f7917c00SJeff Kirsher 	rspq->handler = hnd;
2354f7917c00SJeff Kirsher 
2355f7917c00SJeff Kirsher 	/* set offset to -1 to distinguish ingress queues without FL */
2356f7917c00SJeff Kirsher 	rspq->offset = fl ? 0 : -1;
2357f7917c00SJeff Kirsher 
2358f7917c00SJeff Kirsher 	if (fl) {
2359f7917c00SJeff Kirsher 		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2360f7917c00SJeff Kirsher 		fl->avail = 0;
2361f7917c00SJeff Kirsher 		fl->pend_cred = 0;
2362f7917c00SJeff Kirsher 		fl->pidx = 0;
2363f7917c00SJeff Kirsher 		fl->cidx = 0;
2364f7917c00SJeff Kirsher 		fl->alloc_failed = 0;
2365f7917c00SJeff Kirsher 		fl->large_alloc_failed = 0;
2366f7917c00SJeff Kirsher 		fl->starving = 0;
2367df64e4d3SHariprasad Shenai 
2368df64e4d3SHariprasad Shenai 		/* Note, we must initialize the BAR2 Free List User Doorbell
2369df64e4d3SHariprasad Shenai 		 * information before refilling the Free List!
2370df64e4d3SHariprasad Shenai 		 */
2371df64e4d3SHariprasad Shenai 		fl->bar2_addr = bar2_address(adapter,
2372df64e4d3SHariprasad Shenai 					     fl->cntxt_id,
2373df64e4d3SHariprasad Shenai 					     T4_BAR2_QTYPE_EGRESS,
2374df64e4d3SHariprasad Shenai 					     &fl->bar2_qid);
2375df64e4d3SHariprasad Shenai 
2376f7917c00SJeff Kirsher 		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2377f7917c00SJeff Kirsher 	}
2378f7917c00SJeff Kirsher 
2379f7917c00SJeff Kirsher 	return 0;
2380f7917c00SJeff Kirsher 
2381f7917c00SJeff Kirsher err:
2382f7917c00SJeff Kirsher 	/*
2383f7917c00SJeff Kirsher 	 * An error occurred.  Clean up our partial allocation state and
2384f7917c00SJeff Kirsher 	 * return the error.
2385f7917c00SJeff Kirsher 	 */
2386f7917c00SJeff Kirsher 	if (rspq->desc) {
2387f7917c00SJeff Kirsher 		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2388f7917c00SJeff Kirsher 				  rspq->desc, rspq->phys_addr);
2389f7917c00SJeff Kirsher 		rspq->desc = NULL;
2390f7917c00SJeff Kirsher 	}
2391f7917c00SJeff Kirsher 	if (fl && fl->desc) {
2392f7917c00SJeff Kirsher 		kfree(fl->sdesc);
2393f7917c00SJeff Kirsher 		fl->sdesc = NULL;
2394f7917c00SJeff Kirsher 		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2395f7917c00SJeff Kirsher 				  fl->desc, fl->addr);
2396f7917c00SJeff Kirsher 		fl->desc = NULL;
2397f7917c00SJeff Kirsher 	}
2398f7917c00SJeff Kirsher 	return ret;
2399f7917c00SJeff Kirsher }
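/* Illustrative call (a sketch based on the cxgb4vf main driver, not
 * code from this file): a typical Ethernet Queue Set setup looks
 * roughly like
 *
 *	err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, dev,
 *				 msix_vector, &rxq->fl,
 *				 t4vf_ethrx_handler);
 *
 * where "msix_vector" is an assumption for the example; on error the
 * function has already unwound its partial allocations.
 */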
2400f7917c00SJeff Kirsher 
2401f7917c00SJeff Kirsher /**
2402f7917c00SJeff Kirsher  *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
2403f7917c00SJeff Kirsher  *	@adapter: the adapter
2404f7917c00SJeff Kirsher  *	@txq: pointer to the new txq to be filled in
240520bb0c8fSRahul Lakkireddy  *	@dev: the network device
2406f7917c00SJeff Kirsher  *	@devq: the network TX queue associated with the new txq
2407f7917c00SJeff Kirsher  *	@iqid: the relative ingress queue ID to which events relating to
2408f7917c00SJeff Kirsher  *		the new txq should be directed
2409f7917c00SJeff Kirsher  */
2410f7917c00SJeff Kirsher int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2411f7917c00SJeff Kirsher 			   struct net_device *dev, struct netdev_queue *devq,
2412f7917c00SJeff Kirsher 			   unsigned int iqid)
2413f7917c00SJeff Kirsher {
2414d429005fSVishal Kulkarni 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
2415d429005fSVishal Kulkarni 	struct port_info *pi = netdev_priv(dev);
2416d429005fSVishal Kulkarni 	struct fw_eq_eth_cmd cmd, rpl;
241765f6ecc9SHariprasad Shenai 	struct sge *s = &adapter->sge;
2418f7917c00SJeff Kirsher 	int ret, nentries;
2419f7917c00SJeff Kirsher 
2420f7917c00SJeff Kirsher 	/*
2421f7917c00SJeff Kirsher 	 * Calculate the size of the hardware TX Queue (including the Status
2422f7917c00SJeff Kirsher 	 * Page on the end of the TX Queue) in units of TX Descriptors.
2423f7917c00SJeff Kirsher 	 */
242465f6ecc9SHariprasad Shenai 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
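	/* e.g. with 64-byte TX descriptors, a 64-byte Status Page adds
	 * exactly one extra entry here (a 128-byte one adds two)
	 */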
2425f7917c00SJeff Kirsher 
2426f7917c00SJeff Kirsher 	/*
2427f7917c00SJeff Kirsher 	 * Allocate the hardware ring for the TX ring (with space for its
2428f7917c00SJeff Kirsher 	 * status page) along with the associated software descriptor ring.
2429f7917c00SJeff Kirsher 	 */
2430f7917c00SJeff Kirsher 	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2431f7917c00SJeff Kirsher 				 sizeof(struct tx_desc),
2432f7917c00SJeff Kirsher 				 sizeof(struct tx_sw_desc),
243365f6ecc9SHariprasad Shenai 				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2434f7917c00SJeff Kirsher 	if (!txq->q.desc)
2435f7917c00SJeff Kirsher 		return -ENOMEM;
2436f7917c00SJeff Kirsher 
2437f7917c00SJeff Kirsher 	/*
2438f7917c00SJeff Kirsher 	 * Fill in the Egress Queue Command.  Note: As with the direct use of
2439f7917c00SJeff Kirsher 	 * the firmware Ingress Queue Command above in our RXQ allocation
2440f7917c00SJeff Kirsher 	 * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
2441f7917c00SJeff Kirsher 	 * have to see if there's some reasonable way to parameterize it
2442f7917c00SJeff Kirsher 	 * into the common code ...
2443f7917c00SJeff Kirsher 	 */
2444f7917c00SJeff Kirsher 	memset(&cmd, 0, sizeof(cmd));
2445e2ac9628SHariprasad Shenai 	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2446e2ac9628SHariprasad Shenai 				    FW_CMD_REQUEST_F |
2447e2ac9628SHariprasad Shenai 				    FW_CMD_WRITE_F |
2448e2ac9628SHariprasad Shenai 				    FW_CMD_EXEC_F);
24496e4b51a6SHariprasad Shenai 	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
24506e4b51a6SHariprasad Shenai 					 FW_EQ_ETH_CMD_EQSTART_F |
2451f7917c00SJeff Kirsher 					 FW_LEN16(cmd));
2452d429005fSVishal Kulkarni 	cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
24536e4b51a6SHariprasad Shenai 					     FW_EQ_ETH_CMD_VIID_V(pi->viid));
2454f7917c00SJeff Kirsher 	cmd.fetchszm_to_iqid =
24556e4b51a6SHariprasad Shenai 		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
24566e4b51a6SHariprasad Shenai 			    FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
24576e4b51a6SHariprasad Shenai 			    FW_EQ_ETH_CMD_IQID_V(iqid));
2458f7917c00SJeff Kirsher 	cmd.dcaen_to_eqsize =
2459d429005fSVishal Kulkarni 		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
2460d429005fSVishal Kulkarni 						  ? FETCHBURSTMIN_64B_X
2461d429005fSVishal Kulkarni 						  : FETCHBURSTMIN_64B_T6_X) |
2462d429005fSVishal Kulkarni 			    FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
24636e4b51a6SHariprasad Shenai 			    FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2464d429005fSVishal Kulkarni 						CIDXFLUSHTHRESH_32_X) |
24656e4b51a6SHariprasad Shenai 			    FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2466f7917c00SJeff Kirsher 	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2467f7917c00SJeff Kirsher 
2468f7917c00SJeff Kirsher 	/*
2469f7917c00SJeff Kirsher 	 * Issue the firmware Egress Queue Command and extract the results if
2470f7917c00SJeff Kirsher 	 * it completes successfully.
2471f7917c00SJeff Kirsher 	 */
2472f7917c00SJeff Kirsher 	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2473f7917c00SJeff Kirsher 	if (ret) {
2474f7917c00SJeff Kirsher 		/*
2475f7917c00SJeff Kirsher 		 * The firmware Egress Queue Command failed for some reason.
2476f7917c00SJeff Kirsher 		 * Free up our partial allocation state and return the error.
2477f7917c00SJeff Kirsher 		 */
2478f7917c00SJeff Kirsher 		kfree(txq->q.sdesc);
2479f7917c00SJeff Kirsher 		txq->q.sdesc = NULL;
2480f7917c00SJeff Kirsher 		dma_free_coherent(adapter->pdev_dev,
2481f7917c00SJeff Kirsher 				  nentries * sizeof(struct tx_desc),
2482f7917c00SJeff Kirsher 				  txq->q.desc, txq->q.phys_addr);
2483f7917c00SJeff Kirsher 		txq->q.desc = NULL;
2484f7917c00SJeff Kirsher 		return ret;
2485f7917c00SJeff Kirsher 	}
2486f7917c00SJeff Kirsher 
2487f7917c00SJeff Kirsher 	txq->q.in_use = 0;
2488f7917c00SJeff Kirsher 	txq->q.cidx = 0;
2489f7917c00SJeff Kirsher 	txq->q.pidx = 0;
2490f7917c00SJeff Kirsher 	txq->q.stat = (void *)&txq->q.desc[txq->q.size];
24916e4b51a6SHariprasad Shenai 	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2492df64e4d3SHariprasad Shenai 	txq->q.bar2_addr = bar2_address(adapter,
2493df64e4d3SHariprasad Shenai 					txq->q.cntxt_id,
2494df64e4d3SHariprasad Shenai 					T4_BAR2_QTYPE_EGRESS,
2495df64e4d3SHariprasad Shenai 					&txq->q.bar2_qid);
2496f7917c00SJeff Kirsher 	txq->q.abs_id =
24976e4b51a6SHariprasad Shenai 		FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2498f7917c00SJeff Kirsher 	txq->txq = devq;
2499f7917c00SJeff Kirsher 	txq->tso = 0;
2500f7917c00SJeff Kirsher 	txq->tx_cso = 0;
2501f7917c00SJeff Kirsher 	txq->vlan_ins = 0;
2502f7917c00SJeff Kirsher 	txq->q.stops = 0;
2503f7917c00SJeff Kirsher 	txq->q.restarts = 0;
2504f7917c00SJeff Kirsher 	txq->mapping_err = 0;
2505f7917c00SJeff Kirsher 	return 0;
2506f7917c00SJeff Kirsher }
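/* Illustrative call (a sketch based on the cxgb4vf main driver, not
 * code from this file): TX queues are typically bound to the firmware
 * event queue for their completions, roughly
 *
 *	err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
 *				     netdev_get_tx_queue(dev, qs),
 *				     s->fw_evtq.cntxt_id);
 *
 * where "qs" (the Queue Set index) is an assumption for the example.
 */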
2507f7917c00SJeff Kirsher 
2508f7917c00SJeff Kirsher /*
2509f7917c00SJeff Kirsher  * Free the DMA map resources associated with a TX queue.
2510f7917c00SJeff Kirsher  */
2511f7917c00SJeff Kirsher static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2512f7917c00SJeff Kirsher {
251365f6ecc9SHariprasad Shenai 	struct sge *s = &adapter->sge;
251465f6ecc9SHariprasad Shenai 
2515f7917c00SJeff Kirsher 	dma_free_coherent(adapter->pdev_dev,
251665f6ecc9SHariprasad Shenai 			  tq->size * sizeof(*tq->desc) + s->stat_len,
2517f7917c00SJeff Kirsher 			  tq->desc, tq->phys_addr);
2518f7917c00SJeff Kirsher 	tq->cntxt_id = 0;
2519f7917c00SJeff Kirsher 	tq->sdesc = NULL;
2520f7917c00SJeff Kirsher 	tq->desc = NULL;
2521f7917c00SJeff Kirsher }
2522f7917c00SJeff Kirsher 
2523f7917c00SJeff Kirsher /*
2524f7917c00SJeff Kirsher  * Free the resources associated with a response queue (possibly including a
2525f7917c00SJeff Kirsher  * free list).
2526f7917c00SJeff Kirsher  */
2527f7917c00SJeff Kirsher static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2528f7917c00SJeff Kirsher 			 struct sge_fl *fl)
2529f7917c00SJeff Kirsher {
253065f6ecc9SHariprasad Shenai 	struct sge *s = &adapter->sge;
2531f7917c00SJeff Kirsher 	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2532f7917c00SJeff Kirsher 
2533f7917c00SJeff Kirsher 	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2534f7917c00SJeff Kirsher 		     rspq->cntxt_id, flid, 0xffff);
2535f7917c00SJeff Kirsher 	dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2536f7917c00SJeff Kirsher 			  rspq->desc, rspq->phys_addr);
2537f7917c00SJeff Kirsher 	netif_napi_del(&rspq->napi);
2538f7917c00SJeff Kirsher 	rspq->netdev = NULL;
2539f7917c00SJeff Kirsher 	rspq->cntxt_id = 0;
2540f7917c00SJeff Kirsher 	rspq->abs_id = 0;
2541f7917c00SJeff Kirsher 	rspq->desc = NULL;
2542f7917c00SJeff Kirsher 
2543f7917c00SJeff Kirsher 	if (fl) {
2544f7917c00SJeff Kirsher 		free_rx_bufs(adapter, fl, fl->avail);
2545f7917c00SJeff Kirsher 		dma_free_coherent(adapter->pdev_dev,
254665f6ecc9SHariprasad Shenai 				  fl->size * sizeof(*fl->desc) + s->stat_len,
2547f7917c00SJeff Kirsher 				  fl->desc, fl->addr);
2548f7917c00SJeff Kirsher 		kfree(fl->sdesc);
2549f7917c00SJeff Kirsher 		fl->sdesc = NULL;
2550f7917c00SJeff Kirsher 		fl->cntxt_id = 0;
2551f7917c00SJeff Kirsher 		fl->desc = NULL;
2552f7917c00SJeff Kirsher 	}
2553f7917c00SJeff Kirsher }
2554f7917c00SJeff Kirsher 
2555f7917c00SJeff Kirsher /**
2556f7917c00SJeff Kirsher  *	t4vf_free_sge_resources - free SGE resources
2557f7917c00SJeff Kirsher  *	@adapter: the adapter
2558f7917c00SJeff Kirsher  *
2559f7917c00SJeff Kirsher  *	Frees resources used by the SGE queue sets.
2560f7917c00SJeff Kirsher  */
2561f7917c00SJeff Kirsher void t4vf_free_sge_resources(struct adapter *adapter)
2562f7917c00SJeff Kirsher {
2563f7917c00SJeff Kirsher 	struct sge *s = &adapter->sge;
2564f7917c00SJeff Kirsher 	struct sge_eth_rxq *rxq = s->ethrxq;
2565f7917c00SJeff Kirsher 	struct sge_eth_txq *txq = s->ethtxq;
2566f7917c00SJeff Kirsher 	struct sge_rspq *evtq = &s->fw_evtq;
2567f7917c00SJeff Kirsher 	struct sge_rspq *intrq = &s->intrq;
2568f7917c00SJeff Kirsher 	int qs;
2569f7917c00SJeff Kirsher 
2570f7917c00SJeff Kirsher 	for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2571f7917c00SJeff Kirsher 		if (rxq->rspq.desc)
2572f7917c00SJeff Kirsher 			free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2573f7917c00SJeff Kirsher 		if (txq->q.desc) {
2574f7917c00SJeff Kirsher 			t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2575f7917c00SJeff Kirsher 			free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2576f7917c00SJeff Kirsher 			kfree(txq->q.sdesc);
2577f7917c00SJeff Kirsher 			free_txq(adapter, &txq->q);
2578f7917c00SJeff Kirsher 		}
2579f7917c00SJeff Kirsher 	}
2580f7917c00SJeff Kirsher 	if (evtq->desc)
2581f7917c00SJeff Kirsher 		free_rspq_fl(adapter, evtq, NULL);
2582f7917c00SJeff Kirsher 	if (intrq->desc)
2583f7917c00SJeff Kirsher 		free_rspq_fl(adapter, intrq, NULL);
2584f7917c00SJeff Kirsher }
2585f7917c00SJeff Kirsher 
2586f7917c00SJeff Kirsher /**
2587f7917c00SJeff Kirsher  *	t4vf_sge_start - enable SGE operation
2588f7917c00SJeff Kirsher  *	@adapter: the adapter
2589f7917c00SJeff Kirsher  *
2590f7917c00SJeff Kirsher  *	Start tasklets and timers associated with the DMA engine.
2591f7917c00SJeff Kirsher  */
2592f7917c00SJeff Kirsher void t4vf_sge_start(struct adapter *adapter)
2593f7917c00SJeff Kirsher {
2594f7917c00SJeff Kirsher 	adapter->sge.ethtxq_rover = 0;
2595f7917c00SJeff Kirsher 	mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2596f7917c00SJeff Kirsher 	mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2597f7917c00SJeff Kirsher }
2598f7917c00SJeff Kirsher 
2599f7917c00SJeff Kirsher /**
2600f7917c00SJeff Kirsher  *	t4vf_sge_stop - disable SGE operation
2601f7917c00SJeff Kirsher  *	@adapter: the adapter
2602f7917c00SJeff Kirsher  *
2603f7917c00SJeff Kirsher  *	Stop tasklets and timers associated with the DMA engine.  Note that
2604f7917c00SJeff Kirsher  *	this is effective only if measures have been taken to disable any HW
2605f7917c00SJeff Kirsher  *	events that may restart them.
2606f7917c00SJeff Kirsher  */
2607f7917c00SJeff Kirsher void t4vf_sge_stop(struct adapter *adapter)
2608f7917c00SJeff Kirsher {
2609f7917c00SJeff Kirsher 	struct sge *s = &adapter->sge;
2610f7917c00SJeff Kirsher 
2611f7917c00SJeff Kirsher 	if (s->rx_timer.function)
2612f7917c00SJeff Kirsher 		del_timer_sync(&s->rx_timer);
2613f7917c00SJeff Kirsher 	if (s->tx_timer.function)
2614f7917c00SJeff Kirsher 		del_timer_sync(&s->tx_timer);
2615f7917c00SJeff Kirsher }
2616f7917c00SJeff Kirsher 
2617f7917c00SJeff Kirsher /**
2618f7917c00SJeff Kirsher  *	t4vf_sge_init - initialize SGE
2619f7917c00SJeff Kirsher  *	@adapter: the adapter
2620f7917c00SJeff Kirsher  *
2621f7917c00SJeff Kirsher  *	Performs SGE initialization needed every time after a chip reset.
2622f7917c00SJeff Kirsher  *	We do not initialize any of the queue sets here, instead the driver
2623f7917c00SJeff Kirsher  *	top-level must request those individually.  We also do not enable DMA
2624f7917c00SJeff Kirsher  *	here, that should be done after the queues have been set up.
2625f7917c00SJeff Kirsher  */
2626f7917c00SJeff Kirsher int t4vf_sge_init(struct adapter *adapter)
2627f7917c00SJeff Kirsher {
2628f7917c00SJeff Kirsher 	struct sge_params *sge_params = &adapter->params.sge;
2629ea0a4210SArjun Vynipadath 	u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
2630ea0a4210SArjun Vynipadath 	u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
2631f7917c00SJeff Kirsher 	struct sge *s = &adapter->sge;
2632f7917c00SJeff Kirsher 
2633f7917c00SJeff Kirsher 	/*
2634f7917c00SJeff Kirsher 	 * Start by vetting the basic SGE parameters which have been set up by
2635f7917c00SJeff Kirsher 	 * the Physical Function Driver.  Ideally we should be able to deal
2636f7917c00SJeff Kirsher 	 * with _any_ configuration.  Practice is different ...
2637f7917c00SJeff Kirsher 	 */
2638ea0a4210SArjun Vynipadath 
2639ea0a4210SArjun Vynipadath 	/* We only bother using the Large Page logic if the Large Page Buffer
2640ea0a4210SArjun Vynipadath 	 * is larger than our Page Size Buffer.
2641ea0a4210SArjun Vynipadath 	 */
2642ea0a4210SArjun Vynipadath 	if (fl_large_pg <= fl_small_pg)
2643ea0a4210SArjun Vynipadath 		fl_large_pg = 0;
2644ea0a4210SArjun Vynipadath 
2645ea0a4210SArjun Vynipadath 	/* The Page Size Buffer must be exactly equal to our Page Size and the
2646ea0a4210SArjun Vynipadath 	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2647ea0a4210SArjun Vynipadath 	 */
2648ea0a4210SArjun Vynipadath 	if (fl_small_pg != PAGE_SIZE ||
2649ea0a4210SArjun Vynipadath 	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
2650f7917c00SJeff Kirsher 		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2651ea0a4210SArjun Vynipadath 			fl_small_pg, fl_large_pg);
2652f7917c00SJeff Kirsher 		return -EINVAL;
2653f7917c00SJeff Kirsher 	}
2654cb440364SHariprasad Shenai 	if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2655cb440364SHariprasad Shenai 	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2656f7917c00SJeff Kirsher 		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2657f7917c00SJeff Kirsher 		return -EINVAL;
2658f7917c00SJeff Kirsher 	}
2659f7917c00SJeff Kirsher 
2660f7917c00SJeff Kirsher 	/*
2661f7917c00SJeff Kirsher 	 * Now translate the adapter parameters into our internal forms.
2662f7917c00SJeff Kirsher 	 */
2663ea0a4210SArjun Vynipadath 	if (fl_large_pg)
2664ea0a4210SArjun Vynipadath 		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
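	/* e.g. a 64KB Large Page Buffer on a 4KB-page system yields
	 * fl_pg_order = ilog2(65536) - 12 = 4, i.e. order-4 allocations
	 */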
2665f612b815SHariprasad Shenai 	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
266652367a76SVipul Pandya 			? 128 : 64);
2667f612b815SHariprasad Shenai 	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2668cb440364SHariprasad Shenai 	s->fl_align = t4vf_fl_pkt_align(adapter);
2669f7917c00SJeff Kirsher 
267050d21a66SHariprasad Shenai 	/* A FL with <= fl_starve_thres buffers is starving and a periodic
267150d21a66SHariprasad Shenai 	 * timer will attempt to refill it.  This needs to be larger than the
267250d21a66SHariprasad Shenai 	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
267350d21a66SHariprasad Shenai 	 * stuck waiting for new packets while the SGE is waiting for us to
267450d21a66SHariprasad Shenai 	 * give it more Free List entries.  (Note that the SGE's Egress
267550d21a66SHariprasad Shenai 	 * Congestion Threshold is in units of 2 Free List pointers.)
267650d21a66SHariprasad Shenai 	 */
2677ea6f82feSHariprasad Shenai 	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
2678ea6f82feSHariprasad Shenai 	case CHELSIO_T4:
2679ea6f82feSHariprasad Shenai 		s->fl_starve_thres =
2680ea6f82feSHariprasad Shenai 		   EGRTHRESHOLD_G(sge_params->sge_congestion_control);
2681ea6f82feSHariprasad Shenai 		break;
2682ea6f82feSHariprasad Shenai 	case CHELSIO_T5:
2683ea6f82feSHariprasad Shenai 		s->fl_starve_thres =
2684ea6f82feSHariprasad Shenai 		   EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2685ea6f82feSHariprasad Shenai 		break;
2686ea6f82feSHariprasad Shenai 	case CHELSIO_T6:
2687ea6f82feSHariprasad Shenai 	default:
2688ea6f82feSHariprasad Shenai 		s->fl_starve_thres =
2689ea6f82feSHariprasad Shenai 		   T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2690ea6f82feSHariprasad Shenai 		break;
2691ea6f82feSHariprasad Shenai 	}
2692ea6f82feSHariprasad Shenai 	s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
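	/* e.g. a raw threshold of 64 (i.e. 128 Free List pointers, per the
	 * units noted above) becomes 64 * 2 + 1 == 129 buffers below which
	 * a Free List is considered starving
	 */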
2693f7917c00SJeff Kirsher 
2694f7917c00SJeff Kirsher 	/*
2695f7917c00SJeff Kirsher 	 * Set up tasklet timers.
2696f7917c00SJeff Kirsher 	 */
26970e23daebSKees Cook 	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
26980e23daebSKees Cook 	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
2699f7917c00SJeff Kirsher 
2700f7917c00SJeff Kirsher 	/*
2701f7917c00SJeff Kirsher 	 * Initialize Forwarded Interrupt Queue lock.
2702f7917c00SJeff Kirsher 	 */
2703f7917c00SJeff Kirsher 	spin_lock_init(&s->intrq_lock);
2704f7917c00SJeff Kirsher 
2705f7917c00SJeff Kirsher 	return 0;
2706f7917c00SJeff Kirsher }
2707