/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
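
/* A minimal worked example (illustrative, assuming the driver's usual
 * default of 1024 ring entries): EFX_TXQ_THRESHOLD then evaluates to 512,
 * so a queue stopped on overflow is only woken once completions drain the
 * fill-level below half the ring, giving hysteresis against stop/wake
 * flapping under load.
 */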

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
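
/* Worked example (illustrative values): for dma_addr ending in 0x234,
 * (~dma_addr & 0xfff) + 1 == 0xdcc (3532 bytes), exactly the distance to
 * the next 4K boundary.  A longer buffer is therefore emitted as several
 * descriptors, none of which crosses a 4K PCIe read-request boundary.
 */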

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				if (likely(!efx->loopback_selftest))
					netif_tx_start_queue(
						tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
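
/* Illustrative mapping (assuming, say, n_tx_channels == 4): core queue
 * indices 0-3 reach the normal-priority queues on channels 0-3 and
 * indices 4-7 the EFX_TXQ_TYPE_HIGHPRI queues on the same channels.
 * The expression above recovers that core index from the hardware queue
 * number, keeping the two lookups in sync.
 */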

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
			netif_tx_wake_queue(tx_queue->core_txq);
		}
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
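
/* Illustrative sizing (assuming txq_entries == 1000): roundup_pow_of_two()
 * yields 1024 entries and ptr_mask == 1023, so the free-running
 * insert/read counters reduce to ring indices with a single AND,
 * e.g. (1030 & 1023) == 6 once the counters pass the ring size.
 */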

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128
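
/* Back-of-envelope check (illustrative, assuming a 64-bit build where
 * struct efx_tso_header is 16 bytes and TSOH_OFFSET is 0): a standard
 * Ethernet/IPv4/TCP header of 54 bytes gives TSOH_SIZE(54) == 70, well
 * under TSOH_STD_SIZE, so typical headers come from the free list and
 * only oversized ones (e.g. many TCP options) fall back to the heap.
 */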

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{

	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue is full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}

/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
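
/* Worked example (illustrative): an untagged IPv4 skb with no IP or TCP
 * options has header_len == 14 + 20 + 20 == 54; with gso_size == 1460,
 * full_packet_size == 1514, i.e. one standard full-size Ethernet frame
 * per emitted segment until out_len is exhausted.
 */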

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}
1023874aeea5SJeff Kirsher 
1024874aeea5SJeff Kirsher 
1025874aeea5SJeff Kirsher /**
1026874aeea5SJeff Kirsher  * tso_start_new_packet - generate a new header and prepare for the new packet
1027874aeea5SJeff Kirsher  * @tx_queue:		Efx TX queue
1028874aeea5SJeff Kirsher  * @skb:		Socket buffer
1029874aeea5SJeff Kirsher  * @st:			TSO state
1030874aeea5SJeff Kirsher  *
1031874aeea5SJeff Kirsher  * Generate a new header and prepare for the new packet.  Return 0 on
1032874aeea5SJeff Kirsher  * success, or -1 if failed to alloc header.
1033874aeea5SJeff Kirsher  */
1034874aeea5SJeff Kirsher static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1035874aeea5SJeff Kirsher 				const struct sk_buff *skb,
1036874aeea5SJeff Kirsher 				struct tso_state *st)
1037874aeea5SJeff Kirsher {
1038874aeea5SJeff Kirsher 	struct efx_tso_header *tsoh;
1039874aeea5SJeff Kirsher 	struct tcphdr *tsoh_th;
1040874aeea5SJeff Kirsher 	unsigned ip_length;
1041874aeea5SJeff Kirsher 	u8 *header;
1042874aeea5SJeff Kirsher 
1043874aeea5SJeff Kirsher 	/* Allocate a DMA-mapped header buffer. */
1044874aeea5SJeff Kirsher 	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
1045874aeea5SJeff Kirsher 		if (tx_queue->tso_headers_free == NULL) {
1046874aeea5SJeff Kirsher 			if (efx_tsoh_block_alloc(tx_queue))
1047874aeea5SJeff Kirsher 				return -1;
1048874aeea5SJeff Kirsher 		}
1049874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
1050874aeea5SJeff Kirsher 		tsoh = tx_queue->tso_headers_free;
1051874aeea5SJeff Kirsher 		tx_queue->tso_headers_free = tsoh->next;
1052874aeea5SJeff Kirsher 		tsoh->unmap_len = 0;
1053874aeea5SJeff Kirsher 	} else {
1054874aeea5SJeff Kirsher 		tx_queue->tso_long_headers++;
1055874aeea5SJeff Kirsher 		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
1056874aeea5SJeff Kirsher 		if (unlikely(!tsoh))
1057874aeea5SJeff Kirsher 			return -1;
1058874aeea5SJeff Kirsher 	}
1059874aeea5SJeff Kirsher 
1060874aeea5SJeff Kirsher 	header = TSOH_BUFFER(tsoh);
1061874aeea5SJeff Kirsher 	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
1062874aeea5SJeff Kirsher 
1063874aeea5SJeff Kirsher 	/* Copy and update the headers. */
1064874aeea5SJeff Kirsher 	memcpy(header, skb->data, st->header_len);
1065874aeea5SJeff Kirsher 
1066874aeea5SJeff Kirsher 	tsoh_th->seq = htonl(st->seqnum);
1067874aeea5SJeff Kirsher 	st->seqnum += skb_shinfo(skb)->gso_size;
1068874aeea5SJeff Kirsher 	if (st->out_len > skb_shinfo(skb)->gso_size) {
1069874aeea5SJeff Kirsher 		/* This packet will not finish the TSO burst. */
1070874aeea5SJeff Kirsher 		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
1071874aeea5SJeff Kirsher 		tsoh_th->fin = 0;
1072874aeea5SJeff Kirsher 		tsoh_th->psh = 0;
1073874aeea5SJeff Kirsher 	} else {
1074874aeea5SJeff Kirsher 		/* This packet will be the last in the TSO burst. */
1075874aeea5SJeff Kirsher 		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
1076874aeea5SJeff Kirsher 		tsoh_th->fin = tcp_hdr(skb)->fin;
1077874aeea5SJeff Kirsher 		tsoh_th->psh = tcp_hdr(skb)->psh;
1078874aeea5SJeff Kirsher 	}
1079874aeea5SJeff Kirsher 
1080874aeea5SJeff Kirsher 	if (st->protocol == htons(ETH_P_IP)) {
1081874aeea5SJeff Kirsher 		struct iphdr *tsoh_iph =
1082874aeea5SJeff Kirsher 			(struct iphdr *)(header + SKB_IPV4_OFF(skb));
1083874aeea5SJeff Kirsher 
1084874aeea5SJeff Kirsher 		tsoh_iph->tot_len = htons(ip_length);
1085874aeea5SJeff Kirsher 
1086874aeea5SJeff Kirsher 		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
1087874aeea5SJeff Kirsher 		tsoh_iph->id = htons(st->ipv4_id);
1088874aeea5SJeff Kirsher 		st->ipv4_id++;
1089874aeea5SJeff Kirsher 	} else {
1090874aeea5SJeff Kirsher 		struct ipv6hdr *tsoh_iph =
1091874aeea5SJeff Kirsher 			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
1092874aeea5SJeff Kirsher 
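		/* IPv6 has no ID field to patch; only the payload
		 * length needs fixing up per segment.
		 */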
1093874aeea5SJeff Kirsher 		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
1094874aeea5SJeff Kirsher 	}
1095874aeea5SJeff Kirsher 
1096874aeea5SJeff Kirsher 	st->packet_space = skb_shinfo(skb)->gso_size;
1097874aeea5SJeff Kirsher 	++tx_queue->tso_packets;
1098874aeea5SJeff Kirsher 
1099874aeea5SJeff Kirsher 	/* Form a descriptor for this header. */
1100874aeea5SJeff Kirsher 	efx_tso_put_header(tx_queue, tsoh, st->header_len);
1101874aeea5SJeff Kirsher 
1102874aeea5SJeff Kirsher 	return 0;
1103874aeea5SJeff Kirsher }
1104874aeea5SJeff Kirsher 
1105874aeea5SJeff Kirsher 
1106874aeea5SJeff Kirsher /**
1107874aeea5SJeff Kirsher  * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
1108874aeea5SJeff Kirsher  * @tx_queue:		Efx TX queue
1109874aeea5SJeff Kirsher  * @skb:		Socket buffer
1110874aeea5SJeff Kirsher  *
1111874aeea5SJeff Kirsher  * Context: You must hold netif_tx_lock() to call this function.
1112874aeea5SJeff Kirsher  *
1113874aeea5SJeff Kirsher  * Add socket buffer @skb to @tx_queue, doing TSO.  Return %NETDEV_TX_OK
1114874aeea5SJeff Kirsher  * on success or memory error (in which case @skb is dropped), or
1115874aeea5SJeff Kirsher  * %NETDEV_TX_BUSY if the ring was full; then @skb is not freed and may be retried.
1116874aeea5SJeff Kirsher  */
1117874aeea5SJeff Kirsher static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1118874aeea5SJeff Kirsher 			       struct sk_buff *skb)
1119874aeea5SJeff Kirsher {
1120874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
1121874aeea5SJeff Kirsher 	int frag_i, rc, rc2 = NETDEV_TX_OK;
1122874aeea5SJeff Kirsher 	struct tso_state state;
1123874aeea5SJeff Kirsher 
1124874aeea5SJeff Kirsher 	/* Find the packet protocol and sanity-check it */
1125874aeea5SJeff Kirsher 	state.protocol = efx_tso_check_protocol(skb);
1126874aeea5SJeff Kirsher 
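	/* The queue must be fully pushed before we start, presumably so
	 * that an error part-way through can unwind back to a
	 * consistent write pointer.
	 */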
1127874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1128874aeea5SJeff Kirsher 
1129874aeea5SJeff Kirsher 	tso_start(&state, skb);
1130874aeea5SJeff Kirsher 
1131874aeea5SJeff Kirsher 	/* If the skb header area contains exactly the headers, all payload
1132874aeea5SJeff Kirsher 	 * is in the frag list; otherwise the head also holds payload.
1133874aeea5SJeff Kirsher 	 */
1134874aeea5SJeff Kirsher 	if (skb_headlen(skb) == state.header_len) {
1135874aeea5SJeff Kirsher 		/* Grab the first payload fragment. */
1136874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1137874aeea5SJeff Kirsher 		frag_i = 0;
1138874aeea5SJeff Kirsher 		rc = tso_get_fragment(&state, efx,
1139874aeea5SJeff Kirsher 				      skb_shinfo(skb)->frags + frag_i);
1140874aeea5SJeff Kirsher 		if (rc)
1141874aeea5SJeff Kirsher 			goto mem_err;
1142874aeea5SJeff Kirsher 	} else {
1143874aeea5SJeff Kirsher 		rc = tso_get_head_fragment(&state, efx, skb);
1144874aeea5SJeff Kirsher 		if (rc)
1145874aeea5SJeff Kirsher 			goto mem_err;
1146874aeea5SJeff Kirsher 		frag_i = -1;
1147874aeea5SJeff Kirsher 	}
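	/* Either way, state now describes the first region of payload:
	 * dma_addr and in_len cover what remains of that region.
	 */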
1148874aeea5SJeff Kirsher 
1149874aeea5SJeff Kirsher 	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1150874aeea5SJeff Kirsher 		goto mem_err;
1151874aeea5SJeff Kirsher 
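	/* Main loop: fill the current output segment from the current
	 * input fragment, moving to the next fragment when in_len
	 * reaches zero and starting a new segment (with a fresh copy
	 * of the headers) when packet_space reaches zero.
	 */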
1152874aeea5SJeff Kirsher 	while (1) {
1153874aeea5SJeff Kirsher 		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
1154874aeea5SJeff Kirsher 		if (unlikely(rc)) {
1155874aeea5SJeff Kirsher 			rc2 = NETDEV_TX_BUSY;
1156874aeea5SJeff Kirsher 			goto unwind;
1157874aeea5SJeff Kirsher 		}
1158874aeea5SJeff Kirsher 
1159874aeea5SJeff Kirsher 		/* Move on to the next fragment? */
1160874aeea5SJeff Kirsher 		if (state.in_len == 0) {
1161874aeea5SJeff Kirsher 			if (++frag_i >= skb_shinfo(skb)->nr_frags)
1162874aeea5SJeff Kirsher 				/* End of payload reached. */
1163874aeea5SJeff Kirsher 				break;
1164874aeea5SJeff Kirsher 			rc = tso_get_fragment(&state, efx,
1165874aeea5SJeff Kirsher 					      skb_shinfo(skb)->frags + frag_i);
1166874aeea5SJeff Kirsher 			if (rc)
1167874aeea5SJeff Kirsher 				goto mem_err;
1168874aeea5SJeff Kirsher 		}
1169874aeea5SJeff Kirsher 
1170874aeea5SJeff Kirsher 		/* Start a new packet? */
1171874aeea5SJeff Kirsher 		if (state.packet_space == 0 &&
1172874aeea5SJeff Kirsher 		    tso_start_new_packet(tx_queue, skb, &state) < 0)
1173874aeea5SJeff Kirsher 			goto mem_err;
1174874aeea5SJeff Kirsher 	}
1175874aeea5SJeff Kirsher 
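	/* Account the whole superpacket to BQL now that every
	 * descriptor for it has been queued.
	 */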
1176449fa023SEric Dumazet 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
1177449fa023SEric Dumazet 
1178874aeea5SJeff Kirsher 	/* Pass off to hardware */
1179874aeea5SJeff Kirsher 	efx_nic_push_buffers(tx_queue);
1180874aeea5SJeff Kirsher 
1181874aeea5SJeff Kirsher 	tx_queue->tso_bursts++;
1182874aeea5SJeff Kirsher 	return NETDEV_TX_OK;
1183874aeea5SJeff Kirsher 
1184874aeea5SJeff Kirsher  mem_err:
1185874aeea5SJeff Kirsher 	netif_err(efx, tx_err, efx->net_dev,
1186874aeea5SJeff Kirsher 		  "Out of memory for TSO headers, or PCI mapping error\n");
1187874aeea5SJeff Kirsher 	dev_kfree_skb_any(skb);
1188874aeea5SJeff Kirsher 
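	/* Fall through to the unwind path with rc2 still NETDEV_TX_OK:
	 * the skb has been freed, so the stack must not retry it.
	 */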
1189874aeea5SJeff Kirsher  unwind:
1190874aeea5SJeff Kirsher 	/* Free the DMA mapping we were in the process of writing out */
1191874aeea5SJeff Kirsher 	if (state.unmap_len) {
1192874aeea5SJeff Kirsher 		if (state.unmap_single)
1193874aeea5SJeff Kirsher 			pci_unmap_single(efx->pci_dev, state.unmap_addr,
1194874aeea5SJeff Kirsher 					 state.unmap_len, PCI_DMA_TODEVICE);
1195874aeea5SJeff Kirsher 		else
1196874aeea5SJeff Kirsher 			pci_unmap_page(efx->pci_dev, state.unmap_addr,
1197874aeea5SJeff Kirsher 				       state.unmap_len, PCI_DMA_TODEVICE);
1198874aeea5SJeff Kirsher 	}
1199874aeea5SJeff Kirsher 
1200874aeea5SJeff Kirsher 	efx_enqueue_unwind(tx_queue);
1201874aeea5SJeff Kirsher 	return rc2;
1202874aeea5SJeff Kirsher }
1203874aeea5SJeff Kirsher 
1204874aeea5SJeff Kirsher 
1205874aeea5SJeff Kirsher /*
1206874aeea5SJeff Kirsher  * Free up all TSO data structures associated with tx_queue.  This
1207874aeea5SJeff Kirsher  * routine should be called only when the tx_queue is empty and will
1208874aeea5SJeff Kirsher  * no longer be used.
1209874aeea5SJeff Kirsher  */
1210874aeea5SJeff Kirsher static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1211874aeea5SJeff Kirsher {
1212874aeea5SJeff Kirsher 	unsigned i;
1213874aeea5SJeff Kirsher 
1214874aeea5SJeff Kirsher 	if (tx_queue->buffer) {
1215874aeea5SJeff Kirsher 		for (i = 0; i <= tx_queue->ptr_mask; ++i)
1216874aeea5SJeff Kirsher 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1217874aeea5SJeff Kirsher 	}
1218874aeea5SJeff Kirsher 
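	/* Return any still-pooled standard-size header blocks. */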
1219874aeea5SJeff Kirsher 	while (tx_queue->tso_headers_free != NULL)
1220874aeea5SJeff Kirsher 		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1221874aeea5SJeff Kirsher 				    tx_queue->efx->pci_dev);
1222874aeea5SJeff Kirsher }
1223