/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
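
/* Example: with txq_entries = 1024 the queue is not restarted until
 * fewer than 512 descriptors are in flight.
 */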

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
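		/* The final buffer of a DMA-mapped fragment owns the
		 * unmapping (see efx_enqueue_skb()), so the start of the
		 * original mapping is the end of this buffer minus the
		 * still-mapped length.
		 */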
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
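
/* Example: a dma_addr ending in 0xffc yields len = 4 (the distance to
 * the next 4K boundary), while a 4K-aligned dma_addr yields the full
 * 4096 bytes.
 */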

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any fragments already mapped will be
 * unmapped and the queue's insert pointer restored to its original
 * value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = efx->txq_entries - 1 - fill_level;
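	/* The counters are free-running; masking with ptr_mask converts
	 * them to ring indices.  One entry is held back (the -1 above)
	 * because a completely full ring would be indistinguishable from
	 * an empty one.
	 */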

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				netif_tx_stop_queue(tx_queue->core_txq);
				/* This memory barrier protects the
				 * change of queue state from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					ACCESS_ONCE(tx_queue->read_count);
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = efx->txq_entries - 1 - fill_level;
				if (unlikely(q_space-- <= 0)) {
					rc = NETDEV_TX_BUSY;
					goto unwind;
				}
				smp_mb();
				if (likely(!efx->loopback_selftest))
					netif_tx_start_queue(
						tx_queue->core_txq);
			}

			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
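
/* Example: with ptr_mask = 1023, read_count = 1020 and index = 2, the
 * loop above frees ring entries 1020-1023 and then 0-2, the wrap being
 * handled by masking read_ptr on every iteration.
 */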

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}
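
/* Example: with n_tx_channels = 4, an skb with queue mapping 5 and
 * CHECKSUM_PARTIAL set is sent on channel 1's queue of type
 * EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI.
 */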

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
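
/* Example: continuing the case above, channel 1's high-priority queue
 * maps back to core queue 1 + n_tx_channels = 5, matching the skb's
 * original queue mapping.
 */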

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
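
/* This is normally reached through the mqprio qdisc (which calls the
 * driver's ndo_setup_tc hook), e.g.:
 *
 *	tc qdisc add dev ethX root mqprio num_tc 2
 */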

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of the
	 * queue state. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
			netif_tx_wake_queue(tx_queue->core_txq);
		}
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
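
/* The counters compared above are free-running unsigned values, so the
 * signed difference (int)(read_count - old_write_count) stays correct
 * across wraparound provided the true distance is less than 2^31.
 */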

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int i, rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= tx_queue->ptr_mask; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
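
/* Example: a requested txq_entries of 900 rounds up to a 1024-entry
 * ring, giving ptr_mask = 1023.
 */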

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->initialised)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
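
/* Example: on a typical 64-bit build (16-byte struct efx_tso_header)
 * with TSOH_OFFSET = 2, a 54-byte Ethernet/IPv4/TCP header needs
 * TSOH_SIZE(54) = 72 bytes, well within a TSOH_STD_SIZE block.
 */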

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}
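
/* With 4KB pages this yields PAGE_SIZE / TSOH_STD_SIZE = 32 headers per
 * allocated block.
 */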


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue is full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = efx->txq_entries - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			netif_tx_stop_queue(tx_queue->core_txq);
			/* This memory barrier protects the change of
			 * queue state from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				ACCESS_ONCE(tx_queue->read_count);
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = efx->txq_entries - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			netif_tx_start_queue(tx_queue->core_txq);
		}

		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
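
/* Example: an IPv4 TCP skb with no options has header_len =
 * 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54, so gso_size = 1460 gives a
 * full_packet_size of 1514 bytes per output segment.
 */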

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  frag->size, DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_single = false;
		st->unmap_len = frag->size;
		st->in_len = frag->size;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
					len, PCI_DMA_TODEVICE);
	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
		st->unmap_single = true;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
 * space in @tx_queue.
 */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the pci mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if we failed to allocate the header.
1020874aeea5SJeff Kirsher  */
1021874aeea5SJeff Kirsher static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1022874aeea5SJeff Kirsher 				const struct sk_buff *skb,
1023874aeea5SJeff Kirsher 				struct tso_state *st)
1024874aeea5SJeff Kirsher {
1025874aeea5SJeff Kirsher 	struct efx_tso_header *tsoh;
1026874aeea5SJeff Kirsher 	struct tcphdr *tsoh_th;
1027874aeea5SJeff Kirsher 	unsigned ip_length;
1028874aeea5SJeff Kirsher 	u8 *header;
1029874aeea5SJeff Kirsher 
1030874aeea5SJeff Kirsher 	/* Allocate a DMA-mapped header buffer. */
1031874aeea5SJeff Kirsher 	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
1032874aeea5SJeff Kirsher 		if (tx_queue->tso_headers_free == NULL) {
1033874aeea5SJeff Kirsher 			if (efx_tsoh_block_alloc(tx_queue))
1034874aeea5SJeff Kirsher 				return -1;
1035874aeea5SJeff Kirsher 		}
1036874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
1037874aeea5SJeff Kirsher 		tsoh = tx_queue->tso_headers_free;
1038874aeea5SJeff Kirsher 		tx_queue->tso_headers_free = tsoh->next;
1039874aeea5SJeff Kirsher 		tsoh->unmap_len = 0;
1040874aeea5SJeff Kirsher 	} else {
1041874aeea5SJeff Kirsher 		tx_queue->tso_long_headers++;
1042874aeea5SJeff Kirsher 		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
1043874aeea5SJeff Kirsher 		if (unlikely(!tsoh))
1044874aeea5SJeff Kirsher 			return -1;
1045874aeea5SJeff Kirsher 	}
1046874aeea5SJeff Kirsher 
1047874aeea5SJeff Kirsher 	header = TSOH_BUFFER(tsoh);
1048874aeea5SJeff Kirsher 	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
1049874aeea5SJeff Kirsher 
1050874aeea5SJeff Kirsher 	/* Copy and update the headers. */
1051874aeea5SJeff Kirsher 	memcpy(header, skb->data, st->header_len);
1052874aeea5SJeff Kirsher 
1053874aeea5SJeff Kirsher 	tsoh_th->seq = htonl(st->seqnum);
1054874aeea5SJeff Kirsher 	st->seqnum += skb_shinfo(skb)->gso_size;
1055874aeea5SJeff Kirsher 	if (st->out_len > skb_shinfo(skb)->gso_size) {
1056874aeea5SJeff Kirsher 		/* This packet will not finish the TSO burst. */
1057874aeea5SJeff Kirsher 		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
1058874aeea5SJeff Kirsher 		tsoh_th->fin = 0;
1059874aeea5SJeff Kirsher 		tsoh_th->psh = 0;
1060874aeea5SJeff Kirsher 	} else {
1061874aeea5SJeff Kirsher 		/* This packet will be the last in the TSO burst. */
1062874aeea5SJeff Kirsher 		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
1063874aeea5SJeff Kirsher 		tsoh_th->fin = tcp_hdr(skb)->fin;
1064874aeea5SJeff Kirsher 		tsoh_th->psh = tcp_hdr(skb)->psh;
1065874aeea5SJeff Kirsher 	}
1066874aeea5SJeff Kirsher 
1067874aeea5SJeff Kirsher 	if (st->protocol == htons(ETH_P_IP)) {
1068874aeea5SJeff Kirsher 		struct iphdr *tsoh_iph =
1069874aeea5SJeff Kirsher 			(struct iphdr *)(header + SKB_IPV4_OFF(skb));
1070874aeea5SJeff Kirsher 
1071874aeea5SJeff Kirsher 		tsoh_iph->tot_len = htons(ip_length);
1072874aeea5SJeff Kirsher 
1073874aeea5SJeff Kirsher 		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
1074874aeea5SJeff Kirsher 		tsoh_iph->id = htons(st->ipv4_id);
1075874aeea5SJeff Kirsher 		st->ipv4_id++;
1076874aeea5SJeff Kirsher 	} else {
1077874aeea5SJeff Kirsher 		struct ipv6hdr *tsoh_iph =
1078874aeea5SJeff Kirsher 			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
1079874aeea5SJeff Kirsher 
1080874aeea5SJeff Kirsher 		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
1081874aeea5SJeff Kirsher 	}
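	/*
	 * For IPv4 this means segment k of the burst carries IP ID
	 * original_id + k, the same sequence the stack's software GSO
	 * would have generated.
	 */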
1082874aeea5SJeff Kirsher 
1083874aeea5SJeff Kirsher 	st->packet_space = skb_shinfo(skb)->gso_size;
1084874aeea5SJeff Kirsher 	++tx_queue->tso_packets;
1085874aeea5SJeff Kirsher 
1086874aeea5SJeff Kirsher 	/* Form a descriptor for this header. */
1087874aeea5SJeff Kirsher 	efx_tso_put_header(tx_queue, tsoh, st->header_len);
1088874aeea5SJeff Kirsher 
1089874aeea5SJeff Kirsher 	return 0;
1090874aeea5SJeff Kirsher }
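/*
 * A minimal sketch of the free-list discipline used for headers above
 * (hypothetical names, not driver code): standard-sized headers are
 * popped from a singly linked free list refilled a block at a time, and
 * only oversize headers fall back to a heap allocation, so the common
 * case never hits the allocator.
 */
#if 0	/* illustration only */
struct hdr_buf {
	struct hdr_buf *next;
};

static struct hdr_buf *hdr_buf_get(struct hdr_buf **free_list,
				   size_t len, size_t std_size)
{
	struct hdr_buf *h = *free_list;

	if (len <= std_size && h) {
		*free_list = h->next;	/* fast path: reuse a cached buffer */
		return h;
	}
	/* slow path: allocate; GFP_ATOMIC because we hold the TX lock */
	return kmalloc(sizeof(*h) + len, GFP_ATOMIC);
}
#endif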
1091874aeea5SJeff Kirsher 
1092874aeea5SJeff Kirsher 
1093874aeea5SJeff Kirsher /**
1094874aeea5SJeff Kirsher  * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
1095874aeea5SJeff Kirsher  * @tx_queue:		Efx TX queue
1096874aeea5SJeff Kirsher  * @skb:		Socket buffer
1097874aeea5SJeff Kirsher  *
1098874aeea5SJeff Kirsher  * Context: You must hold netif_tx_lock() to call this function.
1099874aeea5SJeff Kirsher  *
1100874aeea5SJeff Kirsher  * Add socket buffer @skb to @tx_queue, performing TSO.  In all cases
1101874aeea5SJeff Kirsher  * @skb is consumed.  Return %NETDEV_TX_OK on success, or %NETDEV_TX_BUSY
1102874aeea5SJeff Kirsher  * if the descriptor ring filled up before the burst was fully enqueued.
1103874aeea5SJeff Kirsher  */
1104874aeea5SJeff Kirsher static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1105874aeea5SJeff Kirsher 			       struct sk_buff *skb)
1106874aeea5SJeff Kirsher {
1107874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
1108874aeea5SJeff Kirsher 	int frag_i, rc, rc2 = NETDEV_TX_OK;
1109874aeea5SJeff Kirsher 	struct tso_state state;
1110874aeea5SJeff Kirsher 
1111874aeea5SJeff Kirsher 	/* Find the packet protocol and sanity-check it */
1112874aeea5SJeff Kirsher 	state.protocol = efx_tso_check_protocol(skb);
1113874aeea5SJeff Kirsher 
1114874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1115874aeea5SJeff Kirsher 
1116874aeea5SJeff Kirsher 	tso_start(&state, skb);
1117874aeea5SJeff Kirsher 
1118874aeea5SJeff Kirsher 	/* Assume that skb header area contains exactly the headers, and
1119874aeea5SJeff Kirsher 	 * all payload is in the frag list.
1120874aeea5SJeff Kirsher 	 */
1121874aeea5SJeff Kirsher 	if (skb_headlen(skb) == state.header_len) {
1122874aeea5SJeff Kirsher 		/* Grab the first payload fragment. */
1123874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1124874aeea5SJeff Kirsher 		frag_i = 0;
1125874aeea5SJeff Kirsher 		rc = tso_get_fragment(&state, efx,
1126874aeea5SJeff Kirsher 				      skb_shinfo(skb)->frags + frag_i);
1127874aeea5SJeff Kirsher 		if (rc)
1128874aeea5SJeff Kirsher 			goto mem_err;
1129874aeea5SJeff Kirsher 	} else {
1130874aeea5SJeff Kirsher 		rc = tso_get_head_fragment(&state, efx, skb);
1131874aeea5SJeff Kirsher 		if (rc)
1132874aeea5SJeff Kirsher 			goto mem_err;
1133874aeea5SJeff Kirsher 		frag_i = -1;
1134874aeea5SJeff Kirsher 	}
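	/*
	 * Example: for a linear skb (all payload in skb->data),
	 * skb_headlen() exceeds header_len, so the else branch maps the
	 * remainder of the head area as the first chunk of payload, and
	 * frag_i = -1 records that no page fragment is in use yet.
	 */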
1135874aeea5SJeff Kirsher 
1136874aeea5SJeff Kirsher 	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1137874aeea5SJeff Kirsher 		goto mem_err;
1138874aeea5SJeff Kirsher 
1139874aeea5SJeff Kirsher 	while (1) {
1140874aeea5SJeff Kirsher 		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
1141874aeea5SJeff Kirsher 		if (unlikely(rc)) {
1142874aeea5SJeff Kirsher 			rc2 = NETDEV_TX_BUSY;
1143874aeea5SJeff Kirsher 			goto unwind;
1144874aeea5SJeff Kirsher 		}
1145874aeea5SJeff Kirsher 
1146874aeea5SJeff Kirsher 		/* Move on to the next fragment? */
1147874aeea5SJeff Kirsher 		if (state.in_len == 0) {
1148874aeea5SJeff Kirsher 			if (++frag_i >= skb_shinfo(skb)->nr_frags)
1149874aeea5SJeff Kirsher 				/* End of payload reached. */
1150874aeea5SJeff Kirsher 				break;
1151874aeea5SJeff Kirsher 			rc = tso_get_fragment(&state, efx,
1152874aeea5SJeff Kirsher 					      skb_shinfo(skb)->frags + frag_i);
1153874aeea5SJeff Kirsher 			if (rc)
1154874aeea5SJeff Kirsher 				goto mem_err;
1155874aeea5SJeff Kirsher 		}
1156874aeea5SJeff Kirsher 
1157874aeea5SJeff Kirsher 		/* Start a new packet? */
1158874aeea5SJeff Kirsher 		if (state.packet_space == 0 &&
1159874aeea5SJeff Kirsher 		    tso_start_new_packet(tx_queue, skb, &state) < 0)
1160874aeea5SJeff Kirsher 			goto mem_err;
1161874aeea5SJeff Kirsher 	}
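	/*
	 * All payload has now been assigned to descriptors, and the final
	 * segment's header carried the skb's original FIN/PSH flags; the
	 * burst is complete.
	 */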
1162874aeea5SJeff Kirsher 
1163874aeea5SJeff Kirsher 	/* Pass off to hardware */
1164874aeea5SJeff Kirsher 	efx_nic_push_buffers(tx_queue);
1165874aeea5SJeff Kirsher 
1166874aeea5SJeff Kirsher 	tx_queue->tso_bursts++;
1167874aeea5SJeff Kirsher 	return NETDEV_TX_OK;
1168874aeea5SJeff Kirsher 
1169874aeea5SJeff Kirsher  mem_err:
1170874aeea5SJeff Kirsher 	netif_err(efx, tx_err, efx->net_dev,
1171874aeea5SJeff Kirsher 		  "Out of memory for TSO headers, or PCI mapping error\n");
1172874aeea5SJeff Kirsher 	dev_kfree_skb_any(skb);
1173874aeea5SJeff Kirsher 
1174874aeea5SJeff Kirsher  unwind:
1175874aeea5SJeff Kirsher 	/* Free the DMA mapping we were in the process of writing out */
1176874aeea5SJeff Kirsher 	if (state.unmap_len) {
1177874aeea5SJeff Kirsher 		if (state.unmap_single)
1178874aeea5SJeff Kirsher 			pci_unmap_single(efx->pci_dev, state.unmap_addr,
1179874aeea5SJeff Kirsher 					 state.unmap_len, PCI_DMA_TODEVICE);
1180874aeea5SJeff Kirsher 		else
1181874aeea5SJeff Kirsher 			pci_unmap_page(efx->pci_dev, state.unmap_addr,
1182874aeea5SJeff Kirsher 				       state.unmap_len, PCI_DMA_TODEVICE);
1183874aeea5SJeff Kirsher 	}
1184874aeea5SJeff Kirsher 
1185874aeea5SJeff Kirsher 	efx_enqueue_unwind(tx_queue);
1186874aeea5SJeff Kirsher 	return rc2;
1187874aeea5SJeff Kirsher }
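/*
 * A minimal sketch of how the transmit path dispatches here (hypothetical
 * wrapper; in this driver efx_enqueue_skb() performs the equivalent
 * check): any skb with a non-zero gso_size takes the TSO path.
 */
#if 0	/* illustration only */
static int my_xmit(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);
	return my_enqueue_copy(tx_queue, skb);	/* hypothetical non-TSO path */
}
#endif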
1188874aeea5SJeff Kirsher 
1189874aeea5SJeff Kirsher 
1190874aeea5SJeff Kirsher /*
1191874aeea5SJeff Kirsher  * Free up all TSO data structures associated with tx_queue.  This
1192874aeea5SJeff Kirsher  * routine should be called only once the tx_queue is empty and
1193874aeea5SJeff Kirsher  * will no longer be used.
1194874aeea5SJeff Kirsher  */
1195874aeea5SJeff Kirsher static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1196874aeea5SJeff Kirsher {
1197874aeea5SJeff Kirsher 	unsigned i;
1198874aeea5SJeff Kirsher 
1199874aeea5SJeff Kirsher 	if (tx_queue->buffer) {
1200874aeea5SJeff Kirsher 		for (i = 0; i <= tx_queue->ptr_mask; ++i)
1201874aeea5SJeff Kirsher 			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1202874aeea5SJeff Kirsher 	}
1203874aeea5SJeff Kirsher 
1204874aeea5SJeff Kirsher 	while (tx_queue->tso_headers_free != NULL)
1205874aeea5SJeff Kirsher 		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1206874aeea5SJeff Kirsher 				    tx_queue->efx->pci_dev);
1207874aeea5SJeff Kirsher }