xref: /openbmc/linux/drivers/net/ethernet/sfc/tx.c (revision dfa50be9)
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_regs.h"

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
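
/* Worked example (illustrative, assuming EFX_PAGE_SIZE == 4096): for
 * dma_addr == 0x...1f80, ~dma_addr & 0xfff == 0x07f, so len == 0x080,
 * i.e. exactly the 128 bytes up to the next 4K boundary.  With
 * workaround 5391 active and dma_addr == 0x...1f84 (offset 4 into a
 * 16-byte unit), len is further capped at 512 - 4 == 508.
 */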

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
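
/* Worked example (illustrative; the exact constants are build- and
 * version-dependent): with EFX_TSO_MAX_SEGS == 100 and
 * MAX_SKB_FRAGS == 17 the baseline is 100 * 2 + 17 == 217
 * descriptors, growing to 317 on EF10 or with workaround 5391.  The
 * queue stop threshold is derived from this value so that a
 * worst-case TSO skb can always be accepted once the queue is woken.
 */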

/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
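
/* Why stop-then-check matters (illustrative interleaving):
 *
 *   xmit path                      completion path
 *   ---------                      ---------------
 *   read read_count (queue full)
 *                                  complete all descriptors
 *                                  queue not stopped -> no wake
 *   netif_tx_stop_queue()          (never runs again)
 *
 * Stopping first and re-reading read_count afterwards (ordered by
 * smp_mb()) closes this window: either we see the completions and
 * restart the queue ourselves, or the completion path sees the
 * stopped queue and wakes it from efx_xmit_done().
 */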

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped
 * and the queue's insert pointer will be restored to its original
 * value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			EFX_BUG_ON_PARANOID(buffer->flags);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}
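
/* Resulting descriptor layout for a linear head plus one page
 * fragment (illustrative; assumes neither piece crosses an
 * EFX_PAGE_SIZE boundary):
 *
 *   desc 0: head    EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE, unmap_len set
 *   desc 1: frag 0  EFX_TX_BUF_SKB (skb attached), unmap_len set
 *
 * Only the final descriptor of each mapping carries unmap_len, and
 * only the final descriptor of the whole skb carries the skb
 * pointer, so efx_dequeue_buffer() unmaps and frees exactly once.
 */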

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
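
/* Worked mapping example (illustrative; assumes EFX_TXQ_TYPES == 4
 * with OFFLOAD == 1 and HIGHPRI == 2, and n_tx_channels == 4):
 * hardware queue 9 is channel 2, type OFFLOAD -> core txq 2;
 * hardware queue 11 is channel 2, type OFFLOAD|HIGHPRI -> core txq
 * 2 + 4 == 6.  efx_hard_start_xmit() inverts this: core txq 6 yields
 * index 6 - 4 == 2 and type HIGHPRI (plus OFFLOAD if the skb needs
 * checksum offload).
 */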

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}
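
/* Worked sizing example (illustrative; assumes PAGE_SIZE == 4096):
 * TSOH_PER_PAGE == 4096 / 128 == 32 header buffers per page, so a
 * 1024-entry ring needs DIV_ROUND_UP(1024, 2 * 32) == 16 pages of
 * standard header buffers to cover the worst case of alternating
 * header/payload descriptors.
 */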

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the kernel's generic GSO path.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif
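
/* On architectures without efficient unaligned access, NET_IP_ALIGN
 * (typically 2) offsets the 14-byte Ethernet header so that the IP
 * header that follows it starts on a 4-byte boundary: 2 + 14 == 16.
 */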

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *	descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	u16 ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
	dma_addr_t header_dma_addr;
	unsigned int header_unmap_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}
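
/* Indexing example (illustrative; assumes 4K pages, so TSOH_PER_PAGE
 * == 32): with insert_count & ptr_mask == 70, index is 70 / 2 == 35,
 * which lands in tsoh_page[35 / 32] == page 1 at offset
 * 128 * (35 % 32) + TSOH_OFFSET == 384 + TSOH_OFFSET.  The division
 * by two encodes the invariant above: a header descriptor is always
 * followed by at least one payload descriptor.
 */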

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, insert_ptr;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->flags);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}


/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
		     const struct sk_buff *skb)
{
	bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	dma_addr_t dma_addr;

	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

	if (!use_options) {
		st->header_unmap_len = 0;

		if (likely(in_len == 0)) {
			st->dma_flags = 0;
			st->unmap_len = 0;
			return 0;
		}

		dma_addr = dma_map_single(dma_dev, skb->data + header_len,
					  in_len, DMA_TO_DEVICE);
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->dma_addr = dma_addr;
		st->unmap_addr = dma_addr;
		st->unmap_len = in_len;
	} else {
		dma_addr = dma_map_single(dma_dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
		st->header_dma_addr = dma_addr;
		st->header_unmap_len = skb_headlen(skb);
		st->dma_flags = 0;
		st->dma_addr = dma_addr + header_len;
		st->unmap_len = 0;
	}

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}
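
/* The two mapping strategies set up above, in brief:
 *
 *   pre-EF10 (!use_options): only the payload part of the linear
 *     area is mapped; a fresh copy of the headers is built for each
 *     segment via efx_tsoh_get_buffer()/efx_tso_put_header().
 *
 *   EF10 (use_options): the whole linear area, headers included, is
 *     mapped once; each segment reuses the original headers behind a
 *     TSO option descriptor that tells the NIC how to edit them.
 */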

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_clear;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_clear = 0x09; /* mask out FIN and PSH */
	} else {
		st->packet_space = st->out_len;
		tcp_flags_clear = 0x00;
	}

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start().  Unmap them
		 * when the last segment is completed.
		 */
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	++tx_queue->tso_packets;

	return 0;
}
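
/* TCP flags live in byte 13 of the TCP header: FIN is bit 0 (0x01)
 * and PSH is bit 3 (0x08), so tcp_flags_clear == 0x09 strips both
 * from every segment except the last; only the final segment may
 * carry the sender's FIN/PSH.  For example, a flags byte of 0x19
 * (ACK|PSH|FIN) becomes 0x10 (ACK only) on non-final segments.
 */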


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  In all cases @skb
 * is consumed.  Return %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	rc = tso_start(&state, efx, skb);
	if (rc)
		goto mem_err;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping, if using option descriptors */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	efx_enqueue_unwind(tx_queue);
	return NETDEV_TX_OK;
}