/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

static inline unsigned int
efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
{
	return tx_queue->insert_count & tx_queue->ptr_mask;
}
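
/* Worked example: rings are always sized to a power of two (see
 * efx_probe_tx_queue()), so with a 1024-entry ring ptr_mask is 0x3ff
 * and a free-running insert_count of e.g. 0x1402 maps to slot 0x002.
 * Masking only on array access lets insert_count - read_count give
 * the fill level directly, even across wrap-around.
 */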

static inline struct efx_tx_buffer *
__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
}

static inline struct efx_tx_buffer *
efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer =
		__efx_tx_queue_get_insert_buffer(tx_queue);

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	return buffer;
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
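
/* Worked example with values current at the time of writing (treat
 * them as illustrative): EFX_TSO_MAX_SEGS == 100 and MAX_SKB_FRAGS ==
 * 17 give a baseline of 100 * 2 + 17 = 217 descriptors, rising to
 * 317 when the alignment workaround or option descriptors apply.
 * The queue stop threshold is derived from this bound so that a
 * stopped queue always has room for one worst-case skb.
 */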

/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}
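
/* Illustration, assuming EFX_TXQ_TYPE_OFFLOAD == 1 as in this driver
 * generation: hardware queues come in pairs backing one net core
 * queue and differ only in the checksum-offload bit, so e.g. queues
 * 2 and 3 are partners and the pointer arithmetic above toggles
 * between them within the channel's queue array.
 */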

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
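
/* A sketch of the thresholds, assuming the setup code elsewhere in
 * the driver keeps its current shape: txq_stop_thresh is roughly
 * txq_entries minus efx_tx_max_skb_descs(), so the queue stops while
 * there is still guaranteed room for one maximally fragmented skb;
 * the wake threshold checked in efx_xmit_done() sits lower to give
 * hysteresis and avoid stop/wake ping-pong under load.
 */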

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any already-mapped fragments will be
 * unmapped and the queue's insert pointer will be restored to its
 * original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad very short packets if necessary (workaround 15592) */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			buffer = efx_tx_queue_get_insert_buffer(tx_queue);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
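
/* Mapping sketch, assuming EFX_TXQ_TYPES == 4 (the OFFLOAD and
 * HIGHPRI type bits plus their combinations): hardware queue numbers
 * pack the channel index in the high bits and the type in the low
 * two bits, so queue / EFX_TXQ_TYPES recovers the channel, and
 * HIGHPRI queues map to a second bank of n_tx_channels core queues,
 * exactly inverting the index/type split in efx_hard_start_xmit().
 */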

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}
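
/* Worked example, assuming 4KiB pages: TSOH_PER_PAGE is 4096 / 128 =
 * 32 headers per page, so a 1024-entry ring needs
 * DIV_ROUND_UP(1024, 64) = 16 pages of header storage, matching the
 * "at most half the descriptors" bound above.
 */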

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif
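
/* As with NET_IP_ALIGN, the offset (typically 2 bytes on platforms
 * without efficient unaligned access) shifts the copied headers so
 * the IP header lands naturally aligned after the 14-byte Ethernet
 * header.
 */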

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *	descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	u16 ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
	dma_addr_t header_dma_addr;
	unsigned int header_unmap_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}
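
/* Two allocation paths, in short: headers of up to TSOH_STD_SIZE -
 * TSOH_OFFSET bytes come from the pre-allocated DMA-coherent pages
 * (the insert_count / 2 indexing works because header descriptors
 * always alternate with payload descriptors), while longer headers
 * fall back to kmalloc(), are flagged EFX_TX_BUF_HEAP, and get
 * DMA-mapped later in efx_tso_put_header().
 */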

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}


/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
		     const struct sk_buff *skb)
{
	bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	dma_addr_t dma_addr;

	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

	if (!use_options) {
		st->header_unmap_len = 0;

		if (likely(in_len == 0)) {
			st->dma_flags = 0;
			st->unmap_len = 0;
			return 0;
		}

		dma_addr = dma_map_single(dma_dev, skb->data + header_len,
					  in_len, DMA_TO_DEVICE);
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->dma_addr = dma_addr;
		st->unmap_addr = dma_addr;
		st->unmap_len = in_len;
	} else {
		dma_addr = dma_map_single(dma_dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
		st->header_dma_addr = dma_addr;
		st->header_unmap_len = skb_headlen(skb);
		st->dma_flags = 0;
		st->dma_addr = dma_addr + header_len;
		st->unmap_len = 0;
	}

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}
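
/* In outline: pre-EF10 NICs take the software-segmentation path
 * (!use_options), where each segment gets a freshly built header via
 * efx_tsoh_get_buffer() and only the linear payload beyond the
 * headers is mapped here.  EF10 ("Hunt") NICs map the whole linear
 * area once and describe the segmentation to hardware with a TSO
 * option descriptor, reusing the original headers for every segment
 * (see tso_start_new_packet() below).
 */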

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_clear;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_clear = 0x09; /* mask out FIN and PSH */
	} else {
		st->packet_space = st->out_len;
		tcp_flags_clear = 0x00;
	}

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start().  Unmap them
		 * when the last segment is completed.
		 */
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	++tx_queue->tso_packets;

	return 0;
}
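
/* Segmentation walk-through with illustrative numbers: a TCP skb
 * carrying 3000 bytes of payload with gso_size 1460 yields packets
 * of 1460, 1460 and 80 payload bytes.  Each call advances seqnum by
 * gso_size and bumps ipv4_id by one, and FIN/PSH are cleared on all
 * but the final segment so only the last packet can carry them,
 * matching what software GSO would emit.
 */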


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  In all cases @skb
 * is consumed, even on error.  Returns %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	rc = tso_start(&state, efx, skb);
	if (rc)
		goto mem_err;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping, if using option descriptors */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	efx_enqueue_unwind(tx_queue);
	return NETDEV_TX_OK;
}