xref: /openbmc/linux/drivers/net/ethernet/sfc/tx.c (revision f7a6d2c4)
1874aeea5SJeff Kirsher /****************************************************************************
2f7a6d2c4SBen Hutchings  * Driver for Solarflare network controllers and boards
3874aeea5SJeff Kirsher  * Copyright 2005-2006 Fen Systems Ltd.
4f7a6d2c4SBen Hutchings  * Copyright 2005-2013 Solarflare Communications Inc.
5874aeea5SJeff Kirsher  *
6874aeea5SJeff Kirsher  * This program is free software; you can redistribute it and/or modify it
7874aeea5SJeff Kirsher  * under the terms of the GNU General Public License version 2 as published
8874aeea5SJeff Kirsher  * by the Free Software Foundation, incorporated herein by reference.
9874aeea5SJeff Kirsher  */
10874aeea5SJeff Kirsher 
11874aeea5SJeff Kirsher #include <linux/pci.h>
12874aeea5SJeff Kirsher #include <linux/tcp.h>
13874aeea5SJeff Kirsher #include <linux/ip.h>
14874aeea5SJeff Kirsher #include <linux/in.h>
15874aeea5SJeff Kirsher #include <linux/ipv6.h>
16874aeea5SJeff Kirsher #include <linux/slab.h>
17874aeea5SJeff Kirsher #include <net/ipv6.h>
18874aeea5SJeff Kirsher #include <linux/if_ether.h>
19874aeea5SJeff Kirsher #include <linux/highmem.h>
20874aeea5SJeff Kirsher #include "net_driver.h"
21874aeea5SJeff Kirsher #include "efx.h"
22874aeea5SJeff Kirsher #include "nic.h"
23874aeea5SJeff Kirsher #include "workarounds.h"
24874aeea5SJeff Kirsher 
25874aeea5SJeff Kirsher static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
26c3940999STom Herbert 			       struct efx_tx_buffer *buffer,
27c3940999STom Herbert 			       unsigned int *pkts_compl,
28c3940999STom Herbert 			       unsigned int *bytes_compl)
29874aeea5SJeff Kirsher {
30874aeea5SJeff Kirsher 	if (buffer->unmap_len) {
310e33d870SBen Hutchings 		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
32874aeea5SJeff Kirsher 		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
33874aeea5SJeff Kirsher 					 buffer->unmap_len);
347668ff9cSBen Hutchings 		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
350e33d870SBen Hutchings 			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
360e33d870SBen Hutchings 					 DMA_TO_DEVICE);
37874aeea5SJeff Kirsher 		else
380e33d870SBen Hutchings 			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
390e33d870SBen Hutchings 				       DMA_TO_DEVICE);
40874aeea5SJeff Kirsher 		buffer->unmap_len = 0;
41874aeea5SJeff Kirsher 	}
42874aeea5SJeff Kirsher 
437668ff9cSBen Hutchings 	if (buffer->flags & EFX_TX_BUF_SKB) {
44c3940999STom Herbert 		(*pkts_compl)++;
45c3940999STom Herbert 		(*bytes_compl) += buffer->skb->len;
46874aeea5SJeff Kirsher 		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
47874aeea5SJeff Kirsher 		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
48874aeea5SJeff Kirsher 			   "TX queue %d transmission id %x complete\n",
49874aeea5SJeff Kirsher 			   tx_queue->queue, tx_queue->read_count);
50f7251a9cSBen Hutchings 	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
51f7251a9cSBen Hutchings 		kfree(buffer->heap_buf);
52874aeea5SJeff Kirsher 	}
537668ff9cSBen Hutchings 
54f7251a9cSBen Hutchings 	buffer->len = 0;
55f7251a9cSBen Hutchings 	buffer->flags = 0;
56874aeea5SJeff Kirsher }
57874aeea5SJeff Kirsher 
58874aeea5SJeff Kirsher static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
59874aeea5SJeff Kirsher 			       struct sk_buff *skb);
60874aeea5SJeff Kirsher 
61874aeea5SJeff Kirsher static inline unsigned
62874aeea5SJeff Kirsher efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
63874aeea5SJeff Kirsher {
64874aeea5SJeff Kirsher 	/* Depending on the NIC revision, we can use descriptor
65874aeea5SJeff Kirsher 	 * lengths up to 8K or 8K-1.  However, since PCI Express
66874aeea5SJeff Kirsher 	 * devices must split read requests at 4K boundaries, there is
67874aeea5SJeff Kirsher 	 * little benefit from using descriptors that cross those
68874aeea5SJeff Kirsher 	 * boundaries and we keep things simple by not doing so.
69874aeea5SJeff Kirsher 	 */
705b6262d0SBen Hutchings 	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
71874aeea5SJeff Kirsher 
72874aeea5SJeff Kirsher 	/* Work around hardware bug for unaligned buffers. */
73874aeea5SJeff Kirsher 	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
74874aeea5SJeff Kirsher 		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
75874aeea5SJeff Kirsher 
76874aeea5SJeff Kirsher 	return len;
77874aeea5SJeff Kirsher }
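
/*
 * Worked example for efx_max_tx_len() (illustrative only; assumes
 * EFX_PAGE_SIZE is 4096):
 *
 *   dma_addr = ...0x2f80: ~dma_addr & 0xfff = 0x07f, so len = 0x080
 *   (128 bytes), i.e. exactly the distance to the next 4K boundary.
 *
 *   dma_addr = ...0x2104 with workaround 5391 active: the boundary
 *   limit alone would give 0xefc (3836), but the address is not
 *   16-byte aligned (dma_addr & 0xf = 4), so len is clamped to
 *   512 - 4 = 508.
 */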
78874aeea5SJeff Kirsher 
797e6d06f0SBen Hutchings unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
807e6d06f0SBen Hutchings {
817e6d06f0SBen Hutchings 	/* Header and payload descriptor for each output segment, plus
827e6d06f0SBen Hutchings 	 * one for every input fragment boundary within a segment
837e6d06f0SBen Hutchings 	 */
847e6d06f0SBen Hutchings 	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
857e6d06f0SBen Hutchings 
867e6d06f0SBen Hutchings 	/* Possibly one more per segment for the alignment workaround */
877e6d06f0SBen Hutchings 	if (EFX_WORKAROUND_5391(efx))
887e6d06f0SBen Hutchings 		max_descs += EFX_TSO_MAX_SEGS;
897e6d06f0SBen Hutchings 
907e6d06f0SBen Hutchings 	/* Possibly more for PCIe page boundaries within input fragments */
917e6d06f0SBen Hutchings 	if (PAGE_SIZE > EFX_PAGE_SIZE)
927e6d06f0SBen Hutchings 		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
937e6d06f0SBen Hutchings 				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
947e6d06f0SBen Hutchings 
957e6d06f0SBen Hutchings 	return max_descs;
967e6d06f0SBen Hutchings }
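
/*
 * Rough numbers for efx_tx_max_skb_descs() (illustrative; the exact
 * values depend on EFX_TSO_MAX_SEGS, MAX_SKB_FRAGS and GSO_MAX_SIZE in
 * the build).  Taking EFX_TSO_MAX_SEGS == 100 and MAX_SKB_FRAGS == 17:
 *
 *   base:                        100 * 2 + 17 = 217 descriptors
 *   + workaround 5391:           217 + 100    = 317
 *   + PAGE_SIZE > EFX_PAGE_SIZE: add max(17, 65536 / 4096 = 16) = 17
 *
 * This bound is roughly the headroom that the queue stop threshold
 * has to leave free.
 */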
977e6d06f0SBen Hutchings 
9814bf718fSBen Hutchings /* Get partner of a TX queue, seen as part of the same net core queue */
9914bf718fSBen Hutchings static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
10014bf718fSBen Hutchings {
10114bf718fSBen Hutchings 	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
10214bf718fSBen Hutchings 		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
10314bf718fSBen Hutchings 	else
10414bf718fSBen Hutchings 		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
10514bf718fSBen Hutchings }
10614bf718fSBen Hutchings 
10714bf718fSBen Hutchings static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
10814bf718fSBen Hutchings {
10914bf718fSBen Hutchings 	/* We need to consider both queues that the net core sees as one */
11014bf718fSBen Hutchings 	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
11114bf718fSBen Hutchings 	struct efx_nic *efx = txq1->efx;
11214bf718fSBen Hutchings 	unsigned int fill_level;
11314bf718fSBen Hutchings 
11414bf718fSBen Hutchings 	fill_level = max(txq1->insert_count - txq1->old_read_count,
11514bf718fSBen Hutchings 			 txq2->insert_count - txq2->old_read_count);
11614bf718fSBen Hutchings 	if (likely(fill_level < efx->txq_stop_thresh))
11714bf718fSBen Hutchings 		return;
11814bf718fSBen Hutchings 
11914bf718fSBen Hutchings 	/* We used the stale old_read_count above, which gives us a
12014bf718fSBen Hutchings 	 * pessimistic estimate of the fill level (which may even
12114bf718fSBen Hutchings 	 * validly be >= efx->txq_entries).  Now try again using
12214bf718fSBen Hutchings 	 * read_count (more likely to be a cache miss).
12314bf718fSBen Hutchings 	 *
12414bf718fSBen Hutchings 	 * If we read read_count and then conditionally stop the
12514bf718fSBen Hutchings 	 * queue, it is possible for the completion path to race with
12614bf718fSBen Hutchings 	 * us and complete all outstanding descriptors in the middle,
12714bf718fSBen Hutchings 	 * after which there will be no more completions to wake it.
12814bf718fSBen Hutchings 	 * Therefore we stop the queue first, then read read_count
12914bf718fSBen Hutchings 	 * (with a memory barrier to ensure the ordering), then
13014bf718fSBen Hutchings 	 * restart the queue if the fill level turns out to be low
13114bf718fSBen Hutchings 	 * enough.
13214bf718fSBen Hutchings 	 */
13314bf718fSBen Hutchings 	netif_tx_stop_queue(txq1->core_txq);
13414bf718fSBen Hutchings 	smp_mb();
13514bf718fSBen Hutchings 	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
13614bf718fSBen Hutchings 	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
13714bf718fSBen Hutchings 
13814bf718fSBen Hutchings 	fill_level = max(txq1->insert_count - txq1->old_read_count,
13914bf718fSBen Hutchings 			 txq2->insert_count - txq2->old_read_count);
14014bf718fSBen Hutchings 	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
14114bf718fSBen Hutchings 	if (likely(fill_level < efx->txq_stop_thresh)) {
14214bf718fSBen Hutchings 		smp_mb();
14314bf718fSBen Hutchings 		if (likely(!efx->loopback_selftest))
14414bf718fSBen Hutchings 			netif_tx_start_queue(txq1->core_txq);
14514bf718fSBen Hutchings 	}
14614bf718fSBen Hutchings }
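
/*
 * Two details of efx_tx_maybe_stop_queue() worth spelling out
 * (explanatory note, not driver code):
 *
 * - insert_count and read_count are free-running unsigned counters, so
 *   "insert_count - old_read_count" is the fill level even across
 *   wraparound (e.g. 0x00000003 - 0xfffffffe == 5).
 *
 * - Stopping before re-reading read_count means that either this path
 *   sees the up-to-date read_count and restarts the queue itself, or
 *   the completion path in efx_xmit_done() sees the stopped queue and
 *   wakes it.  Checking first and stopping afterwards would leave a
 *   window in which all outstanding completions could arrive, after
 *   which nothing would ever wake the queue again.
 */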
14714bf718fSBen Hutchings 
148874aeea5SJeff Kirsher /*
149874aeea5SJeff Kirsher  * Add a socket buffer to a TX queue
150874aeea5SJeff Kirsher  *
151874aeea5SJeff Kirsher  * This maps all fragments of a socket buffer for DMA and adds them to
152874aeea5SJeff Kirsher  * the TX queue.  The queue's insert pointer will be incremented by
153874aeea5SJeff Kirsher  * the number of fragments in the socket buffer.
154874aeea5SJeff Kirsher  *
155874aeea5SJeff Kirsher  * If any DMA mapping fails, any mapped fragments will be unmapped, and
156874aeea5SJeff Kirsher  * the queue's insert pointer will be restored to its original value.
157874aeea5SJeff Kirsher  *
158874aeea5SJeff Kirsher  * This function is split out from efx_hard_start_xmit to allow the
159874aeea5SJeff Kirsher  * loopback test to direct packets via specific TX queues.
160874aeea5SJeff Kirsher  *
16114bf718fSBen Hutchings  * Returns NETDEV_TX_OK.
162874aeea5SJeff Kirsher  * You must hold netif_tx_lock() to call this function.
163874aeea5SJeff Kirsher  */
164874aeea5SJeff Kirsher netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
165874aeea5SJeff Kirsher {
166874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
1670e33d870SBen Hutchings 	struct device *dma_dev = &efx->pci_dev->dev;
168874aeea5SJeff Kirsher 	struct efx_tx_buffer *buffer;
169874aeea5SJeff Kirsher 	skb_frag_t *fragment;
17014bf718fSBen Hutchings 	unsigned int len, unmap_len = 0, insert_ptr;
171874aeea5SJeff Kirsher 	dma_addr_t dma_addr, unmap_addr = 0;
172874aeea5SJeff Kirsher 	unsigned int dma_len;
1737668ff9cSBen Hutchings 	unsigned short dma_flags;
17414bf718fSBen Hutchings 	int i = 0;
175874aeea5SJeff Kirsher 
176874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
177874aeea5SJeff Kirsher 
178874aeea5SJeff Kirsher 	if (skb_shinfo(skb)->gso_size)
179874aeea5SJeff Kirsher 		return efx_enqueue_skb_tso(tx_queue, skb);
180874aeea5SJeff Kirsher 
181874aeea5SJeff Kirsher 	/* Get size of the initial fragment */
182874aeea5SJeff Kirsher 	len = skb_headlen(skb);
183874aeea5SJeff Kirsher 
184874aeea5SJeff Kirsher 	/* Pad if necessary */
185874aeea5SJeff Kirsher 	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
186874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(skb->data_len);
187874aeea5SJeff Kirsher 		len = 32 + 1;
188874aeea5SJeff Kirsher 		if (skb_pad(skb, len - skb->len))
189874aeea5SJeff Kirsher 			return NETDEV_TX_OK;
190874aeea5SJeff Kirsher 	}
191874aeea5SJeff Kirsher 
1920e33d870SBen Hutchings 	/* Map for DMA.  Use dma_map_single rather than dma_map_page
193874aeea5SJeff Kirsher 	 * since this is more efficient on machines with sparse
194874aeea5SJeff Kirsher 	 * memory.
195874aeea5SJeff Kirsher 	 */
1967668ff9cSBen Hutchings 	dma_flags = EFX_TX_BUF_MAP_SINGLE;
1970e33d870SBen Hutchings 	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
198874aeea5SJeff Kirsher 
199874aeea5SJeff Kirsher 	/* Process all fragments */
200874aeea5SJeff Kirsher 	while (1) {
2010e33d870SBen Hutchings 		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
2020e33d870SBen Hutchings 			goto dma_err;
203874aeea5SJeff Kirsher 
204874aeea5SJeff Kirsher 		/* Store fields for marking in the per-fragment final
205874aeea5SJeff Kirsher 		 * descriptor */
206874aeea5SJeff Kirsher 		unmap_len = len;
207874aeea5SJeff Kirsher 		unmap_addr = dma_addr;
208874aeea5SJeff Kirsher 
209874aeea5SJeff Kirsher 		/* Add to TX queue, splitting across DMA boundaries */
210874aeea5SJeff Kirsher 		do {
211874aeea5SJeff Kirsher 			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
212874aeea5SJeff Kirsher 			buffer = &tx_queue->buffer[insert_ptr];
2137668ff9cSBen Hutchings 			EFX_BUG_ON_PARANOID(buffer->flags);
214874aeea5SJeff Kirsher 			EFX_BUG_ON_PARANOID(buffer->len);
215874aeea5SJeff Kirsher 			EFX_BUG_ON_PARANOID(buffer->unmap_len);
216874aeea5SJeff Kirsher 
217874aeea5SJeff Kirsher 			dma_len = efx_max_tx_len(efx, dma_addr);
218874aeea5SJeff Kirsher 			if (likely(dma_len >= len))
219874aeea5SJeff Kirsher 				dma_len = len;
220874aeea5SJeff Kirsher 
221874aeea5SJeff Kirsher 			/* Fill out per descriptor fields */
222874aeea5SJeff Kirsher 			buffer->len = dma_len;
223874aeea5SJeff Kirsher 			buffer->dma_addr = dma_addr;
2247668ff9cSBen Hutchings 			buffer->flags = EFX_TX_BUF_CONT;
225874aeea5SJeff Kirsher 			len -= dma_len;
226874aeea5SJeff Kirsher 			dma_addr += dma_len;
227874aeea5SJeff Kirsher 			++tx_queue->insert_count;
228874aeea5SJeff Kirsher 		} while (len);
229874aeea5SJeff Kirsher 
230874aeea5SJeff Kirsher 		/* Transfer ownership of the unmapping to the final buffer */
2317668ff9cSBen Hutchings 		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
232874aeea5SJeff Kirsher 		buffer->unmap_len = unmap_len;
233874aeea5SJeff Kirsher 		unmap_len = 0;
234874aeea5SJeff Kirsher 
235874aeea5SJeff Kirsher 		/* Get address and size of next fragment */
236874aeea5SJeff Kirsher 		if (i >= skb_shinfo(skb)->nr_frags)
237874aeea5SJeff Kirsher 			break;
238874aeea5SJeff Kirsher 		fragment = &skb_shinfo(skb)->frags[i];
2399e903e08SEric Dumazet 		len = skb_frag_size(fragment);
240874aeea5SJeff Kirsher 		i++;
241874aeea5SJeff Kirsher 		/* Map for DMA */
2427668ff9cSBen Hutchings 		dma_flags = 0;
2430e33d870SBen Hutchings 		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
2445d6bcdfeSIan Campbell 					    DMA_TO_DEVICE);
245874aeea5SJeff Kirsher 	}
246874aeea5SJeff Kirsher 
247874aeea5SJeff Kirsher 	/* Transfer ownership of the skb to the final buffer */
248874aeea5SJeff Kirsher 	buffer->skb = skb;
2497668ff9cSBen Hutchings 	buffer->flags = EFX_TX_BUF_SKB | dma_flags;
250874aeea5SJeff Kirsher 
251c3940999STom Herbert 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
252c3940999STom Herbert 
253874aeea5SJeff Kirsher 	/* Pass off to hardware */
254874aeea5SJeff Kirsher 	efx_nic_push_buffers(tx_queue);
255874aeea5SJeff Kirsher 
25614bf718fSBen Hutchings 	efx_tx_maybe_stop_queue(tx_queue);
25714bf718fSBen Hutchings 
258874aeea5SJeff Kirsher 	return NETDEV_TX_OK;
259874aeea5SJeff Kirsher 
2600e33d870SBen Hutchings  dma_err:
261874aeea5SJeff Kirsher 	netif_err(efx, tx_err, efx->net_dev,
262874aeea5SJeff Kirsher 		  "TX queue %d could not map skb with %d bytes %d "
263874aeea5SJeff Kirsher 		  "fragments for DMA\n", tx_queue->queue, skb->len,
264874aeea5SJeff Kirsher 		  skb_shinfo(skb)->nr_frags + 1);
265874aeea5SJeff Kirsher 
266874aeea5SJeff Kirsher 	/* Mark the packet as transmitted, and free the SKB ourselves */
267874aeea5SJeff Kirsher 	dev_kfree_skb_any(skb);
268874aeea5SJeff Kirsher 
269874aeea5SJeff Kirsher 	/* Work backwards until we hit the original insert pointer value */
270874aeea5SJeff Kirsher 	while (tx_queue->insert_count != tx_queue->write_count) {
271c3940999STom Herbert 		unsigned int pkts_compl = 0, bytes_compl = 0;
272874aeea5SJeff Kirsher 		--tx_queue->insert_count;
273874aeea5SJeff Kirsher 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
274874aeea5SJeff Kirsher 		buffer = &tx_queue->buffer[insert_ptr];
275c3940999STom Herbert 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
276874aeea5SJeff Kirsher 	}
277874aeea5SJeff Kirsher 
278874aeea5SJeff Kirsher 	/* Free the fragment we were mid-way through pushing */
279874aeea5SJeff Kirsher 	if (unmap_len) {
2807668ff9cSBen Hutchings 		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
2810e33d870SBen Hutchings 			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
2820e33d870SBen Hutchings 					 DMA_TO_DEVICE);
283874aeea5SJeff Kirsher 		else
2840e33d870SBen Hutchings 			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
2850e33d870SBen Hutchings 				       DMA_TO_DEVICE);
286874aeea5SJeff Kirsher 	}
287874aeea5SJeff Kirsher 
28814bf718fSBen Hutchings 	return NETDEV_TX_OK;
289874aeea5SJeff Kirsher }
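
/*
 * Example descriptor layout from efx_enqueue_skb() (illustrative only):
 * an skb with a 200-byte linear area and one 6000-byte page fragment
 * that happens to start on an EFX_PAGE_SIZE (4096) boundary produces
 *
 *   desc 0: len 200,  EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE, unmap_len 200
 *   desc 1: len 4096, EFX_TX_BUF_CONT
 *   desc 2: len 1904, EFX_TX_BUF_SKB, unmap_len 6000
 *
 * i.e. unmapping information lives on the last descriptor of each DMA
 * mapping, and only the final descriptor owns the skb (and drops
 * EFX_TX_BUF_CONT, marking end of packet).
 */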
290874aeea5SJeff Kirsher 
291874aeea5SJeff Kirsher /* Remove packets from the TX queue
292874aeea5SJeff Kirsher  *
293874aeea5SJeff Kirsher  * This removes packets from the TX queue, up to and including the
294874aeea5SJeff Kirsher  * specified index.
295874aeea5SJeff Kirsher  */
296874aeea5SJeff Kirsher static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
297c3940999STom Herbert 				unsigned int index,
298c3940999STom Herbert 				unsigned int *pkts_compl,
299c3940999STom Herbert 				unsigned int *bytes_compl)
300874aeea5SJeff Kirsher {
301874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
302874aeea5SJeff Kirsher 	unsigned int stop_index, read_ptr;
303874aeea5SJeff Kirsher 
304874aeea5SJeff Kirsher 	stop_index = (index + 1) & tx_queue->ptr_mask;
305874aeea5SJeff Kirsher 	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
306874aeea5SJeff Kirsher 
307874aeea5SJeff Kirsher 	while (read_ptr != stop_index) {
308874aeea5SJeff Kirsher 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
309ba8977bdSBen Hutchings 
310ba8977bdSBen Hutchings 		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
311ba8977bdSBen Hutchings 		    unlikely(buffer->len == 0)) {
312874aeea5SJeff Kirsher 			netif_err(efx, tx_err, efx->net_dev,
313874aeea5SJeff Kirsher 				  "TX queue %d spurious TX completion id %x\n",
314874aeea5SJeff Kirsher 				  tx_queue->queue, read_ptr);
315874aeea5SJeff Kirsher 			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
316874aeea5SJeff Kirsher 			return;
317874aeea5SJeff Kirsher 		}
318874aeea5SJeff Kirsher 
319c3940999STom Herbert 		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
320874aeea5SJeff Kirsher 
321874aeea5SJeff Kirsher 		++tx_queue->read_count;
322874aeea5SJeff Kirsher 		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
323874aeea5SJeff Kirsher 	}
324874aeea5SJeff Kirsher }
325874aeea5SJeff Kirsher 
326874aeea5SJeff Kirsher /* Initiate a packet transmission.  We use one channel per CPU
327874aeea5SJeff Kirsher  * (sharing when we have more CPUs than channels).  On Falcon, the TX
328874aeea5SJeff Kirsher  * completion events will be directed back to the CPU that transmitted
329874aeea5SJeff Kirsher  * the packet, which should be cache-efficient.
330874aeea5SJeff Kirsher  *
331874aeea5SJeff Kirsher  * Context: non-blocking.
332874aeea5SJeff Kirsher  * Note that returning anything other than NETDEV_TX_OK will cause the
333874aeea5SJeff Kirsher  * OS to free the skb.
334874aeea5SJeff Kirsher  */
335874aeea5SJeff Kirsher netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
336874aeea5SJeff Kirsher 				struct net_device *net_dev)
337874aeea5SJeff Kirsher {
338874aeea5SJeff Kirsher 	struct efx_nic *efx = netdev_priv(net_dev);
339874aeea5SJeff Kirsher 	struct efx_tx_queue *tx_queue;
340874aeea5SJeff Kirsher 	unsigned index, type;
341874aeea5SJeff Kirsher 
342874aeea5SJeff Kirsher 	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
343874aeea5SJeff Kirsher 
3447c236c43SStuart Hodgson 	/* PTP "event" packet */
3457c236c43SStuart Hodgson 	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
3467c236c43SStuart Hodgson 	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
3477c236c43SStuart Hodgson 		return efx_ptp_tx(efx, skb);
3487c236c43SStuart Hodgson 	}
3497c236c43SStuart Hodgson 
350874aeea5SJeff Kirsher 	index = skb_get_queue_mapping(skb);
351874aeea5SJeff Kirsher 	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
352874aeea5SJeff Kirsher 	if (index >= efx->n_tx_channels) {
353874aeea5SJeff Kirsher 		index -= efx->n_tx_channels;
354874aeea5SJeff Kirsher 		type |= EFX_TXQ_TYPE_HIGHPRI;
355874aeea5SJeff Kirsher 	}
356874aeea5SJeff Kirsher 	tx_queue = efx_get_tx_queue(efx, index, type);
357874aeea5SJeff Kirsher 
358874aeea5SJeff Kirsher 	return efx_enqueue_skb(tx_queue, skb);
359874aeea5SJeff Kirsher }
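
/*
 * Queue selection example (illustrative; assumes efx->n_tx_channels == 4
 * and two traffic classes, so the net core exposes TX queues 0-7): an skb
 * on core queue 5 with CHECKSUM_PARTIAL maps to index 1 and type
 * EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI.
 * efx_init_tx_queue_core_txq() below applies the inverse mapping so that
 * completions find the right netdev queue.
 */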
360874aeea5SJeff Kirsher 
361874aeea5SJeff Kirsher void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
362874aeea5SJeff Kirsher {
363874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
364874aeea5SJeff Kirsher 
365874aeea5SJeff Kirsher 	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
366874aeea5SJeff Kirsher 	tx_queue->core_txq =
367874aeea5SJeff Kirsher 		netdev_get_tx_queue(efx->net_dev,
368874aeea5SJeff Kirsher 				    tx_queue->queue / EFX_TXQ_TYPES +
369874aeea5SJeff Kirsher 				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
370874aeea5SJeff Kirsher 				     efx->n_tx_channels : 0));
371874aeea5SJeff Kirsher }
372874aeea5SJeff Kirsher 
373874aeea5SJeff Kirsher int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
374874aeea5SJeff Kirsher {
375874aeea5SJeff Kirsher 	struct efx_nic *efx = netdev_priv(net_dev);
376874aeea5SJeff Kirsher 	struct efx_channel *channel;
377874aeea5SJeff Kirsher 	struct efx_tx_queue *tx_queue;
378874aeea5SJeff Kirsher 	unsigned tc;
379874aeea5SJeff Kirsher 	int rc;
380874aeea5SJeff Kirsher 
381874aeea5SJeff Kirsher 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
382874aeea5SJeff Kirsher 		return -EINVAL;
383874aeea5SJeff Kirsher 
384874aeea5SJeff Kirsher 	if (num_tc == net_dev->num_tc)
385874aeea5SJeff Kirsher 		return 0;
386874aeea5SJeff Kirsher 
387874aeea5SJeff Kirsher 	for (tc = 0; tc < num_tc; tc++) {
388874aeea5SJeff Kirsher 		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
389874aeea5SJeff Kirsher 		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
390874aeea5SJeff Kirsher 	}
391874aeea5SJeff Kirsher 
392874aeea5SJeff Kirsher 	if (num_tc > net_dev->num_tc) {
393874aeea5SJeff Kirsher 		/* Initialise high-priority queues as necessary */
394874aeea5SJeff Kirsher 		efx_for_each_channel(channel, efx) {
395874aeea5SJeff Kirsher 			efx_for_each_possible_channel_tx_queue(tx_queue,
396874aeea5SJeff Kirsher 							       channel) {
397874aeea5SJeff Kirsher 				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
398874aeea5SJeff Kirsher 					continue;
399874aeea5SJeff Kirsher 				if (!tx_queue->buffer) {
400874aeea5SJeff Kirsher 					rc = efx_probe_tx_queue(tx_queue);
401874aeea5SJeff Kirsher 					if (rc)
402874aeea5SJeff Kirsher 						return rc;
403874aeea5SJeff Kirsher 				}
404874aeea5SJeff Kirsher 				if (!tx_queue->initialised)
405874aeea5SJeff Kirsher 					efx_init_tx_queue(tx_queue);
406874aeea5SJeff Kirsher 				efx_init_tx_queue_core_txq(tx_queue);
407874aeea5SJeff Kirsher 			}
408874aeea5SJeff Kirsher 		}
409874aeea5SJeff Kirsher 	} else {
410874aeea5SJeff Kirsher 		/* Reduce number of classes before number of queues */
411874aeea5SJeff Kirsher 		net_dev->num_tc = num_tc;
412874aeea5SJeff Kirsher 	}
413874aeea5SJeff Kirsher 
414874aeea5SJeff Kirsher 	rc = netif_set_real_num_tx_queues(net_dev,
415874aeea5SJeff Kirsher 					  max_t(int, num_tc, 1) *
416874aeea5SJeff Kirsher 					  efx->n_tx_channels);
417874aeea5SJeff Kirsher 	if (rc)
418874aeea5SJeff Kirsher 		return rc;
419874aeea5SJeff Kirsher 
420874aeea5SJeff Kirsher 	/* Do not destroy high-priority queues when they become
421874aeea5SJeff Kirsher 	 * unused.  We would have to flush them first, and it is
422874aeea5SJeff Kirsher 	 * fairly difficult to flush a subset of TX queues.  Leave
423874aeea5SJeff Kirsher 	 * it to efx_fini_channels().
424874aeea5SJeff Kirsher 	 */
425874aeea5SJeff Kirsher 
426874aeea5SJeff Kirsher 	net_dev->num_tc = num_tc;
427874aeea5SJeff Kirsher 	return 0;
428874aeea5SJeff Kirsher }
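
/*
 * Worked example for efx_setup_tc() (illustrative; assumes
 * efx->n_tx_channels == 4): num_tc = 2 gives
 *
 *   tc_to_txq[0] = { .offset = 0, .count = 4 }
 *   tc_to_txq[1] = { .offset = 4, .count = 4 }
 *
 * and netif_set_real_num_tx_queues(net_dev, 8).  Growing num_tc probes
 * and initialises the high-priority queues; shrinking it only reduces
 * net_dev->num_tc and leaves those queues allocated for
 * efx_fini_channels() to tear down.
 */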
429874aeea5SJeff Kirsher 
430874aeea5SJeff Kirsher void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
431874aeea5SJeff Kirsher {
432874aeea5SJeff Kirsher 	unsigned fill_level;
433874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
43414bf718fSBen Hutchings 	struct efx_tx_queue *txq2;
435c3940999STom Herbert 	unsigned int pkts_compl = 0, bytes_compl = 0;
436874aeea5SJeff Kirsher 
437874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
438874aeea5SJeff Kirsher 
439c3940999STom Herbert 	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
440c3940999STom Herbert 	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
441874aeea5SJeff Kirsher 
44202e12165SBen Hutchings 	if (pkts_compl > 1)
44302e12165SBen Hutchings 		++tx_queue->merge_events;
44402e12165SBen Hutchings 
44514bf718fSBen Hutchings 	/* See if we need to restart the netif queue.  This memory
44614bf718fSBen Hutchings 	 * barrier ensures that we write read_count (inside
44714bf718fSBen Hutchings 	 * efx_dequeue_buffers()) before reading the queue status.
44814bf718fSBen Hutchings 	 */
449874aeea5SJeff Kirsher 	smp_mb();
450874aeea5SJeff Kirsher 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
451874aeea5SJeff Kirsher 	    likely(efx->port_enabled) &&
452874aeea5SJeff Kirsher 	    likely(netif_device_present(efx->net_dev))) {
45314bf718fSBen Hutchings 		txq2 = efx_tx_queue_partner(tx_queue);
45414bf718fSBen Hutchings 		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
45514bf718fSBen Hutchings 				 txq2->insert_count - txq2->read_count);
45614bf718fSBen Hutchings 		if (fill_level <= efx->txq_wake_thresh)
457874aeea5SJeff Kirsher 			netif_tx_wake_queue(tx_queue->core_txq);
458874aeea5SJeff Kirsher 	}
459874aeea5SJeff Kirsher 
460874aeea5SJeff Kirsher 	/* Check whether the hardware queue is now empty */
461874aeea5SJeff Kirsher 	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
462874aeea5SJeff Kirsher 		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
463874aeea5SJeff Kirsher 		if (tx_queue->read_count == tx_queue->old_write_count) {
464874aeea5SJeff Kirsher 			smp_mb();
465874aeea5SJeff Kirsher 			tx_queue->empty_read_count =
466874aeea5SJeff Kirsher 				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
467874aeea5SJeff Kirsher 		}
468874aeea5SJeff Kirsher 	}
469874aeea5SJeff Kirsher }
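
/*
 * Note on the final check in efx_xmit_done(): read_count and write_count
 * are free-running, so "(int)(read_count - old_write_count) >= 0" means
 * every descriptor written as of the last snapshot has now completed.
 * When read_count also matches a fresh snapshot of write_count the
 * hardware queue is empty, and empty_read_count records this (tagged
 * with EFX_EMPTY_COUNT_VALID) so the NIC-specific code can tell an empty
 * queue from a busy one, e.g. when deciding whether a descriptor may be
 * pushed directly.
 */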
470874aeea5SJeff Kirsher 
471f7251a9cSBen Hutchings /* Size of page-based TSO header buffers.  Larger blocks must be
472f7251a9cSBen Hutchings  * allocated from the heap.
473f7251a9cSBen Hutchings  */
474f7251a9cSBen Hutchings #define TSOH_STD_SIZE	128
475f7251a9cSBen Hutchings #define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
476f7251a9cSBen Hutchings 
477f7251a9cSBen Hutchings /* At most half the descriptors in the queue at any time will refer to
478f7251a9cSBen Hutchings  * a TSO header buffer, since they must always be followed by a
479f7251a9cSBen Hutchings  * payload descriptor referring to an skb.
480f7251a9cSBen Hutchings  */
481f7251a9cSBen Hutchings static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
482f7251a9cSBen Hutchings {
483f7251a9cSBen Hutchings 	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
484f7251a9cSBen Hutchings }
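
/*
 * Example for efx_tsoh_page_count() (illustrative; assumes 4KiB pages,
 * so TSOH_PER_PAGE == 32): a ring of 1024 descriptors can hold at most
 * 512 header buffers at once, needing DIV_ROUND_UP(1024, 64) == 16
 * pages of TSO header storage.
 */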
485f7251a9cSBen Hutchings 
486874aeea5SJeff Kirsher int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
487874aeea5SJeff Kirsher {
488874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
489874aeea5SJeff Kirsher 	unsigned int entries;
4907668ff9cSBen Hutchings 	int rc;
491874aeea5SJeff Kirsher 
492874aeea5SJeff Kirsher 	/* Create the smallest power-of-two aligned ring */
493874aeea5SJeff Kirsher 	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
494874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
495874aeea5SJeff Kirsher 	tx_queue->ptr_mask = entries - 1;
496874aeea5SJeff Kirsher 
497874aeea5SJeff Kirsher 	netif_dbg(efx, probe, efx->net_dev,
498874aeea5SJeff Kirsher 		  "creating TX queue %d size %#x mask %#x\n",
499874aeea5SJeff Kirsher 		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
500874aeea5SJeff Kirsher 
501874aeea5SJeff Kirsher 	/* Allocate software ring */
502c2e4e25aSThomas Meyer 	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
503874aeea5SJeff Kirsher 				   GFP_KERNEL);
504874aeea5SJeff Kirsher 	if (!tx_queue->buffer)
505874aeea5SJeff Kirsher 		return -ENOMEM;
506874aeea5SJeff Kirsher 
507f7251a9cSBen Hutchings 	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
508f7251a9cSBen Hutchings 		tx_queue->tsoh_page =
509f7251a9cSBen Hutchings 			kcalloc(efx_tsoh_page_count(tx_queue),
510f7251a9cSBen Hutchings 				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
511f7251a9cSBen Hutchings 		if (!tx_queue->tsoh_page) {
512f7251a9cSBen Hutchings 			rc = -ENOMEM;
513f7251a9cSBen Hutchings 			goto fail1;
514f7251a9cSBen Hutchings 		}
515f7251a9cSBen Hutchings 	}
516f7251a9cSBen Hutchings 
517874aeea5SJeff Kirsher 	/* Allocate hardware ring */
518874aeea5SJeff Kirsher 	rc = efx_nic_probe_tx(tx_queue);
519874aeea5SJeff Kirsher 	if (rc)
520f7251a9cSBen Hutchings 		goto fail2;
521874aeea5SJeff Kirsher 
522874aeea5SJeff Kirsher 	return 0;
523874aeea5SJeff Kirsher 
524f7251a9cSBen Hutchings fail2:
525f7251a9cSBen Hutchings 	kfree(tx_queue->tsoh_page);
526f7251a9cSBen Hutchings 	tx_queue->tsoh_page = NULL;
527f7251a9cSBen Hutchings fail1:
528874aeea5SJeff Kirsher 	kfree(tx_queue->buffer);
529874aeea5SJeff Kirsher 	tx_queue->buffer = NULL;
530874aeea5SJeff Kirsher 	return rc;
531874aeea5SJeff Kirsher }
532874aeea5SJeff Kirsher 
533874aeea5SJeff Kirsher void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
534874aeea5SJeff Kirsher {
535874aeea5SJeff Kirsher 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
536874aeea5SJeff Kirsher 		  "initialising TX queue %d\n", tx_queue->queue);
537874aeea5SJeff Kirsher 
538874aeea5SJeff Kirsher 	tx_queue->insert_count = 0;
539874aeea5SJeff Kirsher 	tx_queue->write_count = 0;
540874aeea5SJeff Kirsher 	tx_queue->old_write_count = 0;
541874aeea5SJeff Kirsher 	tx_queue->read_count = 0;
542874aeea5SJeff Kirsher 	tx_queue->old_read_count = 0;
543874aeea5SJeff Kirsher 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
544874aeea5SJeff Kirsher 
545874aeea5SJeff Kirsher 	/* Set up TX descriptor ring */
546874aeea5SJeff Kirsher 	efx_nic_init_tx(tx_queue);
547874aeea5SJeff Kirsher 
548874aeea5SJeff Kirsher 	tx_queue->initialised = true;
549874aeea5SJeff Kirsher }
550874aeea5SJeff Kirsher 
551e42c3d85SBen Hutchings void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
552874aeea5SJeff Kirsher {
553874aeea5SJeff Kirsher 	struct efx_tx_buffer *buffer;
554874aeea5SJeff Kirsher 
555e42c3d85SBen Hutchings 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
556e42c3d85SBen Hutchings 		  "shutting down TX queue %d\n", tx_queue->queue);
557e42c3d85SBen Hutchings 
558874aeea5SJeff Kirsher 	if (!tx_queue->buffer)
559874aeea5SJeff Kirsher 		return;
560874aeea5SJeff Kirsher 
561874aeea5SJeff Kirsher 	/* Free any buffers left in the ring */
562874aeea5SJeff Kirsher 	while (tx_queue->read_count != tx_queue->write_count) {
563c3940999STom Herbert 		unsigned int pkts_compl = 0, bytes_compl = 0;
564874aeea5SJeff Kirsher 		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
565c3940999STom Herbert 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
566874aeea5SJeff Kirsher 
567874aeea5SJeff Kirsher 		++tx_queue->read_count;
568874aeea5SJeff Kirsher 	}
569c3940999STom Herbert 	netdev_tx_reset_queue(tx_queue->core_txq);
570874aeea5SJeff Kirsher }
571874aeea5SJeff Kirsher 
572874aeea5SJeff Kirsher void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
573874aeea5SJeff Kirsher {
574f7251a9cSBen Hutchings 	int i;
575f7251a9cSBen Hutchings 
576874aeea5SJeff Kirsher 	if (!tx_queue->buffer)
577874aeea5SJeff Kirsher 		return;
578874aeea5SJeff Kirsher 
579874aeea5SJeff Kirsher 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
580874aeea5SJeff Kirsher 		  "destroying TX queue %d\n", tx_queue->queue);
581874aeea5SJeff Kirsher 	efx_nic_remove_tx(tx_queue);
582874aeea5SJeff Kirsher 
583f7251a9cSBen Hutchings 	if (tx_queue->tsoh_page) {
584f7251a9cSBen Hutchings 		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
585f7251a9cSBen Hutchings 			efx_nic_free_buffer(tx_queue->efx,
586f7251a9cSBen Hutchings 					    &tx_queue->tsoh_page[i]);
587f7251a9cSBen Hutchings 		kfree(tx_queue->tsoh_page);
588f7251a9cSBen Hutchings 		tx_queue->tsoh_page = NULL;
589f7251a9cSBen Hutchings 	}
590f7251a9cSBen Hutchings 
591874aeea5SJeff Kirsher 	kfree(tx_queue->buffer);
592874aeea5SJeff Kirsher 	tx_queue->buffer = NULL;
593874aeea5SJeff Kirsher }
594874aeea5SJeff Kirsher 
595874aeea5SJeff Kirsher 
596874aeea5SJeff Kirsher /* Efx TCP segmentation acceleration.
597874aeea5SJeff Kirsher  *
598874aeea5SJeff Kirsher  * Why?  Because by doing it here in the driver we can go significantly
599874aeea5SJeff Kirsher  * faster than software GSO.
600874aeea5SJeff Kirsher  *
601874aeea5SJeff Kirsher  * Requires TX checksum offload support.
602874aeea5SJeff Kirsher  */
603874aeea5SJeff Kirsher 
604874aeea5SJeff Kirsher /* Number of bytes inserted at the start of a TSO header buffer,
605874aeea5SJeff Kirsher  * similar to NET_IP_ALIGN.
606874aeea5SJeff Kirsher  */
607874aeea5SJeff Kirsher #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
608874aeea5SJeff Kirsher #define TSOH_OFFSET	0
609874aeea5SJeff Kirsher #else
610874aeea5SJeff Kirsher #define TSOH_OFFSET	NET_IP_ALIGN
611874aeea5SJeff Kirsher #endif
612874aeea5SJeff Kirsher 
613874aeea5SJeff Kirsher #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
614874aeea5SJeff Kirsher 
615874aeea5SJeff Kirsher /**
616874aeea5SJeff Kirsher  * struct tso_state - TSO state for an SKB
617874aeea5SJeff Kirsher  * @out_len: Remaining length in current segment
618874aeea5SJeff Kirsher  * @seqnum: Current sequence number
619874aeea5SJeff Kirsher  * @ipv4_id: Current IPv4 ID, host endian
620874aeea5SJeff Kirsher  * @packet_space: Remaining space in current packet
621874aeea5SJeff Kirsher  * @dma_addr: DMA address of current position
622874aeea5SJeff Kirsher  * @in_len: Remaining length in current SKB fragment
623874aeea5SJeff Kirsher  * @unmap_len: Length of SKB fragment
624874aeea5SJeff Kirsher  * @unmap_addr: DMA address of SKB fragment
6257668ff9cSBen Hutchings  * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
626874aeea5SJeff Kirsher  * @protocol: Network protocol (after any VLAN header)
6279714284fSBen Hutchings  * @ip_off: Offset of IP header
6289714284fSBen Hutchings  * @tcp_off: Offset of TCP header
629874aeea5SJeff Kirsher  * @header_len: Number of bytes of header
63053cb13c6SBen Hutchings  * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
631874aeea5SJeff Kirsher  *
632874aeea5SJeff Kirsher  * The state used during segmentation.  It is put into this data structure
633874aeea5SJeff Kirsher  * just to make it easy to pass into inline functions.
634874aeea5SJeff Kirsher  */
635874aeea5SJeff Kirsher struct tso_state {
636874aeea5SJeff Kirsher 	/* Output position */
637874aeea5SJeff Kirsher 	unsigned out_len;
638874aeea5SJeff Kirsher 	unsigned seqnum;
639874aeea5SJeff Kirsher 	unsigned ipv4_id;
640874aeea5SJeff Kirsher 	unsigned packet_space;
641874aeea5SJeff Kirsher 
642874aeea5SJeff Kirsher 	/* Input position */
643874aeea5SJeff Kirsher 	dma_addr_t dma_addr;
644874aeea5SJeff Kirsher 	unsigned in_len;
645874aeea5SJeff Kirsher 	unsigned unmap_len;
646874aeea5SJeff Kirsher 	dma_addr_t unmap_addr;
6477668ff9cSBen Hutchings 	unsigned short dma_flags;
648874aeea5SJeff Kirsher 
649874aeea5SJeff Kirsher 	__be16 protocol;
6509714284fSBen Hutchings 	unsigned int ip_off;
6519714284fSBen Hutchings 	unsigned int tcp_off;
652874aeea5SJeff Kirsher 	unsigned header_len;
65353cb13c6SBen Hutchings 	unsigned int ip_base_len;
654874aeea5SJeff Kirsher };
655874aeea5SJeff Kirsher 
656874aeea5SJeff Kirsher 
657874aeea5SJeff Kirsher /*
658874aeea5SJeff Kirsher  * Verify that our various assumptions about sk_buffs and the conditions
659874aeea5SJeff Kirsher  * under which TSO will be attempted hold true.  Return the protocol number.
660874aeea5SJeff Kirsher  */
661874aeea5SJeff Kirsher static __be16 efx_tso_check_protocol(struct sk_buff *skb)
662874aeea5SJeff Kirsher {
663874aeea5SJeff Kirsher 	__be16 protocol = skb->protocol;
664874aeea5SJeff Kirsher 
665874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
666874aeea5SJeff Kirsher 			    protocol);
667874aeea5SJeff Kirsher 	if (protocol == htons(ETH_P_8021Q)) {
668874aeea5SJeff Kirsher 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
669874aeea5SJeff Kirsher 		protocol = veh->h_vlan_encapsulated_proto;
670874aeea5SJeff Kirsher 	}
671874aeea5SJeff Kirsher 
672874aeea5SJeff Kirsher 	if (protocol == htons(ETH_P_IP)) {
673874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
674874aeea5SJeff Kirsher 	} else {
675874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
676874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
677874aeea5SJeff Kirsher 	}
678874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
679874aeea5SJeff Kirsher 			     + (tcp_hdr(skb)->doff << 2u)) >
680874aeea5SJeff Kirsher 			    skb_headlen(skb));
681874aeea5SJeff Kirsher 
682874aeea5SJeff Kirsher 	return protocol;
683874aeea5SJeff Kirsher }
684874aeea5SJeff Kirsher 
685f7251a9cSBen Hutchings static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
686f7251a9cSBen Hutchings 			       struct efx_tx_buffer *buffer, unsigned int len)
687874aeea5SJeff Kirsher {
688f7251a9cSBen Hutchings 	u8 *result;
689874aeea5SJeff Kirsher 
690f7251a9cSBen Hutchings 	EFX_BUG_ON_PARANOID(buffer->len);
691f7251a9cSBen Hutchings 	EFX_BUG_ON_PARANOID(buffer->flags);
692f7251a9cSBen Hutchings 	EFX_BUG_ON_PARANOID(buffer->unmap_len);
693874aeea5SJeff Kirsher 
694f7251a9cSBen Hutchings 	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
695f7251a9cSBen Hutchings 		unsigned index =
696f7251a9cSBen Hutchings 			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
697f7251a9cSBen Hutchings 		struct efx_buffer *page_buf =
698f7251a9cSBen Hutchings 			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
699f7251a9cSBen Hutchings 		unsigned offset =
700f7251a9cSBen Hutchings 			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
701874aeea5SJeff Kirsher 
702f7251a9cSBen Hutchings 		if (unlikely(!page_buf->addr) &&
7030d19a540SBen Hutchings 		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
7040d19a540SBen Hutchings 					 GFP_ATOMIC))
705874aeea5SJeff Kirsher 			return NULL;
706874aeea5SJeff Kirsher 
707f7251a9cSBen Hutchings 		result = (u8 *)page_buf->addr + offset;
708f7251a9cSBen Hutchings 		buffer->dma_addr = page_buf->dma_addr + offset;
709f7251a9cSBen Hutchings 		buffer->flags = EFX_TX_BUF_CONT;
710f7251a9cSBen Hutchings 	} else {
711f7251a9cSBen Hutchings 		tx_queue->tso_long_headers++;
712f7251a9cSBen Hutchings 
713f7251a9cSBen Hutchings 		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
714f7251a9cSBen Hutchings 		if (unlikely(!buffer->heap_buf))
715874aeea5SJeff Kirsher 			return NULL;
716f7251a9cSBen Hutchings 		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
717f7251a9cSBen Hutchings 		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
718874aeea5SJeff Kirsher 	}
719874aeea5SJeff Kirsher 
720f7251a9cSBen Hutchings 	buffer->len = len;
721874aeea5SJeff Kirsher 
722f7251a9cSBen Hutchings 	return result;
723874aeea5SJeff Kirsher }
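
/*
 * Index arithmetic in efx_tsoh_get_buffer(), by example (illustrative;
 * assumes 4KiB pages so TSOH_PER_PAGE == 32): with
 * insert_count & ptr_mask == 70, index = 70 / 2 = 35, which lands in
 * tsoh_page[35 / 32] = tsoh_page[1] at offset
 * 128 * (35 % 32) + TSOH_OFFSET = 384 + TSOH_OFFSET.
 * Headers longer than TSOH_STD_SIZE - TSOH_OFFSET instead take the
 * kmalloc() path and are flagged EFX_TX_BUF_HEAP for freeing later.
 */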
724874aeea5SJeff Kirsher 
725874aeea5SJeff Kirsher /**
726874aeea5SJeff Kirsher  * efx_tx_queue_insert - push descriptors onto the TX queue
727874aeea5SJeff Kirsher  * @tx_queue:		Efx TX queue
728874aeea5SJeff Kirsher  * @dma_addr:		DMA address of fragment
729874aeea5SJeff Kirsher  * @len:		Length of fragment
730874aeea5SJeff Kirsher  * @final_buffer:	The final buffer inserted into the queue
731874aeea5SJeff Kirsher  *
73214bf718fSBen Hutchings  * Push descriptors onto the TX queue.
733874aeea5SJeff Kirsher  */
73414bf718fSBen Hutchings static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
735874aeea5SJeff Kirsher 				dma_addr_t dma_addr, unsigned len,
736874aeea5SJeff Kirsher 				struct efx_tx_buffer **final_buffer)
737874aeea5SJeff Kirsher {
738874aeea5SJeff Kirsher 	struct efx_tx_buffer *buffer;
739874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
74014bf718fSBen Hutchings 	unsigned dma_len, insert_ptr;
741874aeea5SJeff Kirsher 
742874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(len <= 0);
743874aeea5SJeff Kirsher 
744874aeea5SJeff Kirsher 	while (1) {
745874aeea5SJeff Kirsher 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
746874aeea5SJeff Kirsher 		buffer = &tx_queue->buffer[insert_ptr];
747874aeea5SJeff Kirsher 		++tx_queue->insert_count;
748874aeea5SJeff Kirsher 
749874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
750874aeea5SJeff Kirsher 				    tx_queue->read_count >=
751874aeea5SJeff Kirsher 				    efx->txq_entries);
752874aeea5SJeff Kirsher 
753874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(buffer->len);
754874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(buffer->unmap_len);
7557668ff9cSBen Hutchings 		EFX_BUG_ON_PARANOID(buffer->flags);
756874aeea5SJeff Kirsher 
757874aeea5SJeff Kirsher 		buffer->dma_addr = dma_addr;
758874aeea5SJeff Kirsher 
759874aeea5SJeff Kirsher 		dma_len = efx_max_tx_len(efx, dma_addr);
760874aeea5SJeff Kirsher 
761874aeea5SJeff Kirsher 		/* If there is enough space to send then do so */
762874aeea5SJeff Kirsher 		if (dma_len >= len)
763874aeea5SJeff Kirsher 			break;
764874aeea5SJeff Kirsher 
7657668ff9cSBen Hutchings 		buffer->len = dma_len;
7667668ff9cSBen Hutchings 		buffer->flags = EFX_TX_BUF_CONT;
767874aeea5SJeff Kirsher 		dma_addr += dma_len;
768874aeea5SJeff Kirsher 		len -= dma_len;
769874aeea5SJeff Kirsher 	}
770874aeea5SJeff Kirsher 
771874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(!len);
772874aeea5SJeff Kirsher 	buffer->len = len;
773874aeea5SJeff Kirsher 	*final_buffer = buffer;
774874aeea5SJeff Kirsher }
775874aeea5SJeff Kirsher 
776874aeea5SJeff Kirsher 
777874aeea5SJeff Kirsher /*
778874aeea5SJeff Kirsher  * Put a TSO header into the TX queue.
779874aeea5SJeff Kirsher  *
780874aeea5SJeff Kirsher  * This is special-cased because we know that it is small enough to fit in
781874aeea5SJeff Kirsher  * a single fragment, and we know it doesn't cross a page boundary.  It
782874aeea5SJeff Kirsher  * also allows us to not worry about end-of-packet etc.
783874aeea5SJeff Kirsher  */
784f7251a9cSBen Hutchings static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
785f7251a9cSBen Hutchings 			      struct efx_tx_buffer *buffer, u8 *header)
786874aeea5SJeff Kirsher {
787f7251a9cSBen Hutchings 	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
788f7251a9cSBen Hutchings 		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
789f7251a9cSBen Hutchings 						  header, buffer->len,
790f7251a9cSBen Hutchings 						  DMA_TO_DEVICE);
791f7251a9cSBen Hutchings 		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
792f7251a9cSBen Hutchings 					       buffer->dma_addr))) {
793f7251a9cSBen Hutchings 			kfree(buffer->heap_buf);
794f7251a9cSBen Hutchings 			buffer->len = 0;
795f7251a9cSBen Hutchings 			buffer->flags = 0;
796f7251a9cSBen Hutchings 			return -ENOMEM;
797f7251a9cSBen Hutchings 		}
798f7251a9cSBen Hutchings 		buffer->unmap_len = buffer->len;
799f7251a9cSBen Hutchings 		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
800f7251a9cSBen Hutchings 	}
801874aeea5SJeff Kirsher 
802874aeea5SJeff Kirsher 	++tx_queue->insert_count;
803f7251a9cSBen Hutchings 	return 0;
804874aeea5SJeff Kirsher }
805874aeea5SJeff Kirsher 
806874aeea5SJeff Kirsher 
807f7251a9cSBen Hutchings /* Remove buffers put into a tx_queue.  None of the buffers may have
808f7251a9cSBen Hutchings  * an skb attached.
809f7251a9cSBen Hutchings  */
810874aeea5SJeff Kirsher static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
811874aeea5SJeff Kirsher {
812874aeea5SJeff Kirsher 	struct efx_tx_buffer *buffer;
813874aeea5SJeff Kirsher 
814874aeea5SJeff Kirsher 	/* Work backwards until we hit the original insert pointer value */
815874aeea5SJeff Kirsher 	while (tx_queue->insert_count != tx_queue->write_count) {
816874aeea5SJeff Kirsher 		--tx_queue->insert_count;
817874aeea5SJeff Kirsher 		buffer = &tx_queue->buffer[tx_queue->insert_count &
818874aeea5SJeff Kirsher 					   tx_queue->ptr_mask];
819f7251a9cSBen Hutchings 		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
820874aeea5SJeff Kirsher 	}
821874aeea5SJeff Kirsher }
822874aeea5SJeff Kirsher 
823874aeea5SJeff Kirsher 
824874aeea5SJeff Kirsher /* Parse the SKB header and initialise state. */
825874aeea5SJeff Kirsher static void tso_start(struct tso_state *st, const struct sk_buff *skb)
826874aeea5SJeff Kirsher {
8279714284fSBen Hutchings 	st->ip_off = skb_network_header(skb) - skb->data;
8289714284fSBen Hutchings 	st->tcp_off = skb_transport_header(skb) - skb->data;
8299714284fSBen Hutchings 	st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
83053cb13c6SBen Hutchings 	if (st->protocol == htons(ETH_P_IP)) {
8319714284fSBen Hutchings 		st->ip_base_len = st->header_len - st->ip_off;
832874aeea5SJeff Kirsher 		st->ipv4_id = ntohs(ip_hdr(skb)->id);
83353cb13c6SBen Hutchings 	} else {
8349714284fSBen Hutchings 		st->ip_base_len = st->header_len - st->tcp_off;
835874aeea5SJeff Kirsher 		st->ipv4_id = 0;
83653cb13c6SBen Hutchings 	}
837874aeea5SJeff Kirsher 	st->seqnum = ntohl(tcp_hdr(skb)->seq);
838874aeea5SJeff Kirsher 
839874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
840874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
841874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
842874aeea5SJeff Kirsher 
843874aeea5SJeff Kirsher 	st->out_len = skb->len - st->header_len;
844874aeea5SJeff Kirsher 	st->unmap_len = 0;
8457668ff9cSBen Hutchings 	st->dma_flags = 0;
846874aeea5SJeff Kirsher }
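
/*
 * Example of the offsets computed by tso_start() (illustrative): for an
 * untagged Ethernet + TCP skb with no TCP options,
 *   IPv4: ip_off = 14, tcp_off = 34, header_len = 34 + 20 = 54,
 *         ip_base_len = 54 - 14 = 40 (IP + TCP headers, as in tot_len);
 *   IPv6: tcp_off = 54, header_len = 74, ip_base_len = 74 - 54 = 20,
 *         matching payload_len, which excludes the IPv6 header itself.
 */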
847874aeea5SJeff Kirsher 
848874aeea5SJeff Kirsher static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
849874aeea5SJeff Kirsher 			    skb_frag_t *frag)
850874aeea5SJeff Kirsher {
8514a22c4c9SIan Campbell 	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
8529e903e08SEric Dumazet 					  skb_frag_size(frag), DMA_TO_DEVICE);
8535d6bcdfeSIan Campbell 	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
8547668ff9cSBen Hutchings 		st->dma_flags = 0;
8559e903e08SEric Dumazet 		st->unmap_len = skb_frag_size(frag);
8569e903e08SEric Dumazet 		st->in_len = skb_frag_size(frag);
857874aeea5SJeff Kirsher 		st->dma_addr = st->unmap_addr;
858874aeea5SJeff Kirsher 		return 0;
859874aeea5SJeff Kirsher 	}
860874aeea5SJeff Kirsher 	return -ENOMEM;
861874aeea5SJeff Kirsher }
862874aeea5SJeff Kirsher 
863874aeea5SJeff Kirsher static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
864874aeea5SJeff Kirsher 				 const struct sk_buff *skb)
865874aeea5SJeff Kirsher {
866874aeea5SJeff Kirsher 	int hl = st->header_len;
867874aeea5SJeff Kirsher 	int len = skb_headlen(skb) - hl;
868874aeea5SJeff Kirsher 
8690e33d870SBen Hutchings 	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
8700e33d870SBen Hutchings 					len, DMA_TO_DEVICE);
8710e33d870SBen Hutchings 	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
8727668ff9cSBen Hutchings 		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
873874aeea5SJeff Kirsher 		st->unmap_len = len;
874874aeea5SJeff Kirsher 		st->in_len = len;
875874aeea5SJeff Kirsher 		st->dma_addr = st->unmap_addr;
876874aeea5SJeff Kirsher 		return 0;
877874aeea5SJeff Kirsher 	}
878874aeea5SJeff Kirsher 	return -ENOMEM;
879874aeea5SJeff Kirsher }
880874aeea5SJeff Kirsher 
881874aeea5SJeff Kirsher 
882874aeea5SJeff Kirsher /**
883874aeea5SJeff Kirsher  * tso_fill_packet_with_fragment - form descriptors for the current fragment
884874aeea5SJeff Kirsher  * @tx_queue:		Efx TX queue
885874aeea5SJeff Kirsher  * @skb:		Socket buffer
886874aeea5SJeff Kirsher  * @st:			TSO state
887874aeea5SJeff Kirsher  *
888874aeea5SJeff Kirsher  * Form descriptors for the current fragment, until we reach the end
88914bf718fSBen Hutchings  * of the fragment or end-of-packet.
890874aeea5SJeff Kirsher  */
89114bf718fSBen Hutchings static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
892874aeea5SJeff Kirsher 					  const struct sk_buff *skb,
893874aeea5SJeff Kirsher 					  struct tso_state *st)
894874aeea5SJeff Kirsher {
895874aeea5SJeff Kirsher 	struct efx_tx_buffer *buffer;
89614bf718fSBen Hutchings 	int n;
897874aeea5SJeff Kirsher 
898874aeea5SJeff Kirsher 	if (st->in_len == 0)
89914bf718fSBen Hutchings 		return;
900874aeea5SJeff Kirsher 	if (st->packet_space == 0)
90114bf718fSBen Hutchings 		return;
902874aeea5SJeff Kirsher 
903874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(st->in_len <= 0);
904874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(st->packet_space <= 0);
905874aeea5SJeff Kirsher 
906874aeea5SJeff Kirsher 	n = min(st->in_len, st->packet_space);
907874aeea5SJeff Kirsher 
908874aeea5SJeff Kirsher 	st->packet_space -= n;
909874aeea5SJeff Kirsher 	st->out_len -= n;
910874aeea5SJeff Kirsher 	st->in_len -= n;
911874aeea5SJeff Kirsher 
91214bf718fSBen Hutchings 	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
91314bf718fSBen Hutchings 
9147668ff9cSBen Hutchings 	if (st->out_len == 0) {
915874aeea5SJeff Kirsher 		/* Transfer ownership of the skb */
916874aeea5SJeff Kirsher 		buffer->skb = skb;
9177668ff9cSBen Hutchings 		buffer->flags = EFX_TX_BUF_SKB;
9187668ff9cSBen Hutchings 	} else if (st->packet_space != 0) {
9197668ff9cSBen Hutchings 		buffer->flags = EFX_TX_BUF_CONT;
9207668ff9cSBen Hutchings 	}
921874aeea5SJeff Kirsher 
922874aeea5SJeff Kirsher 	if (st->in_len == 0) {
9230e33d870SBen Hutchings 		/* Transfer ownership of the DMA mapping */
924874aeea5SJeff Kirsher 		buffer->unmap_len = st->unmap_len;
9257668ff9cSBen Hutchings 		buffer->flags |= st->dma_flags;
926874aeea5SJeff Kirsher 		st->unmap_len = 0;
927874aeea5SJeff Kirsher 	}
928874aeea5SJeff Kirsher 
929874aeea5SJeff Kirsher 	st->dma_addr += n;
930874aeea5SJeff Kirsher }
931874aeea5SJeff Kirsher 
932874aeea5SJeff Kirsher 
933874aeea5SJeff Kirsher /**
934874aeea5SJeff Kirsher  * tso_start_new_packet - generate a new header and prepare for the new packet
935874aeea5SJeff Kirsher  * @tx_queue:		Efx TX queue
936874aeea5SJeff Kirsher  * @skb:		Socket buffer
937874aeea5SJeff Kirsher  * @st:			TSO state
938874aeea5SJeff Kirsher  *
939874aeea5SJeff Kirsher  * Generate a new header and prepare for the new packet.  Return 0 on
940f7251a9cSBen Hutchings  * success, or -%ENOMEM if allocation of the header buffer failed.
941874aeea5SJeff Kirsher  */
942874aeea5SJeff Kirsher static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
943874aeea5SJeff Kirsher 				const struct sk_buff *skb,
944874aeea5SJeff Kirsher 				struct tso_state *st)
945874aeea5SJeff Kirsher {
946f7251a9cSBen Hutchings 	struct efx_tx_buffer *buffer =
947f7251a9cSBen Hutchings 		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
948874aeea5SJeff Kirsher 	struct tcphdr *tsoh_th;
949874aeea5SJeff Kirsher 	unsigned ip_length;
950874aeea5SJeff Kirsher 	u8 *header;
951f7251a9cSBen Hutchings 	int rc;
952874aeea5SJeff Kirsher 
953f7251a9cSBen Hutchings 	/* Allocate and insert a DMA-mapped header buffer. */
954f7251a9cSBen Hutchings 	header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
955f7251a9cSBen Hutchings 	if (!header)
956f7251a9cSBen Hutchings 		return -ENOMEM;
957874aeea5SJeff Kirsher 
9589714284fSBen Hutchings 	tsoh_th = (struct tcphdr *)(header + st->tcp_off);
959874aeea5SJeff Kirsher 
960874aeea5SJeff Kirsher 	/* Copy and update the headers. */
961874aeea5SJeff Kirsher 	memcpy(header, skb->data, st->header_len);
962874aeea5SJeff Kirsher 
963874aeea5SJeff Kirsher 	tsoh_th->seq = htonl(st->seqnum);
964874aeea5SJeff Kirsher 	st->seqnum += skb_shinfo(skb)->gso_size;
965874aeea5SJeff Kirsher 	if (st->out_len > skb_shinfo(skb)->gso_size) {
966874aeea5SJeff Kirsher 		/* This packet will not finish the TSO burst. */
96753cb13c6SBen Hutchings 		st->packet_space = skb_shinfo(skb)->gso_size;
968874aeea5SJeff Kirsher 		tsoh_th->fin = 0;
969874aeea5SJeff Kirsher 		tsoh_th->psh = 0;
970874aeea5SJeff Kirsher 	} else {
971874aeea5SJeff Kirsher 		/* This packet will be the last in the TSO burst. */
97253cb13c6SBen Hutchings 		st->packet_space = st->out_len;
973874aeea5SJeff Kirsher 		tsoh_th->fin = tcp_hdr(skb)->fin;
974874aeea5SJeff Kirsher 		tsoh_th->psh = tcp_hdr(skb)->psh;
975874aeea5SJeff Kirsher 	}
97653cb13c6SBen Hutchings 	ip_length = st->ip_base_len + st->packet_space;
977874aeea5SJeff Kirsher 
978874aeea5SJeff Kirsher 	if (st->protocol == htons(ETH_P_IP)) {
9799714284fSBen Hutchings 		struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
980874aeea5SJeff Kirsher 
981874aeea5SJeff Kirsher 		tsoh_iph->tot_len = htons(ip_length);
982874aeea5SJeff Kirsher 
983874aeea5SJeff Kirsher 		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
984874aeea5SJeff Kirsher 		tsoh_iph->id = htons(st->ipv4_id);
985874aeea5SJeff Kirsher 		st->ipv4_id++;
986874aeea5SJeff Kirsher 	} else {
987874aeea5SJeff Kirsher 		struct ipv6hdr *tsoh_iph =
9889714284fSBen Hutchings 			(struct ipv6hdr *)(header + st->ip_off);
989874aeea5SJeff Kirsher 
99053cb13c6SBen Hutchings 		tsoh_iph->payload_len = htons(ip_length);
991874aeea5SJeff Kirsher 	}
992874aeea5SJeff Kirsher 
993f7251a9cSBen Hutchings 	rc = efx_tso_put_header(tx_queue, buffer, header);
994f7251a9cSBen Hutchings 	if (unlikely(rc))
995f7251a9cSBen Hutchings 		return rc;
996f7251a9cSBen Hutchings 
997874aeea5SJeff Kirsher 	++tx_queue->tso_packets;
998874aeea5SJeff Kirsher 
999874aeea5SJeff Kirsher 	return 0;
1000874aeea5SJeff Kirsher }
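
/*
 * Per-segment header rewriting in tso_start_new_packet(), by example
 * (illustrative; gso_size of 1448 and an IPv4 skb assumed): segment N
 * carries sequence number seq + N * 1448, an IPv4 ID one greater than
 * the previous segment's, and tot_len = ip_base_len + packet_space
 * (40 + 1448 = 1488 for a full segment).  FIN and PSH from the original
 * header are only copied into the final segment.
 */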
1001874aeea5SJeff Kirsher 
1002874aeea5SJeff Kirsher 
1003874aeea5SJeff Kirsher /**
1004874aeea5SJeff Kirsher  * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
1005874aeea5SJeff Kirsher  * @tx_queue:		Efx TX queue
1006874aeea5SJeff Kirsher  * @skb:		Socket buffer
1007874aeea5SJeff Kirsher  *
1008874aeea5SJeff Kirsher  * Context: You must hold netif_tx_lock() to call this function.
1009874aeea5SJeff Kirsher  *
1010874aeea5SJeff Kirsher  * Add socket buffer @skb to @tx_queue, performing TSO.  In all cases
1011874aeea5SJeff Kirsher  * @skb is consumed.  Return %NETDEV_TX_OK.
1013874aeea5SJeff Kirsher  */
1014874aeea5SJeff Kirsher static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1015874aeea5SJeff Kirsher 			       struct sk_buff *skb)
1016874aeea5SJeff Kirsher {
1017874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
101814bf718fSBen Hutchings 	int frag_i, rc;
1019874aeea5SJeff Kirsher 	struct tso_state state;
1020874aeea5SJeff Kirsher 
1021874aeea5SJeff Kirsher 	/* Find the packet protocol and sanity-check it */
1022874aeea5SJeff Kirsher 	state.protocol = efx_tso_check_protocol(skb);
1023874aeea5SJeff Kirsher 
1024874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1025874aeea5SJeff Kirsher 
1026874aeea5SJeff Kirsher 	tso_start(&state, skb);
1027874aeea5SJeff Kirsher 
1028874aeea5SJeff Kirsher 	/* Assume that skb header area contains exactly the headers, and
1029874aeea5SJeff Kirsher 	 * all payload is in the frag list.
1030874aeea5SJeff Kirsher 	 */
1031874aeea5SJeff Kirsher 	if (skb_headlen(skb) == state.header_len) {
1032874aeea5SJeff Kirsher 		/* Grab the first payload fragment. */
1033874aeea5SJeff Kirsher 		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1034874aeea5SJeff Kirsher 		frag_i = 0;
1035874aeea5SJeff Kirsher 		rc = tso_get_fragment(&state, efx,
1036874aeea5SJeff Kirsher 				      skb_shinfo(skb)->frags + frag_i);
1037874aeea5SJeff Kirsher 		if (rc)
1038874aeea5SJeff Kirsher 			goto mem_err;
1039874aeea5SJeff Kirsher 	} else {
1040874aeea5SJeff Kirsher 		rc = tso_get_head_fragment(&state, efx, skb);
1041874aeea5SJeff Kirsher 		if (rc)
1042874aeea5SJeff Kirsher 			goto mem_err;
1043874aeea5SJeff Kirsher 		frag_i = -1;
1044874aeea5SJeff Kirsher 	}
1045874aeea5SJeff Kirsher 
1046874aeea5SJeff Kirsher 	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1047874aeea5SJeff Kirsher 		goto mem_err;
1048874aeea5SJeff Kirsher 
1049874aeea5SJeff Kirsher 	while (1) {
105014bf718fSBen Hutchings 		tso_fill_packet_with_fragment(tx_queue, skb, &state);
1051874aeea5SJeff Kirsher 
1052874aeea5SJeff Kirsher 		/* Move onto the next fragment? */
1053874aeea5SJeff Kirsher 		if (state.in_len == 0) {
1054874aeea5SJeff Kirsher 			if (++frag_i >= skb_shinfo(skb)->nr_frags)
1055874aeea5SJeff Kirsher 				/* End of payload reached. */
1056874aeea5SJeff Kirsher 				break;
1057874aeea5SJeff Kirsher 			rc = tso_get_fragment(&state, efx,
1058874aeea5SJeff Kirsher 					      skb_shinfo(skb)->frags + frag_i);
1059874aeea5SJeff Kirsher 			if (rc)
1060874aeea5SJeff Kirsher 				goto mem_err;
1061874aeea5SJeff Kirsher 		}
1062874aeea5SJeff Kirsher 
1063874aeea5SJeff Kirsher 		/* Start a new packet? */
1064874aeea5SJeff Kirsher 		if (state.packet_space == 0 &&
1065874aeea5SJeff Kirsher 		    tso_start_new_packet(tx_queue, skb, &state) < 0)
1066874aeea5SJeff Kirsher 			goto mem_err;
1067874aeea5SJeff Kirsher 	}
1068874aeea5SJeff Kirsher 
1069449fa023SEric Dumazet 	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
1070449fa023SEric Dumazet 
1071874aeea5SJeff Kirsher 	/* Pass off to hardware */
1072874aeea5SJeff Kirsher 	efx_nic_push_buffers(tx_queue);
1073874aeea5SJeff Kirsher 
107414bf718fSBen Hutchings 	efx_tx_maybe_stop_queue(tx_queue);
107514bf718fSBen Hutchings 
1076874aeea5SJeff Kirsher 	tx_queue->tso_bursts++;
1077874aeea5SJeff Kirsher 	return NETDEV_TX_OK;
1078874aeea5SJeff Kirsher 
1079874aeea5SJeff Kirsher  mem_err:
1080874aeea5SJeff Kirsher 	netif_err(efx, tx_err, efx->net_dev,
10810e33d870SBen Hutchings 		  "Out of memory for TSO headers, or DMA mapping error\n");
1082874aeea5SJeff Kirsher 	dev_kfree_skb_any(skb);
1083874aeea5SJeff Kirsher 
1084874aeea5SJeff Kirsher 	/* Free the DMA mapping we were in the process of writing out */
1085874aeea5SJeff Kirsher 	if (state.unmap_len) {
10867668ff9cSBen Hutchings 		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
10870e33d870SBen Hutchings 			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
10880e33d870SBen Hutchings 					 state.unmap_len, DMA_TO_DEVICE);
1089874aeea5SJeff Kirsher 		else
10900e33d870SBen Hutchings 			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
10910e33d870SBen Hutchings 				       state.unmap_len, DMA_TO_DEVICE);
1092874aeea5SJeff Kirsher 	}
1093874aeea5SJeff Kirsher 
1094874aeea5SJeff Kirsher 	efx_enqueue_unwind(tx_queue);
109514bf718fSBen Hutchings 	return NETDEV_TX_OK;
1096874aeea5SJeff Kirsher }
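
/*
 * End-to-end sketch of efx_enqueue_skb_tso() (illustrative): an skb with
 * 54 bytes of headers in the linear area and 4344 bytes of paged payload,
 * with gso_size 1448, becomes three packets of 1448 bytes each.  Every
 * packet starts with one header-buffer descriptor from
 * tso_start_new_packet() followed by payload descriptors from
 * tso_fill_packet_with_fragment(); only the very last payload descriptor
 * is flagged EFX_TX_BUF_SKB, so the skb is freed exactly once when that
 * descriptor completes.
 */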
1097