xref: /openbmc/linux/drivers/net/ethernet/sfc/tx_common.c (revision 740acc15c8a52c959111a9fbad974e9b0e5b4eb7)
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "tx_common.h"

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}
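
/* Worked example (hypothetical values): with a 1024-entry ring, 4KiB
 * pages and EFX_TX_CB_ORDER == 7 (i.e. 128-byte copy buffers), each
 * page holds PAGE_SIZE >> EFX_TX_CB_ORDER == 32 buffers, so this
 * returns DIV_ROUND_UP(1024, 32) == 32 pages.
 */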

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
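
/* Sizing sketch (hypothetical numbers): a request for 700 entries is
 * rounded up to a 1024-entry ring, so ptr_mask == 0x3ff.  Because the
 * size is a power of two, ring indices reduce to a cheap mask, e.g.:
 *
 *	buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
 */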

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based on NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}
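
/* The counters initialised above are free-running and are only masked
 * by ptr_mask when used as ring indices, so the fill level is a simple
 * difference, as computed in efx_xmit_done() below:
 *
 *	fill_level = tx_queue->insert_count - tx_queue->read_count;
 */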

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
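
/* Note: buffer->flags selects the live union member of struct
 * efx_tx_buffer: EFX_TX_BUF_SKB means buffer->skb is valid and
 * EFX_TX_BUF_XDP means buffer->xdpf is; plain EFX_TX_BUF_CONT
 * continuation buffers carry neither and only need unmapping.
 */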

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
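
/* Example: with read_count == 5 and index == 7, stop_index becomes 8
 * and entries 5, 6 and 7 are completed, i.e. the range is inclusive
 * of the specified index (all values masked by ptr_mask).
 */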

void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
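
/* Sketch of the handshake: once read_count has caught up with
 * write_count, empty_read_count snapshots that count with
 * EFX_EMPTY_COUNT_VALID set.  The xmit path can later test the
 * snapshot against its own write_count, roughly:
 *
 *	unsigned int empty = READ_ONCE(tx_queue->empty_read_count);
 *
 *	if (empty && ((empty ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0)
 *		... the queue was empty at that write_count ...
 *
 * which is the style of check used to decide whether a descriptor can
 * be pushed straight to the NIC.
 */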

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);
}
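
/* Note: core_txq is shared by a pair of hardware queues (the
 * checksum-offload queue and its non-offload partner), hence the
 * max() over both fill levels: the netif queue is only woken once
 * both queues are at or below txq_wake_thresh.
 */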

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}
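
/* Typical use (sketch, following the enqueue error paths): capture the
 * insert pointer before mapping, then unwind on failure:
 *
 *	unsigned int old_insert_count = tx_queue->insert_count;
 *
 *	if (efx_tx_map_data(tx_queue, skb, segment_count))
 *		efx_enqueue_unwind(tx_queue, old_insert_count);
 */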
29088f7df35SAlex Maftei (amaftei) 
29117d3b21cSAlex Maftei (amaftei) struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
29217d3b21cSAlex Maftei (amaftei) 				       dma_addr_t dma_addr, size_t len)
29317d3b21cSAlex Maftei (amaftei) {
29417d3b21cSAlex Maftei (amaftei) 	const struct efx_nic_type *nic_type = tx_queue->efx->type;
29517d3b21cSAlex Maftei (amaftei) 	struct efx_tx_buffer *buffer;
29617d3b21cSAlex Maftei (amaftei) 	unsigned int dma_len;
29717d3b21cSAlex Maftei (amaftei) 
29817d3b21cSAlex Maftei (amaftei) 	/* Map the fragment taking account of NIC-dependent DMA limits. */
29917d3b21cSAlex Maftei (amaftei) 	do {
30017d3b21cSAlex Maftei (amaftei) 		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
30117d3b21cSAlex Maftei (amaftei) 		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
30217d3b21cSAlex Maftei (amaftei) 
30317d3b21cSAlex Maftei (amaftei) 		buffer->len = dma_len;
30417d3b21cSAlex Maftei (amaftei) 		buffer->dma_addr = dma_addr;
30517d3b21cSAlex Maftei (amaftei) 		buffer->flags = EFX_TX_BUF_CONT;
30617d3b21cSAlex Maftei (amaftei) 		len -= dma_len;
30717d3b21cSAlex Maftei (amaftei) 		dma_addr += dma_len;
30817d3b21cSAlex Maftei (amaftei) 		++tx_queue->insert_count;
30917d3b21cSAlex Maftei (amaftei) 	} while (len);
31017d3b21cSAlex Maftei (amaftei) 
31117d3b21cSAlex Maftei (amaftei) 	return buffer;
31217d3b21cSAlex Maftei (amaftei) }
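
/* Example (hypothetical limit): if tx_limit_len() capped a descriptor
 * at 16KiB, a 20KiB chunk would be emitted as two descriptors of 16KiB
 * and 4KiB, and only the final buffer is returned so the caller can
 * adjust its flags (see efx_tx_map_data() below).
 */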

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}
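
/* For a TSO skb (hypothetical sizes: 66 bytes of headers, 1434 bytes
 * of remaining linear data, two page fragments) the above emits:
 *
 *	[hdr 66] [linear 1434] [frag0] [frag1 + EFX_TX_BUF_SKB]
 *
 * with only the last buffer carrying the skb pointer, so the skb is
 * completed exactly once when that buffer is dequeued.
 */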

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
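
/* Worked example (assuming EFX_TSO_MAX_SEGS == 100 and the common
 * MAX_SKB_FRAGS == 17): the baseline is 100 * 2 + 17 == 217
 * descriptors, rising to 317 on EF10-class NICs which may add an
 * option descriptor per segment.
 */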

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}
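
/* Typical call site (sketch, following the enqueue path): the hardware
 * TSO handler rejects a packet it cannot express and each
 * software-segmented skb is then enqueued individually:
 *
 *	rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
 *	if (rc == -EINVAL) {
 *		rc = efx_tx_tso_fallback(tx_queue, skb);
 *		tx_queue->tso_fallbacks++;
 *		if (rc == 0)
 *			return 0;
 *	}
 */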