// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"

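/* Number of pages needed for the copy buffers: each copy buffer is
 * (1 << EFX_TX_CB_ORDER) bytes, so one page holds
 * PAGE_SIZE >> EFX_TX_CB_ORDER of them; divide the ring size by that,
 * rounding up.
 */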
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;
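	/* Since entries is a power of two, ring indices can wrap with a
	 * cheap AND against ptr_mask instead of a modulo.
	 */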

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
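	/* The queue starts empty, so the empty snapshot taken at
	 * read_count == 0 is immediately valid; EFX_EMPTY_COUNT_VALID
	 * marks the value as current rather than stale.
	 */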
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based on NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
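	/* Clear the byte-queue-limit accounting for this queue now that
	 * every pending buffer has been released.
	 */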
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
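		/* A MAC TX timestamp latched by the completion path is
		 * delivered with this skb, then cleared so it cannot be
		 * attached to a later packet by mistake.
		 */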
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any(skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

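/* If the hardware queue has drained, take a snapshot of read_count and
 * publish it in empty_read_count with EFX_EMPTY_COUNT_VALID set.  The
 * signed-difference comparison handles wrap of the 32-bit counters, and
 * the barrier orders the read_count update against the empty_read_count
 * write that the transmit path may observe.
 */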
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

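	/* More than one packet per completion event means the NIC merged
	 * several TX completions; count the merge for statistics.
	 */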
	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
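	/* The completion counts are only updated for skb buffers; since
	 * callers guarantee none are attached here, the values are unused.
	 */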

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

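		/* Some NIC types impose a per-descriptor DMA length limit;
		 * without a tx_limit_len hook the whole remaining length
		 * fits in one descriptor.
		 */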
		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

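/* Length of the headers that TSO must place in a separate descriptor:
 * everything up to and including the TCP header.  doff counts 32-bit
 * words, hence the << 2 to convert to bytes; encapsulated frames use
 * the inner headers instead.
 */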
int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_header(skb) -
				skb->data +
				(inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);
	return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
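		/* Fragments are page mappings, so clear
		 * EFX_TX_BUF_MAP_SINGLE; completion will unmap them with
		 * dma_unmap_page().
		 */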
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

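	/* The original skb has been consumed above; unlink each
	 * software-segmented skb from the GSO list and queue it as an
	 * ordinary transmit.
	 */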
	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}