// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

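/* Illustrative sketch, not part of the driver: each ring entry may use a
 * copy buffer of (1 << EFX_TX_CB_ORDER) bytes, so one page holds
 * PAGE_SIZE >> EFX_TX_CB_ORDER of them.  Assuming a 4096-byte page, an
 * EFX_TX_CB_ORDER of 7 (128-byte buffers) and a 1024-entry ring
 * (ptr_mask == 1023), the arithmetic above gives:
 *
 *	entries     = ptr_mask + 1           = 1024
 *	cb_per_page = 4096 >> 7              = 32
 *	page_count  = DIV_ROUND_UP(1024, 32) = 32 pages
 */
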
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

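/* A worked sketch of the sizing rule above (the request size is made up;
 * assume EFX_MIN_DMAQ_SIZE <= 1024): a requested txq_entries of 700 is
 * rounded up to the next power of two so the ring can be indexed with a
 * cheap AND instead of a modulo:
 *
 *	entries  = max(roundup_pow_of_two(700), EFX_MIN_DMAQ_SIZE) = 1024
 *	ptr_mask = entries - 1 = 0x3ff
 *	slot     = tx_queue->insert_count & tx_queue->ptr_mask;
 */
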
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->notify_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based on NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any(skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

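/* Why the EFX_TX_BUF_MAP_SINGLE test above matters: the DMA API requires
 * the unmap call to match the map call.  A hedged sketch of the two
 * mapping paths this function undoes (mirroring what efx_tx_map_data()
 * does later in this file):
 *
 *	// linear skb head: a single mapping, unmapped with dma_unmap_single()
 *	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
 *	dma_flags = EFX_TX_BUF_MAP_SINGLE;
 *
 *	// paged fragment: a page mapping, unmapped with dma_unmap_page()
 *	dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, DMA_TO_DEVICE);
 *	dma_flags = 0;
 */
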
/* Remove packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

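/* Wrap-around sketch for the loop above, with made-up values: take
 * ptr_mask == 0xff, read_count == 0xfe and a completion index of 0x01.
 * Then:
 *
 *	stop_index = (0x01 + 1) & 0xff = 0x02
 *	read_ptr   = 0xfe, 0xff, 0x00, 0x01, stopping at 0x02
 *
 * Four buffers are completed even though the raw index looks "behind"
 * the read pointer; masking makes the comparison wrap correctly.
 */
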
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

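/* A minimal sketch of the empty_read_count encoding published above
 * (the consumer-side check is an assumption modelled on the driver's
 * descriptor-push logic, not code from this file): the value is a
 * read_count snapshot with the EFX_EMPTY_COUNT_VALID flag OR'd in, so
 * a single READ_ONCE() yields both "was empty" and "at which count":
 *
 *	unsigned int e = READ_ONCE(tx_queue->empty_read_count);
 *	bool was_empty = (e & EFX_EMPTY_COUNT_VALID) &&
 *			 !((e ^ tx_queue->write_count) & ~EFX_EMPTY_COUNT_VALID);
 *
 * The smp_mb() above is there so that the snapshot cannot be published
 * before the read_count update itself is visible.
 */
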
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);
}

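/* Hedged sketch of the wake test above: TX queues come in pairs that
 * share one core netdev queue (hence efx_tx_queue_partner()), so the
 * wake decision must consider the fuller of the two rings.  With
 * assumed counts:
 *
 *	this queue:    insert_count - read_count = 900
 *	partner queue: insert_count - read_count = 100
 *	fill_level = max(900, 100) = 900
 *
 * the core queue is only woken once fill_level drops to
 * efx->txq_wake_thresh or below.
 */
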
/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

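/* Sketch of the splitting loop above with made-up numbers: if the NIC's
 * tx_limit_len() capped a descriptor at 16K and a 40K mapping arrived,
 * three descriptors would be emitted from the one DMA mapping:
 *
 *	chunk 1: dma_addr,       len 16K, flags EFX_TX_BUF_CONT
 *	chunk 2: dma_addr + 16K, len 16K, flags EFX_TX_BUF_CONT
 *	chunk 3: dma_addr + 32K, len  8K, flags EFX_TX_BUF_CONT
 *
 * Only the buffer returned for the final chunk is given an unmap_len by
 * the caller, so the whole fragment is unmapped exactly once.
 */
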
int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_header(skb) -
				skb->data +
				(inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);
	return header_len;
}

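/* Worked example for the non-encapsulated branch above (offsets are
 * assumptions, not from a real capture): for an IPv4/TCP skb whose TCP
 * header starts 34 bytes into the linear data (14 Ethernet + 20 IP)
 * with doff == 8, i.e. 32 bytes of TCP header including options:
 *
 *	header_len = (skb_transport_header(skb) - skb->data)	// 34
 *		   + (tcp_hdr(skb)->doff << 2)			// 32
 *		   = 66 bytes
 *
 * doff counts 32-bit words, hence the << 2 to convert to bytes.
 */
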
/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

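/* Rough worked example of the budget above, treating the constants as
 * assumptions (EFX_TSO_MAX_SEGS is 100 and MAX_SKB_FRAGS typically 17
 * at the time of writing): on an EF10-class NIC with
 * PAGE_SIZE == EFX_PAGE_SIZE,
 *
 *	max_descs = 100 * 2 + 17	// header + payload per segment
 *		  + 100			// option descriptor per segment
 *		  = 317
 *
 * The PAGE_SIZE > EFX_PAGE_SIZE term only bites on architectures whose
 * pages exceed the NIC's DMA page size, e.g. 64K-page PPC64.
 */
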
/*
 * Fall back to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}

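/* Hedged sketch of where the fallback above slots in: the driver's
 * enqueue path tries hardware TSO first and falls back on a per-queue
 * failure.  The shape below mirrors that path but is illustrative, not
 * a verbatim excerpt:
 *
 *	if (skb_is_gso(skb)) {
 *		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
 *		if (rc == -EINVAL)
 *			rc = efx_tx_tso_fallback(tx_queue, skb);
 *	}
 *
 * Note that the fallback consumes the original skb (dev_consume_skb_any())
 * before re-enqueueing each software-segmented piece via efx_enqueue_skb().
 */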