// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "tx_common.h"

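/* Number of copy buffer pages needed to back the whole ring: one
 * (1 << EFX_TX_CB_ORDER)-byte copy-break buffer per ring entry, with
 * (PAGE_SIZE >> EFX_TX_CB_ORDER) buffers packed per page. A worked
 * example, assuming EFX_TX_CB_ORDER = 7 (128-byte buffers) and 4KiB
 * pages: a 1024-entry ring needs DIV_ROUND_UP(1024, 32) = 32 pages.
 */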
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
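
/* A minimal lifecycle sketch (illustrative only; the actual callers live
 * in the core efx code). Probe and init pair with remove and fini, in
 * reverse order:
 *
 *	rc = efx_probe_tx_queue(tx_queue);	(allocate SW and HW rings)
 *	if (rc)
 *		return rc;
 *	efx_init_tx_queue(tx_queue);		(reset counters, init HW)
 *	...transmit traffic...
 *	efx_fini_tx_queue(tx_queue);		(drain remaining buffers)
 *	efx_remove_tx_queue(tx_queue);		(free rings)
 */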

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based on NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
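	/* netdev_tx_reset_queue() also clears the queue's BQL (byte queue
	 * limit) state, so accounting restarts cleanly the next time the
	 * queue is initialised.
	 */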
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any(skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
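
/* The index arithmetic above is wraparound-safe because the ring size is
 * a power of two. Illustration: with ptr_mask = 1023, read_count = 1022
 * and index = 2, stop_index = (2 + 1) & 1023 = 3, so entries 1022, 1023,
 * 0, 1 and 2 are completed in order.
 */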

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
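
/* Note on fill_level above: insert_count and read_count are free-running
 * unsigned counters, so the subtraction gives the number of outstanding
 * descriptors even across wraparound, e.g. insert_count = 5 and
 * read_count = 0xfffffffe yields 5 - 0xfffffffe = 7 entries in flight.
 */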

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers may have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}
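
/* Sketch of the expected calling pattern (illustrative; based on the
 * transmit path in tx.c): the caller snapshots insert_count before
 * mapping a packet and unwinds on failure.
 *
 *	unsigned int old_insert_count = tx_queue->insert_count;
 *
 *	if (efx_tx_map_data(tx_queue, skb, segment_count))
 *		efx_enqueue_unwind(tx_queue, old_insert_count);
 */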

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}
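
/* Splitting example (hypothetical limit): if tx_limit_len() capped each
 * descriptor at 4096 bytes, a 10000-byte chunk would be emitted as three
 * descriptors of 4096, 4096 and 1808 bytes, and only the last buffer is
 * returned to the caller (so it can carry the completion flags).
 */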

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
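
/* Worked example (assuming EFX_TSO_MAX_SEGS = 100 and MAX_SKB_FRAGS = 17):
 * the baseline is 100 * 2 + 17 = 217 descriptors; on EF10 (Hunt A0 or
 * later) the per-segment option descriptors raise this to 317. The
 * PAGE_SIZE term only applies where system pages exceed EFX_PAGE_SIZE.
 */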