xref: /openbmc/linux/drivers/net/ethernet/sfc/tx_common.c (revision b8cd94992f1758d1574f5871335fdaaf726c0944)
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "tx_common.h"

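/* Enough pages of copy buffers for every entry in the ring: one buffer of
 * size (1 << EFX_TX_CB_ORDER) per descriptor, with
 * (PAGE_SIZE >> EFX_TX_CB_ORDER) buffers packed into each page.
 */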
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

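/* Allocate the software and hardware state for a TX queue.  Undone by
 * efx_remove_tx_queue().
 */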
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;
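	/* e.g. a requested txq_entries of 800 becomes a 1024-entry ring
	 * with ptr_mask 0x3ff
	 */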

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

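/* Reset the queue's state and (re)initialise the hardware ring.  Called
 * each time the queue is started, e.g. at interface up or after a reset.
 */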
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers.  These may be replaced by
	 * efx_nic_init_tx() based on the NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

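/* Free any buffers still in the ring when the queue is shut down.  By this
 * point the hardware should have finished with them.
 */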
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

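/* Free the software and hardware state allocated by efx_probe_tx_queue(). */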
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

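/* Release a completed buffer: unmap its DMA area, hand back any attached
 * skb or XDP frame, and clear it for reuse.
 */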
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

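/* Handle a TX completion event: free buffers up to and including @index,
 * wake the core netdev queue if it was stopped and has drained enough,
 * and note when the hardware queue has emptied completely.
 */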
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty.  If it is, record
	 * a valid empty_read_count so that the xmit path can see the queue
	 * has drained (e.g. when deciding whether it may use TX push).
	 */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

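/* Map a DMA-contiguous chunk as one or more descriptors, splitting wherever
 * the NIC's DMA length limit (tx_limit_len) requires.  Returns the final
 * buffer so the caller can adjust its flags for the end of the packet.
 */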
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor.  Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);
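		/* i.e. everything up to and including the TCP header; doff
		 * is the TCP header length in 32-bit words
		 */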

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
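	/* e.g. 2 * 100 + 17 = 217 with the usual values of
	 * EFX_TSO_MAX_SEGS and MAX_SKB_FRAGS
	 */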

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}