// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "tx_common.h"

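/* Number of copy-buffer pages needed for this queue: each page holds
 * (PAGE_SIZE >> EFX_TX_CB_ORDER) copy buffers, and in the worst case
 * one buffer may be needed per descriptor slot (ptr_mask + 1 slots).
 */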
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

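/* Allocate all resources for a TX queue prior to first use: size the
 * ring to the smallest power of two covering efx->txq_entries, allocate
 * the software descriptor ring and the copy-buffer page array, then ask
 * the NIC-specific code to allocate the hardware ring.  All allocations
 * are rolled back on failure.
 */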
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

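/* Reset a queue's counters and per-queue state and set up the hardware
 * descriptor ring, making the queue ready to accept packets.  Typically
 * called when the interface is brought up, after efx_probe_tx_queue().
 */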
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);

	/* Set up default function pointers. These may be replaced by
	 * efx_nic_init_tx() based on NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

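/* Tear down a queue's software state: complete and free any buffers
 * still sitting between the read and write pointers, then reset the
 * core netdev queue's byte/packet accounting.
 */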
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

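/* Free everything allocated by efx_probe_tx_queue(): the hardware ring,
 * the copy-buffer pages and the software descriptor ring.
 */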
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}

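/* Clean up a single TX buffer: unmap its DMA region if this descriptor
 * owns one, report any captured hardware TX timestamp, and release the
 * attached skb or XDP frame.  *pkts_compl and *bytes_compl are bumped
 * so the caller can account for the completed work.
 */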
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any(skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

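/* Add descriptors for one DMA-contiguous chunk, splitting it wherever
 * the NIC's tx_limit_len() method requires.  Returns the buffer for the
 * final descriptor so the caller can adjust its flags, e.g. to mark the
 * end of a packet.
 */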
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
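/* Returns 0 on success or -EIO if a DMA mapping fails.  Note that on
 * failure some descriptors may already have been written to the ring;
 * the caller is expected to unwind them.
 */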
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

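/* Worst-case number of descriptors needed to transmit one TSO skb.  As
 * a rough worked example (assuming EFX_TSO_MAX_SEGS is 100 and
 * MAX_SKB_FRAGS is 17): 100 * 2 + 17 = 217 descriptors, plus up to 100
 * more option descriptors on EF10-class (Huntington and later) NICs.
 */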
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}