xref: /openbmc/linux/drivers/net/ethernet/sfc/tx.c (revision f79c957a)
1874aeea5SJeff Kirsher /****************************************************************************
2f7a6d2c4SBen Hutchings  * Driver for Solarflare network controllers and boards
3874aeea5SJeff Kirsher  * Copyright 2005-2006 Fen Systems Ltd.
4f7a6d2c4SBen Hutchings  * Copyright 2005-2013 Solarflare Communications Inc.
5874aeea5SJeff Kirsher  *
6874aeea5SJeff Kirsher  * This program is free software; you can redistribute it and/or modify it
7874aeea5SJeff Kirsher  * under the terms of the GNU General Public License version 2 as published
8874aeea5SJeff Kirsher  * by the Free Software Foundation, incorporated herein by reference.
9874aeea5SJeff Kirsher  */
10874aeea5SJeff Kirsher 
11874aeea5SJeff Kirsher #include <linux/pci.h>
12874aeea5SJeff Kirsher #include <linux/tcp.h>
13874aeea5SJeff Kirsher #include <linux/ip.h>
14874aeea5SJeff Kirsher #include <linux/in.h>
15874aeea5SJeff Kirsher #include <linux/ipv6.h>
16874aeea5SJeff Kirsher #include <linux/slab.h>
17874aeea5SJeff Kirsher #include <net/ipv6.h>
18874aeea5SJeff Kirsher #include <linux/if_ether.h>
19874aeea5SJeff Kirsher #include <linux/highmem.h>
20183233beSBen Hutchings #include <linux/cache.h>
21874aeea5SJeff Kirsher #include "net_driver.h"
22874aeea5SJeff Kirsher #include "efx.h"
23183233beSBen Hutchings #include "io.h"
24874aeea5SJeff Kirsher #include "nic.h"
25e9117e50SBert Kenward #include "tx.h"
26874aeea5SJeff Kirsher #include "workarounds.h"
27dfa50be9SBen Hutchings #include "ef10_regs.h"
28874aeea5SJeff Kirsher 
29183233beSBen Hutchings #ifdef EFX_USE_PIO
30183233beSBen Hutchings 
31183233beSBen Hutchings #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
32183233beSBen Hutchings unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
33183233beSBen Hutchings 
34183233beSBen Hutchings #endif /* EFX_USE_PIO */
35183233beSBen Hutchings 
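/* Look up the per-queue copy cell backing a given insert index.  Each
 * tx_queue keeps an array of DMA-coherent pages (cb_page) carved into
 * small fixed-size cells, so the cell is found purely by arithmetic:
 * the high bits of the index select the page and the low bits select
 * the offset within it.  As an illustration (assuming EFX_TX_CB_ORDER
 * is 7, i.e. 128-byte cells, 4KiB pages and NET_IP_ALIGN of 2), index
 * 37 lands in page 37 >> 5 = 1 at offset ((37 << 7) + 2) & 4095 = 642.
 */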
36e9117e50SBert Kenward static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
37e9117e50SBert Kenward 					 struct efx_tx_buffer *buffer)
380fe5565bSBen Hutchings {
39e9117e50SBert Kenward 	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
40e9117e50SBert Kenward 	struct efx_buffer *page_buf =
41e9117e50SBert Kenward 		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
42e9117e50SBert Kenward 	unsigned int offset =
43e9117e50SBert Kenward 		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
44e9117e50SBert Kenward 
45e9117e50SBert Kenward 	if (unlikely(!page_buf->addr) &&
46e9117e50SBert Kenward 	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
47e9117e50SBert Kenward 				 GFP_ATOMIC))
48e9117e50SBert Kenward 		return NULL;
49e9117e50SBert Kenward 	buffer->dma_addr = page_buf->dma_addr + offset;
50e9117e50SBert Kenward 	buffer->unmap_len = 0;
51e9117e50SBert Kenward 	return (u8 *)page_buf->addr + offset;
520fe5565bSBen Hutchings }
530fe5565bSBen Hutchings 
54e9117e50SBert Kenward u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
55e9117e50SBert Kenward 				   struct efx_tx_buffer *buffer, size_t len)
560fe5565bSBen Hutchings {
57e9117e50SBert Kenward 	if (len > EFX_TX_CB_SIZE)
58e9117e50SBert Kenward 		return NULL;
59e9117e50SBert Kenward 	return efx_tx_get_copy_buffer(tx_queue, buffer);
600fe5565bSBen Hutchings }
610fe5565bSBen Hutchings 
62874aeea5SJeff Kirsher static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
63c3940999STom Herbert 			       struct efx_tx_buffer *buffer,
64c3940999STom Herbert 			       unsigned int *pkts_compl,
65c3940999STom Herbert 			       unsigned int *bytes_compl)
66874aeea5SJeff Kirsher {
67874aeea5SJeff Kirsher 	if (buffer->unmap_len) {
680e33d870SBen Hutchings 		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
692acdb92eSAlexandre Rames 		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
707668ff9cSBen Hutchings 		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
710e33d870SBen Hutchings 			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
720e33d870SBen Hutchings 					 DMA_TO_DEVICE);
73874aeea5SJeff Kirsher 		else
740e33d870SBen Hutchings 			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
750e33d870SBen Hutchings 				       DMA_TO_DEVICE);
76874aeea5SJeff Kirsher 		buffer->unmap_len = 0;
77874aeea5SJeff Kirsher 	}
78874aeea5SJeff Kirsher 
797668ff9cSBen Hutchings 	if (buffer->flags & EFX_TX_BUF_SKB) {
80b9b603d4SMartin Habets 		struct sk_buff *skb = (struct sk_buff *)buffer->skb;
81b9b603d4SMartin Habets 
82d4a7a889SBert Kenward 		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
83c3940999STom Herbert 		(*pkts_compl)++;
84b9b603d4SMartin Habets 		(*bytes_compl) += skb->len;
85b9b603d4SMartin Habets 		if (tx_queue->timestamping &&
86b9b603d4SMartin Habets 		    (tx_queue->completed_timestamp_major ||
87b9b603d4SMartin Habets 		     tx_queue->completed_timestamp_minor)) {
88b9b603d4SMartin Habets 			struct skb_shared_hwtstamps hwtstamp;
89b9b603d4SMartin Habets 
90b9b603d4SMartin Habets 			hwtstamp.hwtstamp =
91b9b603d4SMartin Habets 				efx_ptp_nic_to_kernel_time(tx_queue);
92b9b603d4SMartin Habets 			skb_tstamp_tx(skb, &hwtstamp);
93b9b603d4SMartin Habets 
94b9b603d4SMartin Habets 			tx_queue->completed_timestamp_major = 0;
95b9b603d4SMartin Habets 			tx_queue->completed_timestamp_minor = 0;
96b9b603d4SMartin Habets 		}
974ef6dae4SRick Jones 		dev_consume_skb_any((struct sk_buff *)buffer->skb);
98874aeea5SJeff Kirsher 		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
99874aeea5SJeff Kirsher 			   "TX queue %d transmission id %x complete\n",
100874aeea5SJeff Kirsher 			   tx_queue->queue, tx_queue->read_count);
101874aeea5SJeff Kirsher 	}
1027668ff9cSBen Hutchings 
103f7251a9cSBen Hutchings 	buffer->len = 0;
104f7251a9cSBen Hutchings 	buffer->flags = 0;
105874aeea5SJeff Kirsher }
106874aeea5SJeff Kirsher 
1077e6d06f0SBen Hutchings unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
1087e6d06f0SBen Hutchings {
1097e6d06f0SBen Hutchings 	/* Header and payload descriptor for each output segment, plus
1107e6d06f0SBen Hutchings 	 * one for every input fragment boundary within a segment
1117e6d06f0SBen Hutchings 	 */
1127e6d06f0SBen Hutchings 	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
1137e6d06f0SBen Hutchings 
1145a6681e2SEdward Cree 	/* Possibly one more per segment for option descriptors */
1155a6681e2SEdward Cree 	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
1167e6d06f0SBen Hutchings 		max_descs += EFX_TSO_MAX_SEGS;
1177e6d06f0SBen Hutchings 
1187e6d06f0SBen Hutchings 	/* Possibly more for PCIe page boundaries within input fragments */
1197e6d06f0SBen Hutchings 	if (PAGE_SIZE > EFX_PAGE_SIZE)
1207e6d06f0SBen Hutchings 		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
1217e6d06f0SBen Hutchings 				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
1227e6d06f0SBen Hutchings 
1237e6d06f0SBen Hutchings 	return max_descs;
1247e6d06f0SBen Hutchings }
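/* As a worked example of the bound above (illustrative values only:
 * EFX_TSO_MAX_SEGS taken as 100 and MAX_SKB_FRAGS as 17), the base is
 * 100 * 2 + 17 = 217 descriptors, EF10-class NICs add one option
 * descriptor per segment for a total of 317, and hosts whose page size
 * exceeds EFX_PAGE_SIZE reserve further descriptors for fragments that
 * straddle PCIe page boundaries.  This is a worst-case bound used to
 * decide when to stop the queue, so a pessimistic result is harmless.
 */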
1257e6d06f0SBen Hutchings 
12614bf718fSBen Hutchings static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
12714bf718fSBen Hutchings {
12814bf718fSBen Hutchings 	/* We need to consider both queues that the net core sees as one */
12914bf718fSBen Hutchings 	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
13014bf718fSBen Hutchings 	struct efx_nic *efx = txq1->efx;
13114bf718fSBen Hutchings 	unsigned int fill_level;
13214bf718fSBen Hutchings 
13314bf718fSBen Hutchings 	fill_level = max(txq1->insert_count - txq1->old_read_count,
13414bf718fSBen Hutchings 			 txq2->insert_count - txq2->old_read_count);
13514bf718fSBen Hutchings 	if (likely(fill_level < efx->txq_stop_thresh))
13614bf718fSBen Hutchings 		return;
13714bf718fSBen Hutchings 
13814bf718fSBen Hutchings 	/* We used the stale old_read_count above, which gives us a
13914bf718fSBen Hutchings 	 * pessimistic estimate of the fill level (which may even
14014bf718fSBen Hutchings 	 * validly be >= efx->txq_entries).  Now try again using
14114bf718fSBen Hutchings 	 * read_count (more likely to be a cache miss).
14214bf718fSBen Hutchings 	 *
14314bf718fSBen Hutchings 	 * If we read read_count and then conditionally stop the
14414bf718fSBen Hutchings 	 * queue, it is possible for the completion path to race with
14514bf718fSBen Hutchings 	 * us and complete all outstanding descriptors in the middle,
14614bf718fSBen Hutchings 	 * after which there will be no more completions to wake it.
14714bf718fSBen Hutchings 	 * Therefore we stop the queue first, then read read_count
14814bf718fSBen Hutchings 	 * (with a memory barrier to ensure the ordering), then
14914bf718fSBen Hutchings 	 * restart the queue if the fill level turns out to be low
15014bf718fSBen Hutchings 	 * enough.
15114bf718fSBen Hutchings 	 */
15214bf718fSBen Hutchings 	netif_tx_stop_queue(txq1->core_txq);
15314bf718fSBen Hutchings 	smp_mb();
1546aa7de05SMark Rutland 	txq1->old_read_count = READ_ONCE(txq1->read_count);
1556aa7de05SMark Rutland 	txq2->old_read_count = READ_ONCE(txq2->read_count);
15614bf718fSBen Hutchings 
15714bf718fSBen Hutchings 	fill_level = max(txq1->insert_count - txq1->old_read_count,
15814bf718fSBen Hutchings 			 txq2->insert_count - txq2->old_read_count);
159e01b16a7SEdward Cree 	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
16014bf718fSBen Hutchings 	if (likely(fill_level < efx->txq_stop_thresh)) {
16114bf718fSBen Hutchings 		smp_mb();
16214bf718fSBen Hutchings 		if (likely(!efx->loopback_selftest))
16314bf718fSBen Hutchings 			netif_tx_start_queue(txq1->core_txq);
16414bf718fSBen Hutchings 	}
16514bf718fSBen Hutchings }
16614bf718fSBen Hutchings 
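/* Transmit a short skb by copying it whole into one of the per-queue
 * copy cells, so that it consumes a single descriptor and needs no DMA
 * mapping or unmapping of the original data.  The caller has already
 * checked the length against EFX_TX_CB_SIZE; the skb is still attached
 * to the buffer so that completion accounting and freeing work as for
 * the normal mapped path.
 */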
167e9117e50SBert Kenward static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
168e9117e50SBert Kenward 				struct sk_buff *skb)
169e9117e50SBert Kenward {
170e9117e50SBert Kenward 	unsigned int copy_len = skb->len;
171e9117e50SBert Kenward 	struct efx_tx_buffer *buffer;
172e9117e50SBert Kenward 	u8 *copy_buffer;
173e9117e50SBert Kenward 	int rc;
174e9117e50SBert Kenward 
175e01b16a7SEdward Cree 	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
176e9117e50SBert Kenward 
177e9117e50SBert Kenward 	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
178e9117e50SBert Kenward 
179e9117e50SBert Kenward 	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
180e9117e50SBert Kenward 	if (unlikely(!copy_buffer))
181e9117e50SBert Kenward 		return -ENOMEM;
182e9117e50SBert Kenward 
183e9117e50SBert Kenward 	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
184e9117e50SBert Kenward 	EFX_WARN_ON_PARANOID(rc);
185e9117e50SBert Kenward 	buffer->len = copy_len;
186e9117e50SBert Kenward 
187e9117e50SBert Kenward 	buffer->skb = skb;
188e9117e50SBert Kenward 	buffer->flags = EFX_TX_BUF_SKB;
189e9117e50SBert Kenward 
190e9117e50SBert Kenward 	++tx_queue->insert_count;
191e9117e50SBert Kenward 	return rc;
192e9117e50SBert Kenward }
193e9117e50SBert Kenward 
194ee45fd92SJon Cooper #ifdef EFX_USE_PIO
195ee45fd92SJon Cooper 
196ee45fd92SJon Cooper struct efx_short_copy_buffer {
197ee45fd92SJon Cooper 	int used;
198ee45fd92SJon Cooper 	u8 buf[L1_CACHE_BYTES];
199ee45fd92SJon Cooper };
200ee45fd92SJon Cooper 
201ee45fd92SJon Cooper /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
202ee45fd92SJon Cooper  * Advances piobuf pointer. Leaves additional data in the copy buffer.
203ee45fd92SJon Cooper  */
204ee45fd92SJon Cooper static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
205ee45fd92SJon Cooper 				    u8 *data, int len,
206ee45fd92SJon Cooper 				    struct efx_short_copy_buffer *copy_buf)
207ee45fd92SJon Cooper {
208ee45fd92SJon Cooper 	int block_len = len & ~(sizeof(copy_buf->buf) - 1);
209ee45fd92SJon Cooper 
2104984c237SBen Hutchings 	__iowrite64_copy(*piobuf, data, block_len >> 3);
211ee45fd92SJon Cooper 	*piobuf += block_len;
212ee45fd92SJon Cooper 	len -= block_len;
213ee45fd92SJon Cooper 
214ee45fd92SJon Cooper 	if (len) {
215ee45fd92SJon Cooper 		data += block_len;
216ee45fd92SJon Cooper 		BUG_ON(copy_buf->used);
217ee45fd92SJon Cooper 		BUG_ON(len > sizeof(copy_buf->buf));
218ee45fd92SJon Cooper 		memcpy(copy_buf->buf, data, len);
219ee45fd92SJon Cooper 		copy_buf->used = len;
220ee45fd92SJon Cooper 	}
221ee45fd92SJon Cooper }
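/* Example of the split above (assuming L1_CACHE_BYTES is 64): copying
 * 100 bytes gives block_len = 100 & ~63 = 64, so eight qwords go
 * straight to the PIO aperture and the remaining 36 bytes are parked in
 * copy_buf until a later call (or the final flush) completes the cache
 * line.
 */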
222ee45fd92SJon Cooper 
223ee45fd92SJon Cooper /* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
224ee45fd92SJon Cooper  * Advances piobuf pointer. Leaves additional data in the copy buffer.
225ee45fd92SJon Cooper  */
226ee45fd92SJon Cooper static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
227ee45fd92SJon Cooper 				       u8 *data, int len,
228ee45fd92SJon Cooper 				       struct efx_short_copy_buffer *copy_buf)
229ee45fd92SJon Cooper {
230ee45fd92SJon Cooper 	if (copy_buf->used) {
231ee45fd92SJon Cooper 		/* if the copy buffer is partially full, fill it up and write */
232ee45fd92SJon Cooper 		int copy_to_buf =
233ee45fd92SJon Cooper 			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
234ee45fd92SJon Cooper 
235ee45fd92SJon Cooper 		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
236ee45fd92SJon Cooper 		copy_buf->used += copy_to_buf;
237ee45fd92SJon Cooper 
238ee45fd92SJon Cooper 		/* if we didn't fill it up then we're done for now */
239ee45fd92SJon Cooper 		if (copy_buf->used < sizeof(copy_buf->buf))
240ee45fd92SJon Cooper 			return;
241ee45fd92SJon Cooper 
2424984c237SBen Hutchings 		__iowrite64_copy(*piobuf, copy_buf->buf,
2434984c237SBen Hutchings 				 sizeof(copy_buf->buf) >> 3);
244ee45fd92SJon Cooper 		*piobuf += sizeof(copy_buf->buf);
245ee45fd92SJon Cooper 		data += copy_to_buf;
246ee45fd92SJon Cooper 		len -= copy_to_buf;
247ee45fd92SJon Cooper 		copy_buf->used = 0;
248ee45fd92SJon Cooper 	}
249ee45fd92SJon Cooper 
250ee45fd92SJon Cooper 	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
251ee45fd92SJon Cooper }
252ee45fd92SJon Cooper 
253ee45fd92SJon Cooper static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
254ee45fd92SJon Cooper 				  struct efx_short_copy_buffer *copy_buf)
255ee45fd92SJon Cooper {
256ee45fd92SJon Cooper 	/* if there's anything in it, write the whole buffer, including junk */
257ee45fd92SJon Cooper 	if (copy_buf->used)
2584984c237SBen Hutchings 		__iowrite64_copy(piobuf, copy_buf->buf,
2594984c237SBen Hutchings 				 sizeof(copy_buf->buf) >> 3);
260ee45fd92SJon Cooper }
261ee45fd92SJon Cooper 
262ee45fd92SJon Cooper /* Traverse the skb structure and copy fragments into the PIO buffer.
263ee45fd92SJon Cooper  * Advances piobuf pointer.
264ee45fd92SJon Cooper  */
265ee45fd92SJon Cooper static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
266ee45fd92SJon Cooper 				     u8 __iomem **piobuf,
267ee45fd92SJon Cooper 				     struct efx_short_copy_buffer *copy_buf)
268ee45fd92SJon Cooper {
269ee45fd92SJon Cooper 	int i;
270ee45fd92SJon Cooper 
271ee45fd92SJon Cooper 	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
272ee45fd92SJon Cooper 				copy_buf);
273ee45fd92SJon Cooper 
274ee45fd92SJon Cooper 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
275ee45fd92SJon Cooper 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
276ee45fd92SJon Cooper 		u8 *vaddr;
277ee45fd92SJon Cooper 
278ee45fd92SJon Cooper 		vaddr = kmap_atomic(skb_frag_page(f));
279ee45fd92SJon Cooper 
280ee45fd92SJon Cooper 		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
281ee45fd92SJon Cooper 					   skb_frag_size(f), copy_buf);
282ee45fd92SJon Cooper 		kunmap_atomic(vaddr);
283ee45fd92SJon Cooper 	}
284ee45fd92SJon Cooper 
285e01b16a7SEdward Cree 	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
286ee45fd92SJon Cooper }
287ee45fd92SJon Cooper 
288e9117e50SBert Kenward static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
289e9117e50SBert Kenward 			       struct sk_buff *skb)
290ee45fd92SJon Cooper {
291ee45fd92SJon Cooper 	struct efx_tx_buffer *buffer =
292ee45fd92SJon Cooper 		efx_tx_queue_get_insert_buffer(tx_queue);
293ee45fd92SJon Cooper 	u8 __iomem *piobuf = tx_queue->piobuf;
294ee45fd92SJon Cooper 
295ee45fd92SJon Cooper 	/* Copy to PIO buffer. Ensure the writes are padded to the end
296ee45fd92SJon Cooper 	 * of a cache line, as this is required for write-combining to be
297ee45fd92SJon Cooper 	 * effective on at least x86.
298ee45fd92SJon Cooper 	 */
299ee45fd92SJon Cooper 
300ee45fd92SJon Cooper 	if (skb_shinfo(skb)->nr_frags) {
301ee45fd92SJon Cooper 		/* The size of the copy buffer will ensure all writes
302ee45fd92SJon Cooper 		 * are the size of a cache line.
303ee45fd92SJon Cooper 		 */
304ee45fd92SJon Cooper 		struct efx_short_copy_buffer copy_buf;
305ee45fd92SJon Cooper 
306ee45fd92SJon Cooper 		copy_buf.used = 0;
307ee45fd92SJon Cooper 
308ee45fd92SJon Cooper 		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
309ee45fd92SJon Cooper 					 &piobuf, &copy_buf);
310ee45fd92SJon Cooper 		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
311ee45fd92SJon Cooper 	} else {
312ee45fd92SJon Cooper 		/* Pad the write to the size of a cache line.
313e9117e50SBert Kenward 		 * We can do this because we know the skb_shared_info struct is
314ee45fd92SJon Cooper 		 * after the source, and the destination buffer is big enough.
315ee45fd92SJon Cooper 		 */
316ee45fd92SJon Cooper 		BUILD_BUG_ON(L1_CACHE_BYTES >
317ee45fd92SJon Cooper 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
3184984c237SBen Hutchings 		__iowrite64_copy(tx_queue->piobuf, skb->data,
3194984c237SBen Hutchings 				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
320ee45fd92SJon Cooper 	}
321ee45fd92SJon Cooper 
322e9117e50SBert Kenward 	buffer->skb = skb;
323e9117e50SBert Kenward 	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;
324e9117e50SBert Kenward 
325ee45fd92SJon Cooper 	EFX_POPULATE_QWORD_5(buffer->option,
326ee45fd92SJon Cooper 			     ESF_DZ_TX_DESC_IS_OPT, 1,
327ee45fd92SJon Cooper 			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
328ee45fd92SJon Cooper 			     ESF_DZ_TX_PIO_CONT, 0,
329ee45fd92SJon Cooper 			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
330ee45fd92SJon Cooper 			     ESF_DZ_TX_PIO_BUF_ADDR,
331ee45fd92SJon Cooper 			     tx_queue->piobuf_offset);
332ee45fd92SJon Cooper 	++tx_queue->insert_count;
333e9117e50SBert Kenward 	return 0;
334ee45fd92SJon Cooper }
335ee45fd92SJon Cooper #endif /* EFX_USE_PIO */
336ee45fd92SJon Cooper 
337e9117e50SBert Kenward static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
338e9117e50SBert Kenward 					      dma_addr_t dma_addr,
339e9117e50SBert Kenward 					      size_t len)
340e9117e50SBert Kenward {
341e9117e50SBert Kenward 	const struct efx_nic_type *nic_type = tx_queue->efx->type;
342e9117e50SBert Kenward 	struct efx_tx_buffer *buffer;
343e9117e50SBert Kenward 	unsigned int dma_len;
344e9117e50SBert Kenward 
345e9117e50SBert Kenward 	/* Map the fragment taking account of NIC-dependent DMA limits. */
346e9117e50SBert Kenward 	do {
347e9117e50SBert Kenward 		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
348e9117e50SBert Kenward 		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
349e9117e50SBert Kenward 
350e9117e50SBert Kenward 		buffer->len = dma_len;
351e9117e50SBert Kenward 		buffer->dma_addr = dma_addr;
352e9117e50SBert Kenward 		buffer->flags = EFX_TX_BUF_CONT;
353e9117e50SBert Kenward 		len -= dma_len;
354e9117e50SBert Kenward 		dma_addr += dma_len;
355e9117e50SBert Kenward 		++tx_queue->insert_count;
356e9117e50SBert Kenward 	} while (len);
357e9117e50SBert Kenward 
358e9117e50SBert Kenward 	return buffer;
359e9117e50SBert Kenward }
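/* Note that one call to efx_tx_map_chunk() may emit several descriptors:
 * tx_limit_len() is the NIC-specific cap on how much a single descriptor
 * may carry (for example so that it does not cross a DMA boundary the
 * hardware cannot handle), and any oversized or awkwardly aligned
 * fragment is simply chopped into consecutive EFX_TX_BUF_CONT
 * descriptors until the whole length is covered.
 */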
360e9117e50SBert Kenward 
361e9117e50SBert Kenward /* Map all data from an SKB for DMA and create descriptors on the queue.
362e9117e50SBert Kenward  */
363e9117e50SBert Kenward static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
364e9117e50SBert Kenward 			   unsigned int segment_count)
365e9117e50SBert Kenward {
366e9117e50SBert Kenward 	struct efx_nic *efx = tx_queue->efx;
367e9117e50SBert Kenward 	struct device *dma_dev = &efx->pci_dev->dev;
368e9117e50SBert Kenward 	unsigned int frag_index, nr_frags;
369e9117e50SBert Kenward 	dma_addr_t dma_addr, unmap_addr;
370e9117e50SBert Kenward 	unsigned short dma_flags;
371e9117e50SBert Kenward 	size_t len, unmap_len;
372e9117e50SBert Kenward 
373e9117e50SBert Kenward 	nr_frags = skb_shinfo(skb)->nr_frags;
374e9117e50SBert Kenward 	frag_index = 0;
375e9117e50SBert Kenward 
376e9117e50SBert Kenward 	/* Map header data. */
377e9117e50SBert Kenward 	len = skb_headlen(skb);
378e9117e50SBert Kenward 	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
379e9117e50SBert Kenward 	dma_flags = EFX_TX_BUF_MAP_SINGLE;
380e9117e50SBert Kenward 	unmap_len = len;
381e9117e50SBert Kenward 	unmap_addr = dma_addr;
382e9117e50SBert Kenward 
383e9117e50SBert Kenward 	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
384e9117e50SBert Kenward 		return -EIO;
385e9117e50SBert Kenward 
386e9117e50SBert Kenward 	if (segment_count) {
387e9117e50SBert Kenward 		/* For TSO we need to put the header into a separate
388e9117e50SBert Kenward 		 * descriptor. Map this separately if necessary.
389e9117e50SBert Kenward 		 */
390e9117e50SBert Kenward 		size_t header_len = skb_transport_header(skb) - skb->data +
391e9117e50SBert Kenward 				(tcp_hdr(skb)->doff << 2u);
392e9117e50SBert Kenward 
393e9117e50SBert Kenward 		if (header_len != len) {
394e9117e50SBert Kenward 			tx_queue->tso_long_headers++;
395e9117e50SBert Kenward 			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
396e9117e50SBert Kenward 			len -= header_len;
397e9117e50SBert Kenward 			dma_addr += header_len;
398e9117e50SBert Kenward 		}
399e9117e50SBert Kenward 	}
400e9117e50SBert Kenward 
401e9117e50SBert Kenward 	/* Add descriptors for each fragment. */
402e9117e50SBert Kenward 	do {
403e9117e50SBert Kenward 		struct efx_tx_buffer *buffer;
404e9117e50SBert Kenward 		skb_frag_t *fragment;
405e9117e50SBert Kenward 
406e9117e50SBert Kenward 		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
407e9117e50SBert Kenward 
408e9117e50SBert Kenward 		/* The final descriptor for a fragment is responsible for
409e9117e50SBert Kenward 		 * unmapping the whole fragment.
410e9117e50SBert Kenward 		 */
411e9117e50SBert Kenward 		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
412e9117e50SBert Kenward 		buffer->unmap_len = unmap_len;
413e9117e50SBert Kenward 		buffer->dma_offset = buffer->dma_addr - unmap_addr;
414e9117e50SBert Kenward 
415e9117e50SBert Kenward 		if (frag_index >= nr_frags) {
416e9117e50SBert Kenward 			/* Store SKB details with the final buffer for
417e9117e50SBert Kenward 			 * the completion.
418e9117e50SBert Kenward 			 */
419e9117e50SBert Kenward 			buffer->skb = skb;
420e9117e50SBert Kenward 			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
421e9117e50SBert Kenward 			return 0;
422e9117e50SBert Kenward 		}
423e9117e50SBert Kenward 
424e9117e50SBert Kenward 		/* Move on to the next fragment. */
425e9117e50SBert Kenward 		fragment = &skb_shinfo(skb)->frags[frag_index++];
426e9117e50SBert Kenward 		len = skb_frag_size(fragment);
427e9117e50SBert Kenward 		dma_addr = skb_frag_dma_map(dma_dev, fragment,
428e9117e50SBert Kenward 				0, len, DMA_TO_DEVICE);
429e9117e50SBert Kenward 		dma_flags = 0;
430e9117e50SBert Kenward 		unmap_len = len;
431e9117e50SBert Kenward 		unmap_addr = dma_addr;
432e9117e50SBert Kenward 
433e9117e50SBert Kenward 		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
434e9117e50SBert Kenward 			return -EIO;
435e9117e50SBert Kenward 	} while (1);
436e9117e50SBert Kenward }
437e9117e50SBert Kenward 
4380c235113SMartin Habets /* Remove buffers put into a tx_queue for the current packet.
4390c235113SMartin Habets  * None of the buffers may have an skb attached.
440e9117e50SBert Kenward  */
4410c235113SMartin Habets static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
4420c235113SMartin Habets 			       unsigned int insert_count)
443e9117e50SBert Kenward {
444e9117e50SBert Kenward 	struct efx_tx_buffer *buffer;
445d4a7a889SBert Kenward 	unsigned int bytes_compl = 0;
446d4a7a889SBert Kenward 	unsigned int pkts_compl = 0;
447e9117e50SBert Kenward 
448e9117e50SBert Kenward 	/* Work backwards until we hit the original insert pointer value */
4490c235113SMartin Habets 	while (tx_queue->insert_count != insert_count) {
450e9117e50SBert Kenward 		--tx_queue->insert_count;
451e9117e50SBert Kenward 		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
452d4a7a889SBert Kenward 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
453e9117e50SBert Kenward 	}
454e9117e50SBert Kenward }
455e9117e50SBert Kenward 
45646d1efd8SEdward Cree /*
45746d1efd8SEdward Cree  * Fallback to software TSO.
45846d1efd8SEdward Cree  *
45946d1efd8SEdward Cree  * This is used if we are unable to send a GSO packet through hardware TSO.
46046d1efd8SEdward Cree  * This should only ever happen due to per-queue restrictions - unsupported
46146d1efd8SEdward Cree  * packets should first be filtered by the feature flags.
46246d1efd8SEdward Cree  *
46346d1efd8SEdward Cree  * Returns 0 on success, error code otherwise.
46446d1efd8SEdward Cree  */
46546d1efd8SEdward Cree static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
46646d1efd8SEdward Cree 			       struct sk_buff *skb)
467e9117e50SBert Kenward {
46846d1efd8SEdward Cree 	struct sk_buff *segments, *next;
46946d1efd8SEdward Cree 
47046d1efd8SEdward Cree 	segments = skb_gso_segment(skb, 0);
47146d1efd8SEdward Cree 	if (IS_ERR(segments))
47246d1efd8SEdward Cree 		return PTR_ERR(segments);
47346d1efd8SEdward Cree 
474f694be27SHuang Zijiang 	dev_consume_skb_any(skb);
47546d1efd8SEdward Cree 	skb = segments;
47646d1efd8SEdward Cree 
47746d1efd8SEdward Cree 	while (skb) {
47846d1efd8SEdward Cree 		next = skb->next;
47946d1efd8SEdward Cree 		skb->next = NULL;
48046d1efd8SEdward Cree 
48146d1efd8SEdward Cree 		efx_enqueue_skb(tx_queue, skb);
48246d1efd8SEdward Cree 		skb = next;
48346d1efd8SEdward Cree 	}
48446d1efd8SEdward Cree 
48546d1efd8SEdward Cree 	return 0;
486e9117e50SBert Kenward }
487e9117e50SBert Kenward 
488874aeea5SJeff Kirsher /*
489874aeea5SJeff Kirsher  * Add a socket buffer to a TX queue
490874aeea5SJeff Kirsher  *
491874aeea5SJeff Kirsher  * This maps all fragments of a socket buffer for DMA and adds them to
492874aeea5SJeff Kirsher  * the TX queue.  The queue's insert pointer will be incremented by
493874aeea5SJeff Kirsher  * the number of fragments in the socket buffer.
494874aeea5SJeff Kirsher  *
495874aeea5SJeff Kirsher  * If any DMA mapping fails, any mapped fragments will be unmapped,
496874aeea5SJeff Kirsher  * and the queue's insert pointer will be restored to its original value.
497874aeea5SJeff Kirsher  *
498874aeea5SJeff Kirsher  * This function is split out from efx_hard_start_xmit to allow the
499874aeea5SJeff Kirsher  * loopback test to direct packets via specific TX queues.
500874aeea5SJeff Kirsher  *
50114bf718fSBen Hutchings  * Returns NETDEV_TX_OK.
502874aeea5SJeff Kirsher  * You must hold netif_tx_lock() to call this function.
503874aeea5SJeff Kirsher  */
504874aeea5SJeff Kirsher netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
505874aeea5SJeff Kirsher {
5060c235113SMartin Habets 	unsigned int old_insert_count = tx_queue->insert_count;
507f79c957aSFlorian Westphal 	bool xmit_more = netdev_xmit_more();
508e9117e50SBert Kenward 	bool data_mapped = false;
509e9117e50SBert Kenward 	unsigned int segments;
510e9117e50SBert Kenward 	unsigned int skb_len;
51146d1efd8SEdward Cree 	int rc;
512874aeea5SJeff Kirsher 
513e9117e50SBert Kenward 	skb_len = skb->len;
514e9117e50SBert Kenward 	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
515e9117e50SBert Kenward 	if (segments == 1)
516e9117e50SBert Kenward 		segments = 0; /* Don't use TSO for a single segment. */
517874aeea5SJeff Kirsher 
518e9117e50SBert Kenward 	/* Handle TSO first - it's *possible* (although unlikely) that we might
519e9117e50SBert Kenward 	 * be passed a packet to segment that's smaller than the copybreak/PIO
520e9117e50SBert Kenward 	 * size limit.
521874aeea5SJeff Kirsher 	 */
522e9117e50SBert Kenward 	if (segments) {
523e01b16a7SEdward Cree 		EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
52446d1efd8SEdward Cree 		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
52546d1efd8SEdward Cree 		if (rc == -EINVAL) {
52646d1efd8SEdward Cree 			rc = efx_tx_tso_fallback(tx_queue, skb);
52746d1efd8SEdward Cree 			tx_queue->tso_fallbacks++;
52846d1efd8SEdward Cree 			if (rc == 0)
52946d1efd8SEdward Cree 				return 0;
53046d1efd8SEdward Cree 		}
53146d1efd8SEdward Cree 		if (rc)
532e9117e50SBert Kenward 			goto err;
533e9117e50SBert Kenward #ifdef EFX_USE_PIO
534f79c957aSFlorian Westphal 	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
535e9117e50SBert Kenward 		   efx_nic_may_tx_pio(tx_queue)) {
536e9117e50SBert Kenward 		/* Use PIO for short packets with an empty queue. */
537e9117e50SBert Kenward 		if (efx_enqueue_skb_pio(tx_queue, skb))
538e9117e50SBert Kenward 			goto err;
539e9117e50SBert Kenward 		tx_queue->pio_packets++;
540e9117e50SBert Kenward 		data_mapped = true;
541e9117e50SBert Kenward #endif
5425a6681e2SEdward Cree 	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
543e9117e50SBert Kenward 		/* Pad short packets or coalesce short fragmented packets. */
544e9117e50SBert Kenward 		if (efx_enqueue_skb_copy(tx_queue, skb))
545e9117e50SBert Kenward 			goto err;
546e9117e50SBert Kenward 		tx_queue->cb_packets++;
547e9117e50SBert Kenward 		data_mapped = true;
548874aeea5SJeff Kirsher 	}
549874aeea5SJeff Kirsher 
550e9117e50SBert Kenward 	/* Map for DMA and create descriptors if we haven't done so already. */
551e9117e50SBert Kenward 	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
552e9117e50SBert Kenward 		goto err;
553874aeea5SJeff Kirsher 
5540c235113SMartin Habets 	efx_tx_maybe_stop_queue(tx_queue);
5550c235113SMartin Habets 
556874aeea5SJeff Kirsher 	/* Pass off to hardware */
55729e12207SEdward Cree 	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
558b2663a4fSMartin Habets 		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
559b2663a4fSMartin Habets 
560f79c957aSFlorian Westphal 		/* There could be packets left on the partner queue if
561f79c957aSFlorian Westphal 		 * xmit_more was set. If we do not push those they
562b2663a4fSMartin Habets 		 * could be left unsent for a long time and trigger a netdev watchdog timeout.
563b2663a4fSMartin Habets 		 */
564b2663a4fSMartin Habets 		if (txq2->xmit_more_available)
565b2663a4fSMartin Habets 			efx_nic_push_buffers(txq2);
566b2663a4fSMartin Habets 
567874aeea5SJeff Kirsher 		efx_nic_push_buffers(tx_queue);
568b2663a4fSMartin Habets 	} else {
569f79c957aSFlorian Westphal 		tx_queue->xmit_more_available = xmit_more;
570b2663a4fSMartin Habets 	}
571874aeea5SJeff Kirsher 
572e9117e50SBert Kenward 	if (segments) {
573e9117e50SBert Kenward 		tx_queue->tso_bursts++;
574e9117e50SBert Kenward 		tx_queue->tso_packets += segments;
575e9117e50SBert Kenward 		tx_queue->tx_packets  += segments;
576e9117e50SBert Kenward 	} else {
5778ccf3800SAndrew Rybchenko 		tx_queue->tx_packets++;
578e9117e50SBert Kenward 	}
579e9117e50SBert Kenward 
580874aeea5SJeff Kirsher 	return NETDEV_TX_OK;
581874aeea5SJeff Kirsher 
582874aeea5SJeff Kirsher 
583e9117e50SBert Kenward err:
5840c235113SMartin Habets 	efx_enqueue_unwind(tx_queue, old_insert_count);
585874aeea5SJeff Kirsher 	dev_kfree_skb_any(skb);
5860c235113SMartin Habets 
5870c235113SMartin Habets 	/* If we're not expecting another transmit and we had something to push
5880c235113SMartin Habets 	 * on this queue or a partner queue then we need to push here to get the
5890c235113SMartin Habets 	 * previous packets out.
5900c235113SMartin Habets 	 */
5910c235113SMartin Habets 	if (!xmit_more) {
5920c235113SMartin Habets 		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
5930c235113SMartin Habets 
5940c235113SMartin Habets 		if (txq2->xmit_more_available)
5950c235113SMartin Habets 			efx_nic_push_buffers(txq2);
5960c235113SMartin Habets 
5970c235113SMartin Habets 		efx_nic_push_buffers(tx_queue);
5980c235113SMartin Habets 	}
5990c235113SMartin Habets 
60014bf718fSBen Hutchings 	return NETDEV_TX_OK;
601874aeea5SJeff Kirsher }
602874aeea5SJeff Kirsher 
603874aeea5SJeff Kirsher /* Remove packets from the TX queue
604874aeea5SJeff Kirsher  *
605874aeea5SJeff Kirsher  * This removes packets from the TX queue, up to and including the
606874aeea5SJeff Kirsher  * specified index.
607874aeea5SJeff Kirsher  */
608874aeea5SJeff Kirsher static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
609c3940999STom Herbert 				unsigned int index,
610c3940999STom Herbert 				unsigned int *pkts_compl,
611c3940999STom Herbert 				unsigned int *bytes_compl)
612874aeea5SJeff Kirsher {
613874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
614874aeea5SJeff Kirsher 	unsigned int stop_index, read_ptr;
615874aeea5SJeff Kirsher 
616874aeea5SJeff Kirsher 	stop_index = (index + 1) & tx_queue->ptr_mask;
617874aeea5SJeff Kirsher 	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
618874aeea5SJeff Kirsher 
619874aeea5SJeff Kirsher 	while (read_ptr != stop_index) {
620874aeea5SJeff Kirsher 		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
621ba8977bdSBen Hutchings 
622ba8977bdSBen Hutchings 		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
623ba8977bdSBen Hutchings 		    unlikely(buffer->len == 0)) {
624874aeea5SJeff Kirsher 			netif_err(efx, tx_err, efx->net_dev,
625874aeea5SJeff Kirsher 				  "TX queue %d spurious TX completion id %x\n",
626874aeea5SJeff Kirsher 				  tx_queue->queue, read_ptr);
627874aeea5SJeff Kirsher 			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
628874aeea5SJeff Kirsher 			return;
629874aeea5SJeff Kirsher 		}
630874aeea5SJeff Kirsher 
631c3940999STom Herbert 		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
632874aeea5SJeff Kirsher 
633874aeea5SJeff Kirsher 		++tx_queue->read_count;
634874aeea5SJeff Kirsher 		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
635874aeea5SJeff Kirsher 	}
636874aeea5SJeff Kirsher }
637874aeea5SJeff Kirsher 
638874aeea5SJeff Kirsher /* Initiate a packet transmission.  We use one channel per CPU
639874aeea5SJeff Kirsher  * (sharing when we have more CPUs than channels).  On Falcon, the TX
640874aeea5SJeff Kirsher  * completion events will be directed back to the CPU that transmitted
641874aeea5SJeff Kirsher  * the packet, which should be cache-efficient.
642874aeea5SJeff Kirsher  *
643874aeea5SJeff Kirsher  * Context: non-blocking.
644874aeea5SJeff Kirsher  * Note that returning anything other than NETDEV_TX_OK will cause the
645874aeea5SJeff Kirsher  * OS to free the skb.
646874aeea5SJeff Kirsher  */
647874aeea5SJeff Kirsher netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
648874aeea5SJeff Kirsher 				struct net_device *net_dev)
649874aeea5SJeff Kirsher {
650874aeea5SJeff Kirsher 	struct efx_nic *efx = netdev_priv(net_dev);
651874aeea5SJeff Kirsher 	struct efx_tx_queue *tx_queue;
652874aeea5SJeff Kirsher 	unsigned index, type;
653874aeea5SJeff Kirsher 
654874aeea5SJeff Kirsher 	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
655874aeea5SJeff Kirsher 
6567c236c43SStuart Hodgson 	/* PTP "event" packet */
6577c236c43SStuart Hodgson 	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
6587c236c43SStuart Hodgson 	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
6597c236c43SStuart Hodgson 		return efx_ptp_tx(efx, skb);
6607c236c43SStuart Hodgson 	}
6617c236c43SStuart Hodgson 
662874aeea5SJeff Kirsher 	index = skb_get_queue_mapping(skb);
663874aeea5SJeff Kirsher 	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
664874aeea5SJeff Kirsher 	if (index >= efx->n_tx_channels) {
665874aeea5SJeff Kirsher 		index -= efx->n_tx_channels;
666874aeea5SJeff Kirsher 		type |= EFX_TXQ_TYPE_HIGHPRI;
667874aeea5SJeff Kirsher 	}
668874aeea5SJeff Kirsher 	tx_queue = efx_get_tx_queue(efx, index, type);
669874aeea5SJeff Kirsher 
670874aeea5SJeff Kirsher 	return efx_enqueue_skb(tx_queue, skb);
671874aeea5SJeff Kirsher }
672874aeea5SJeff Kirsher 
673874aeea5SJeff Kirsher void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
674874aeea5SJeff Kirsher {
675874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
676874aeea5SJeff Kirsher 
677874aeea5SJeff Kirsher 	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
678874aeea5SJeff Kirsher 	tx_queue->core_txq =
679874aeea5SJeff Kirsher 		netdev_get_tx_queue(efx->net_dev,
680874aeea5SJeff Kirsher 				    tx_queue->queue / EFX_TXQ_TYPES +
681874aeea5SJeff Kirsher 				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
682874aeea5SJeff Kirsher 				     efx->n_tx_channels : 0));
683874aeea5SJeff Kirsher }
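/* Illustration of the inversion (assuming EFX_TXQ_TYPES is 4 and four
 * TX channels): a checksummed, normal-priority skb on core queue 2 is
 * mapped by efx_hard_start_xmit() to hardware queue
 * 2 * EFX_TXQ_TYPES + EFX_TXQ_TYPE_OFFLOAD (= 9 with those values);
 * here, queue 9 / 4 = 2 with HIGHPRI clear maps straight back to core
 * queue 2.  High-priority queues occupy the upper half of the core
 * queue range, offset by n_tx_channels, matching the
 * "index -= efx->n_tx_channels" adjustment on the transmit path.
 */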
684874aeea5SJeff Kirsher 
6852572ac53SJiri Pirko int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
686de4784caSJiri Pirko 		 void *type_data)
687874aeea5SJeff Kirsher {
688874aeea5SJeff Kirsher 	struct efx_nic *efx = netdev_priv(net_dev);
689de4784caSJiri Pirko 	struct tc_mqprio_qopt *mqprio = type_data;
690874aeea5SJeff Kirsher 	struct efx_channel *channel;
691874aeea5SJeff Kirsher 	struct efx_tx_queue *tx_queue;
69216e5cc64SJohn Fastabend 	unsigned tc, num_tc;
693874aeea5SJeff Kirsher 	int rc;
694874aeea5SJeff Kirsher 
695575ed7d3SNogah Frankel 	if (type != TC_SETUP_QDISC_MQPRIO)
69638cf0426SJiri Pirko 		return -EOPNOTSUPP;
697e4c6734eSJohn Fastabend 
698de4784caSJiri Pirko 	num_tc = mqprio->num_tc;
69916e5cc64SJohn Fastabend 
7005a6681e2SEdward Cree 	if (num_tc > EFX_MAX_TX_TC)
701874aeea5SJeff Kirsher 		return -EINVAL;
702874aeea5SJeff Kirsher 
703de4784caSJiri Pirko 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
70456f36acdSAmritha Nambiar 
705874aeea5SJeff Kirsher 	if (num_tc == net_dev->num_tc)
706874aeea5SJeff Kirsher 		return 0;
707874aeea5SJeff Kirsher 
708874aeea5SJeff Kirsher 	for (tc = 0; tc < num_tc; tc++) {
709874aeea5SJeff Kirsher 		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
710874aeea5SJeff Kirsher 		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
711874aeea5SJeff Kirsher 	}
712874aeea5SJeff Kirsher 
713874aeea5SJeff Kirsher 	if (num_tc > net_dev->num_tc) {
714874aeea5SJeff Kirsher 		/* Initialise high-priority queues as necessary */
715874aeea5SJeff Kirsher 		efx_for_each_channel(channel, efx) {
716874aeea5SJeff Kirsher 			efx_for_each_possible_channel_tx_queue(tx_queue,
717874aeea5SJeff Kirsher 							       channel) {
718874aeea5SJeff Kirsher 				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
719874aeea5SJeff Kirsher 					continue;
720874aeea5SJeff Kirsher 				if (!tx_queue->buffer) {
721874aeea5SJeff Kirsher 					rc = efx_probe_tx_queue(tx_queue);
722874aeea5SJeff Kirsher 					if (rc)
723874aeea5SJeff Kirsher 						return rc;
724874aeea5SJeff Kirsher 				}
725874aeea5SJeff Kirsher 				if (!tx_queue->initialised)
726874aeea5SJeff Kirsher 					efx_init_tx_queue(tx_queue);
727874aeea5SJeff Kirsher 				efx_init_tx_queue_core_txq(tx_queue);
728874aeea5SJeff Kirsher 			}
729874aeea5SJeff Kirsher 		}
730874aeea5SJeff Kirsher 	} else {
731874aeea5SJeff Kirsher 		/* Reduce number of classes before number of queues */
732874aeea5SJeff Kirsher 		net_dev->num_tc = num_tc;
733874aeea5SJeff Kirsher 	}
734874aeea5SJeff Kirsher 
735874aeea5SJeff Kirsher 	rc = netif_set_real_num_tx_queues(net_dev,
736874aeea5SJeff Kirsher 					  max_t(int, num_tc, 1) *
737874aeea5SJeff Kirsher 					  efx->n_tx_channels);
738874aeea5SJeff Kirsher 	if (rc)
739874aeea5SJeff Kirsher 		return rc;
740874aeea5SJeff Kirsher 
741874aeea5SJeff Kirsher 	/* Do not destroy high-priority queues when they become
742874aeea5SJeff Kirsher 	 * unused.  We would have to flush them first, and it is
743874aeea5SJeff Kirsher 	 * fairly difficult to flush a subset of TX queues.  Leave
744874aeea5SJeff Kirsher 	 * it to efx_fini_channels().
745874aeea5SJeff Kirsher 	 */
746874aeea5SJeff Kirsher 
747874aeea5SJeff Kirsher 	net_dev->num_tc = num_tc;
748874aeea5SJeff Kirsher 	return 0;
749874aeea5SJeff Kirsher }
750874aeea5SJeff Kirsher 
751874aeea5SJeff Kirsher void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
752874aeea5SJeff Kirsher {
753874aeea5SJeff Kirsher 	unsigned fill_level;
754874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
75514bf718fSBen Hutchings 	struct efx_tx_queue *txq2;
756c3940999STom Herbert 	unsigned int pkts_compl = 0, bytes_compl = 0;
757874aeea5SJeff Kirsher 
758e01b16a7SEdward Cree 	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
759874aeea5SJeff Kirsher 
760c3940999STom Herbert 	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
761c936835cSPeter Dunning 	tx_queue->pkts_compl += pkts_compl;
762c936835cSPeter Dunning 	tx_queue->bytes_compl += bytes_compl;
763874aeea5SJeff Kirsher 
76402e12165SBen Hutchings 	if (pkts_compl > 1)
76502e12165SBen Hutchings 		++tx_queue->merge_events;
76602e12165SBen Hutchings 
76714bf718fSBen Hutchings 	/* See if we need to restart the netif queue.  This memory
76814bf718fSBen Hutchings 	 * barrier ensures that we write read_count (inside
76914bf718fSBen Hutchings 	 * efx_dequeue_buffers()) before reading the queue status.
77014bf718fSBen Hutchings 	 */
771874aeea5SJeff Kirsher 	smp_mb();
772874aeea5SJeff Kirsher 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
773874aeea5SJeff Kirsher 	    likely(efx->port_enabled) &&
774874aeea5SJeff Kirsher 	    likely(netif_device_present(efx->net_dev))) {
77514bf718fSBen Hutchings 		txq2 = efx_tx_queue_partner(tx_queue);
77614bf718fSBen Hutchings 		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
77714bf718fSBen Hutchings 				 txq2->insert_count - txq2->read_count);
77814bf718fSBen Hutchings 		if (fill_level <= efx->txq_wake_thresh)
779874aeea5SJeff Kirsher 			netif_tx_wake_queue(tx_queue->core_txq);
780874aeea5SJeff Kirsher 	}
781874aeea5SJeff Kirsher 
782874aeea5SJeff Kirsher 	/* Check whether the hardware queue is now empty */
783874aeea5SJeff Kirsher 	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
7846aa7de05SMark Rutland 		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
785874aeea5SJeff Kirsher 		if (tx_queue->read_count == tx_queue->old_write_count) {
786874aeea5SJeff Kirsher 			smp_mb();
787874aeea5SJeff Kirsher 			tx_queue->empty_read_count =
788874aeea5SJeff Kirsher 				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
789874aeea5SJeff Kirsher 		}
790874aeea5SJeff Kirsher 	}
791874aeea5SJeff Kirsher }
792874aeea5SJeff Kirsher 
793e9117e50SBert Kenward static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
794f7251a9cSBen Hutchings {
795e9117e50SBert Kenward 	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
796f7251a9cSBen Hutchings }
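/* For example (assuming 4KiB pages and EFX_TX_CB_ORDER of 7), each page
 * holds PAGE_SIZE >> EFX_TX_CB_ORDER = 32 copy cells, so a ring of 1024
 * entries (ptr_mask 0x3ff) needs DIV_ROUND_UP(1024, 32) = 32 copy-buffer
 * pages.
 */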
797f7251a9cSBen Hutchings 
798874aeea5SJeff Kirsher int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
799874aeea5SJeff Kirsher {
800874aeea5SJeff Kirsher 	struct efx_nic *efx = tx_queue->efx;
801874aeea5SJeff Kirsher 	unsigned int entries;
8027668ff9cSBen Hutchings 	int rc;
803874aeea5SJeff Kirsher 
804874aeea5SJeff Kirsher 	/* Create the smallest power-of-two aligned ring */
805874aeea5SJeff Kirsher 	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
806e01b16a7SEdward Cree 	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
807874aeea5SJeff Kirsher 	tx_queue->ptr_mask = entries - 1;
808874aeea5SJeff Kirsher 
809874aeea5SJeff Kirsher 	netif_dbg(efx, probe, efx->net_dev,
810874aeea5SJeff Kirsher 		  "creating TX queue %d size %#x mask %#x\n",
811874aeea5SJeff Kirsher 		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
812874aeea5SJeff Kirsher 
813874aeea5SJeff Kirsher 	/* Allocate software ring */
814c2e4e25aSThomas Meyer 	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
815874aeea5SJeff Kirsher 				   GFP_KERNEL);
816874aeea5SJeff Kirsher 	if (!tx_queue->buffer)
817874aeea5SJeff Kirsher 		return -ENOMEM;
818874aeea5SJeff Kirsher 
819e9117e50SBert Kenward 	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
820e9117e50SBert Kenward 				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
821e9117e50SBert Kenward 	if (!tx_queue->cb_page) {
822f7251a9cSBen Hutchings 		rc = -ENOMEM;
823f7251a9cSBen Hutchings 		goto fail1;
824f7251a9cSBen Hutchings 	}
825f7251a9cSBen Hutchings 
826874aeea5SJeff Kirsher 	/* Allocate hardware ring */
827874aeea5SJeff Kirsher 	rc = efx_nic_probe_tx(tx_queue);
828874aeea5SJeff Kirsher 	if (rc)
829f7251a9cSBen Hutchings 		goto fail2;
830874aeea5SJeff Kirsher 
831874aeea5SJeff Kirsher 	return 0;
832874aeea5SJeff Kirsher 
833f7251a9cSBen Hutchings fail2:
834e9117e50SBert Kenward 	kfree(tx_queue->cb_page);
835e9117e50SBert Kenward 	tx_queue->cb_page = NULL;
836f7251a9cSBen Hutchings fail1:
837874aeea5SJeff Kirsher 	kfree(tx_queue->buffer);
838874aeea5SJeff Kirsher 	tx_queue->buffer = NULL;
839874aeea5SJeff Kirsher 	return rc;
840874aeea5SJeff Kirsher }
841874aeea5SJeff Kirsher 
842874aeea5SJeff Kirsher void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
843874aeea5SJeff Kirsher {
844e9117e50SBert Kenward 	struct efx_nic *efx = tx_queue->efx;
845e9117e50SBert Kenward 
846e9117e50SBert Kenward 	netif_dbg(efx, drv, efx->net_dev,
847874aeea5SJeff Kirsher 		  "initialising TX queue %d\n", tx_queue->queue);
848874aeea5SJeff Kirsher 
849874aeea5SJeff Kirsher 	tx_queue->insert_count = 0;
850874aeea5SJeff Kirsher 	tx_queue->write_count = 0;
851de1deff9SEdward Cree 	tx_queue->packet_write_count = 0;
852874aeea5SJeff Kirsher 	tx_queue->old_write_count = 0;
853874aeea5SJeff Kirsher 	tx_queue->read_count = 0;
854874aeea5SJeff Kirsher 	tx_queue->old_read_count = 0;
855874aeea5SJeff Kirsher 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
856b2663a4fSMartin Habets 	tx_queue->xmit_more_available = false;
8572935e3c3SEdward Cree 	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
8582935e3c3SEdward Cree 				  tx_queue->channel == efx_ptp_channel(efx));
859b9b603d4SMartin Habets 	tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
860b9b603d4SMartin Habets 	tx_queue->completed_timestamp_major = 0;
861b9b603d4SMartin Habets 	tx_queue->completed_timestamp_minor = 0;
862874aeea5SJeff Kirsher 
863e9117e50SBert Kenward 	/* Set up default function pointers. These may get replaced by
864e9117e50SBert Kenward 	 * efx_nic_init_tx() based on NIC/queue capabilities.
865e9117e50SBert Kenward 	 */
86646d1efd8SEdward Cree 	tx_queue->handle_tso = efx_enqueue_skb_tso;
867e9117e50SBert Kenward 
868874aeea5SJeff Kirsher 	/* Set up TX descriptor ring */
869874aeea5SJeff Kirsher 	efx_nic_init_tx(tx_queue);
870874aeea5SJeff Kirsher 
871874aeea5SJeff Kirsher 	tx_queue->initialised = true;
872874aeea5SJeff Kirsher }
873874aeea5SJeff Kirsher 
874e42c3d85SBen Hutchings void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
875874aeea5SJeff Kirsher {
876874aeea5SJeff Kirsher 	struct efx_tx_buffer *buffer;
877874aeea5SJeff Kirsher 
878e42c3d85SBen Hutchings 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
879e42c3d85SBen Hutchings 		  "shutting down TX queue %d\n", tx_queue->queue);
880e42c3d85SBen Hutchings 
881874aeea5SJeff Kirsher 	if (!tx_queue->buffer)
882874aeea5SJeff Kirsher 		return;
883874aeea5SJeff Kirsher 
884874aeea5SJeff Kirsher 	/* Free any buffers left in the ring */
885874aeea5SJeff Kirsher 	while (tx_queue->read_count != tx_queue->write_count) {
886c3940999STom Herbert 		unsigned int pkts_compl = 0, bytes_compl = 0;
887874aeea5SJeff Kirsher 		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
888c3940999STom Herbert 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
889874aeea5SJeff Kirsher 
890874aeea5SJeff Kirsher 		++tx_queue->read_count;
891874aeea5SJeff Kirsher 	}
892b2663a4fSMartin Habets 	tx_queue->xmit_more_available = false;
893c3940999STom Herbert 	netdev_tx_reset_queue(tx_queue->core_txq);
894874aeea5SJeff Kirsher }
895874aeea5SJeff Kirsher 
896874aeea5SJeff Kirsher void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
897874aeea5SJeff Kirsher {
898f7251a9cSBen Hutchings 	int i;
899f7251a9cSBen Hutchings 
900874aeea5SJeff Kirsher 	if (!tx_queue->buffer)
901874aeea5SJeff Kirsher 		return;
902874aeea5SJeff Kirsher 
903874aeea5SJeff Kirsher 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
904874aeea5SJeff Kirsher 		  "destroying TX queue %d\n", tx_queue->queue);
905874aeea5SJeff Kirsher 	efx_nic_remove_tx(tx_queue);
906874aeea5SJeff Kirsher 
907e9117e50SBert Kenward 	if (tx_queue->cb_page) {
908e9117e50SBert Kenward 		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
909f7251a9cSBen Hutchings 			efx_nic_free_buffer(tx_queue->efx,
910e9117e50SBert Kenward 					    &tx_queue->cb_page[i]);
911e9117e50SBert Kenward 		kfree(tx_queue->cb_page);
912e9117e50SBert Kenward 		tx_queue->cb_page = NULL;
913f7251a9cSBen Hutchings 	}
914f7251a9cSBen Hutchings 
915874aeea5SJeff Kirsher 	kfree(tx_queue->buffer);
916874aeea5SJeff Kirsher 	tx_queue->buffer = NULL;
917874aeea5SJeff Kirsher }