/* xref: /openbmc/linux/drivers/net/ethernet/sfc/tx.c (revision e01b16a7) */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

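/* Fetch a pointer into the per-queue copy buffer for the current insert
 * position.  Each copy-buffer page is carved into (1 << EFX_TX_CB_ORDER)-byte
 * chunks and is allocated lazily with GFP_ATOMIC, since we are on the
 * transmit path; the buffer's dma_addr is set so a descriptor can point
 * straight at the copied data.
 */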
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len > EFX_TX_CB_SIZE)
		return NULL;
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}

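/* Release a single TX buffer: undo its DMA mapping (single or page, as
 * recorded in the buffer flags), free the skb if this buffer completes one,
 * and accumulate packet/byte completion counts for the caller.
 */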
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

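/* Worst-case number of descriptors that a single skb can consume.  The core
 * driver presumably uses this to derive the queue-stop threshold, so that a
 * queue is stopped while it still has room for one maximally fragmented
 * packet.
 */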
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

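/* Stop the netif queue if the fill level of this queue or its partner
 * (which the net core sees as the same queue) leaves too little headroom
 * for a worst-case packet.  The stop-then-recheck sequence below avoids
 * losing the wakeup to a racing completion.
 */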
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

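/* Transmit a short skb by copying its entire payload into the per-queue
 * copy buffer, so no DMA mapping of the skb data itself is needed.  The
 * caller has already checked that the length fits in EFX_TX_CB_SIZE.
 */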
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments into the PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}
#endif /* EFX_USE_PIO */

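/* Emit one or more continuation descriptors for a DMA-mapped region,
 * splitting it wherever the NIC's per-descriptor length limit requires.
 * Returns the last buffer written so the caller can attach unmap/skb state.
 */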
static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			   unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment,
				0, len, DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

/* Remove buffers put into a tx_queue.  None of the buffers may have an
 * skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_kfree_skb_any(skb);
	skb = segments;

	while (skb) {
		next = skb->next;
		skb->next = NULL;

		if (next)
			skb->xmit_more = true;
		efx_enqueue_skb(tx_queue, skb);
		skb = next;
	}

	return 0;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
		   efx_nic_may_tx_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	/* Update BQL */
	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

	/* Pass off to hardware */
	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if those
		 * SKBs had skb->xmit_more set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = skb->xmit_more;
	}

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets  += segments;
	} else {
		tx_queue->tx_packets++;
	}

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

err:
	efx_enqueue_unwind(tx_queue);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

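/* Cache the core netdev TX queue backing this hardware queue.  High-priority
 * queues map to the second block of efx->n_tx_channels netdev queues.
 */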
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

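/* Apply an mqprio traffic-class configuration (presumably reached via the
 * driver's ndo_setup_tc hook).  Each class gets one TX queue per channel;
 * adding classes may require the high-priority queues to be probed and
 * initialised on demand.
 */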
int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
		 struct tc_to_netdev *ntc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	num_tc = ntc->tc;

	if (num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

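/* Handle a TX completion event: release buffers up to and including @index,
 * restart the netif queue if enough space has been freed, and note when the
 * hardware queue has drained completely.
 */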
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

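/* Number of DMA-coherent pages needed to provide one copy-buffer chunk per
 * ring entry.
 */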
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
}

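/* Allocate the software descriptor ring, the copy-buffer page array and the
 * hardware ring for a TX queue.
 */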
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

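/* Reset the queue's counters and (re)initialise the hardware ring; typically
 * called each time the interface is brought up.
 */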
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;

	/* Set up default function pointers. These may get replaced by
	 * efx_nic_init_tx() based off NIC/queue capabilities.
	 */
	tx_queue->handle_tso = efx_enqueue_skb_tso;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

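/* Drain a TX queue when it is being shut down: free any buffers still on the
 * ring and reset the BQL state for the corresponding netdev queue.
 */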
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

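/* Release everything allocated by efx_probe_tx_queue(): the hardware ring,
 * the copy-buffer pages and the software descriptor ring.
 */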
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}