xref: /openbmc/linux/drivers/net/ethernet/sfc/tx.c (revision a080a92a)
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"

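/* When PIO support is compiled in, efx_piobuf_size is the largest packet
 * length that efx_enqueue_skb() will consider sending by PIO; the default is
 * 256 bytes rounded up to a whole L1 cache line.
 */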
#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

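/* Return a pointer into the copy buffer that backs the current insert
 * position, allocating the DMA-coherent page on first use.  Each insert-ring
 * slot owns a (1 << EFX_TX_CB_ORDER)-byte region of a shared page, used at an
 * offset of NET_IP_ALIGN; buffer->dma_addr is set so the NIC can read the
 * copied data directly.  Returns NULL if the page cannot be allocated.
 */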
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len > EFX_TX_CB_SIZE)
		return NULL;
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}

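/* Stop the core TX queue if either of the two hardware queues behind it is
 * close to full.  Fill levels are computed as insert_count minus the
 * (old_)read_count in unsigned arithmetic, so they stay correct when the
 * counters wrap.
 */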
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

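/* Copy an entire short skb into the copy buffer for its insert position, so
 * that it occupies a single descriptor and needs no DMA unmapping on
 * completion.  The caller has already checked that skb->len fits in
 * EFX_TX_CB_SIZE.
 */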
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

#ifdef EFX_USE_PIO

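/* Scratch buffer used while streaming an skb into a PIO buffer.  It holds the
 * unaligned tail of the data copied so far, so that each chunk pushed through
 * __iowrite64_copy() below is a whole cache line's worth of data; any
 * remainder is carried over until more data arrives or the buffer is flushed.
 */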
struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}

/* Traverse the skb structure and copy its fragments into the PIO buffer.
 * Advances the piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

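/* Transmit a short packet entirely by PIO: the payload is copied straight
 * into the adapter's PIO buffer and described with a single PIO option
 * descriptor, so no DMA mapping is needed.  Only called when the packet fits
 * in efx_piobuf_size, xmit_more is not set and efx_nic_may_tx_pio() allows it.
 */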
static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}
#endif /* EFX_USE_PIO */

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);
	skb = segments;

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
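/* Transmission strategy, in order of preference: hardware TSO for GSO skbs
 * (falling back to software GSO if the queue rejects the packet), PIO for
 * short packets on an idle queue (when EFX_USE_PIO is defined), the per-queue
 * copy buffer for short fragmented packets, and otherwise plain DMA mapping
 * of the skb head and fragments.
 */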
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_nic_may_tx_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if
		 * xmit_more was set.  If we do not push those now they
		 * could sit there for a long time and trigger the netdev
		 * watchdog.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = xmit_more;
	}

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets  += segments;
	} else {
		tx_queue->tx_packets++;
	}

	return NETDEV_TX_OK;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	}

	return NETDEV_TX_OK;
}

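/* Free XDP frames that were not (or could not be) queued for transmission. */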
static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
{
	int i;

	for (i = 0; i < n; i++)
		xdp_return_frame_rx_napi(xdpfs[i]);
}

/* Transmit a packet from an XDP buffer
 *
 * Returns the number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either from our own poll (for XDP TX) or from
 * another NIC's poll (for XDP redirect).
 */
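/* The TX queue is selected by the current CPU id (efx->xdp_tx_queues[cpu]);
 * no lock is taken here, which relies on each CPU having its own XDP queue.
 */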
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush)
{
	struct efx_tx_buffer *tx_buffer;
	struct efx_tx_queue *tx_queue;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	unsigned int len;
	int space;
	int cpu;
	int i;

	cpu = raw_smp_processor_id();

	if (!efx->xdp_tx_queue_count ||
	    unlikely(cpu >= efx->xdp_tx_queue_count))
		return -EINVAL;

	tx_queue = efx->xdp_tx_queues[cpu];
	if (unlikely(!tx_queue))
		return -EINVAL;

	if (unlikely(n && !xdpfs))
		return -EINVAL;

	if (!n)
		return 0;

	/* Check for available space. We should never need multiple
	 * descriptors per frame.
	 */
	space = efx->txq_entries +
		tx_queue->read_count - tx_queue->insert_count;

	for (i = 0; i < n; i++) {
		xdpf = xdpfs[i];

		if (i >= space)
			break;

		/* We'll want a descriptor for this tx. */
		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

		len = xdpf->len;

		/* Map for DMA. */
		dma_addr = dma_map_single(&efx->pci_dev->dev,
					  xdpf->data, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
			break;

		/* Create descriptor and set up for unmapping DMA. */
		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
		tx_buffer->xdpf = xdpf;
		tx_buffer->flags = EFX_TX_BUF_XDP |
				   EFX_TX_BUF_MAP_SINGLE;
		tx_buffer->dma_offset = 0;
		tx_buffer->unmap_len = len;
		tx_queue->tx_packets++;
	}

	/* Pass mapped frames to hardware. */
	if (flush && i > 0)
		efx_nic_push_buffers(tx_queue);

	if (i == 0)
		return -EIO;

	efx_xdp_return_frames(n - i, xdpfs + i);

	return i;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

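/* Bind a hardware TX queue to the core netdev TX queue that
 * efx_hard_start_xmit() will map onto it: normal-priority queues take indices
 * 0..n_tx_channels-1 and high-priority (EFX_TXQ_TYPE_HIGHPRI) queues take the
 * range above that.
 */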
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

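/* ndo_setup_tc handler.  Accepts an mqprio traffic-class configuration by
 * mapping each class onto a block of n_tx_channels queues, probing and
 * initialising the high-priority queues the first time extra classes are
 * requested, and resizing the real number of TX queues to match.
 */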
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
615