// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

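/* Each TX queue keeps an array of lazily-allocated DMA-coherent pages
 * (tx_queue->cb_page[]) carved into one copy-buffer slot of
 * 2^EFX_TX_CB_ORDER bytes per descriptor index.  As a worked example,
 * assuming 4KiB pages and EFX_TX_CB_ORDER == 7, each page backs 32
 * consecutive insert indices and a slot starts at page offset
 * (index * 128 + NET_IP_ALIGN) % 4096.
 */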
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
                                         struct efx_tx_buffer *buffer)
{
        unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
        struct efx_buffer *page_buf =
                &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
        unsigned int offset =
                ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

        if (unlikely(!page_buf->addr) &&
            efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
                                 GFP_ATOMIC))
                return NULL;
        buffer->dma_addr = page_buf->dma_addr + offset;
        buffer->unmap_len = 0;
        return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer, size_t len)
{
        if (len > EFX_TX_CB_SIZE)
                return NULL;
        return efx_tx_get_copy_buffer(tx_queue, buffer);
}

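/* Stop the core netdev queue if either hardware queue of the pair is close
 * to filling up.  The queue is restarted below if re-reading read_count
 * shows the fill level is in fact low enough; otherwise the completion path
 * wakes it once enough descriptors have been freed.
 */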
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
        /* We need to consider both queues that the net core sees as one */
        struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
        struct efx_nic *efx = txq1->efx;
        unsigned int fill_level;

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        if (likely(fill_level < efx->txq_stop_thresh))
                return;

        /* We used the stale old_read_count above, which gives us a
         * pessimistic estimate of the fill level (which may even
         * validly be >= efx->txq_entries).  Now try again using
         * read_count (more likely to be a cache miss).
         *
         * If we read read_count and then conditionally stop the
         * queue, it is possible for the completion path to race with
         * us and complete all outstanding descriptors in the middle,
         * after which there will be no more completions to wake it.
         * Therefore we stop the queue first, then read read_count
         * (with a memory barrier to ensure the ordering), then
         * restart the queue if the fill level turns out to be low
         * enough.
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
        txq1->old_read_count = READ_ONCE(txq1->read_count);
        txq2->old_read_count = READ_ONCE(txq2->read_count);

        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
        EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
        if (likely(fill_level < efx->txq_stop_thresh)) {
                smp_mb();
                if (likely(!efx->loopback_selftest))
                        netif_tx_start_queue(txq1->core_txq);
        }
}

static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
                                struct sk_buff *skb)
{
        unsigned int copy_len = skb->len;
        struct efx_tx_buffer *buffer;
        u8 *copy_buffer;
        int rc;

        EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

        buffer = efx_tx_queue_get_insert_buffer(tx_queue);

        copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
        if (unlikely(!copy_buffer))
                return -ENOMEM;

        rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
        EFX_WARN_ON_PARANOID(rc);
        buffer->len = copy_len;

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB;

        ++tx_queue->insert_count;
        return rc;
}

#ifdef EFX_USE_PIO

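/* Holds the sub-cache-line remainder left over after copying data to the
 * PIO aperture, so it can be merged with the start of the next fragment or
 * flushed (padded out to a full buffer) at the end of the packet.
 */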
struct efx_short_copy_buffer {
        int used;
        u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
                                    u8 *data, int len,
                                    struct efx_short_copy_buffer *copy_buf)
{
        int block_len = len & ~(sizeof(copy_buf->buf) - 1);

        __iowrite64_copy(*piobuf, data, block_len >> 3);
        *piobuf += block_len;
        len -= block_len;

        if (len) {
                data += block_len;
                BUG_ON(copy_buf->used);
                BUG_ON(len > sizeof(copy_buf->buf));
                memcpy(copy_buf->buf, data, len);
                copy_buf->used = len;
        }
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
                                       u8 *data, int len,
                                       struct efx_short_copy_buffer *copy_buf)
{
        if (copy_buf->used) {
                /* if the copy buffer is partially full, fill it up and write */
                int copy_to_buf =
                        min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

                memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
                copy_buf->used += copy_to_buf;

                /* if we didn't fill it up then we're done for now */
                if (copy_buf->used < sizeof(copy_buf->buf))
                        return;

                __iowrite64_copy(*piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
                *piobuf += sizeof(copy_buf->buf);
                data += copy_to_buf;
                len -= copy_to_buf;
                copy_buf->used = 0;
        }

        efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
                                  struct efx_short_copy_buffer *copy_buf)
{
        /* if there's anything in it, write the whole buffer, including junk */
        if (copy_buf->used)
                __iowrite64_copy(piobuf, copy_buf->buf,
                                 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
                                     u8 __iomem **piobuf,
                                     struct efx_short_copy_buffer *copy_buf)
{
        int i;

        efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
                                copy_buf);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                u8 *vaddr;

                vaddr = kmap_atomic(skb_frag_page(f));

                efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
                                           skb_frag_size(f), copy_buf);
                kunmap_atomic(vaddr);
        }

        EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

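/* Copy an entire skb into the queue's PIO aperture and add a single PIO
 * option descriptor for it.  The caller only takes this path for short
 * packets on an empty queue, so no DMA mapping is needed.
 */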
static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
{
        struct efx_tx_buffer *buffer =
                efx_tx_queue_get_insert_buffer(tx_queue);
        u8 __iomem *piobuf = tx_queue->piobuf;

        /* Copy to PIO buffer. Ensure the writes are padded to the end
         * of a cache line, as this is required for write-combining to be
         * effective on at least x86.
         */

        if (skb_shinfo(skb)->nr_frags) {
                /* The size of the copy buffer will ensure all writes
                 * are the size of a cache line.
                 */
                struct efx_short_copy_buffer copy_buf;

                copy_buf.used = 0;

                efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
                                         &piobuf, &copy_buf);
                efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
        } else {
                /* Pad the write to the size of a cache line.
                 * We can do this because we know the skb_shared_info struct is
                 * after the source, and the destination buffer is big enough.
                 */
                BUILD_BUG_ON(L1_CACHE_BYTES >
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
                __iowrite64_copy(tx_queue->piobuf, skb->data,
                                 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
        }

        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

        EFX_POPULATE_QWORD_5(buffer->option,
                             ESF_DZ_TX_DESC_IS_OPT, 1,
                             ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
                             ESF_DZ_TX_PIO_CONT, 0,
                             ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
                             ESF_DZ_TX_PIO_BUF_ADDR,
                             tx_queue->piobuf_offset);
        ++tx_queue->insert_count;
        return 0;
}
#endif /* EFX_USE_PIO */

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        unsigned int old_insert_count = tx_queue->insert_count;
        bool xmit_more = netdev_xmit_more();
        bool data_mapped = false;
        unsigned int segments;
        unsigned int skb_len;
        int rc;

        skb_len = skb->len;
        segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
        if (segments == 1)
                segments = 0; /* Don't use TSO for a single segment. */

        /* Handle TSO first - it's *possible* (although unlikely) that we might
         * be passed a packet to segment that's smaller than the copybreak/PIO
         * size limit.
         */
        if (segments) {
                EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
                rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
                if (rc == -EINVAL) {
                        rc = efx_tx_tso_fallback(tx_queue, skb);
                        tx_queue->tso_fallbacks++;
                        if (rc == 0)
                                return 0;
                }
                if (rc)
                        goto err;
#ifdef EFX_USE_PIO
        } else if (skb_len <= efx_piobuf_size && !xmit_more &&
                   efx_nic_may_tx_pio(tx_queue)) {
                /* Use PIO for short packets with an empty queue. */
                if (efx_enqueue_skb_pio(tx_queue, skb))
                        goto err;
                tx_queue->pio_packets++;
                data_mapped = true;
#endif
        } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
                /* Pad short packets or coalesce short fragmented packets. */
                if (efx_enqueue_skb_copy(tx_queue, skb))
                        goto err;
                tx_queue->cb_packets++;
                data_mapped = true;
        }

        /* Map for DMA and create descriptors if we haven't done so already. */
        if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
                goto err;

        efx_tx_maybe_stop_queue(tx_queue);

        /* Pass off to hardware */
        if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                /* There could be packets left on the partner queue if
                 * xmit_more was set. If we do not push those they
                 * could be left for a long time and cause a netdev watchdog.
                 */
                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        } else {
                tx_queue->xmit_more_available = xmit_more;
        }

        if (segments) {
                tx_queue->tso_bursts++;
                tx_queue->tso_packets += segments;
                tx_queue->tx_packets += segments;
        } else {
                tx_queue->tx_packets++;
        }

        return NETDEV_TX_OK;

err:
        efx_enqueue_unwind(tx_queue, old_insert_count);
        dev_kfree_skb_any(skb);

        /* If we're not expecting another transmit and we had something to push
         * on this queue or a partner queue then we need to push here to get the
         * previous packets out.
         */
        if (!xmit_more) {
                struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

                if (txq2->xmit_more_available)
                        efx_nic_push_buffers(txq2);

                efx_nic_push_buffers(tx_queue);
        }

        return NETDEV_TX_OK;
}

static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
{
        int i;

        for (i = 0; i < n; i++)
                xdp_return_frame_rx_napi(xdpfs[i]);
}

/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
 * (for XDP redirect).
 */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
                       bool flush)
{
        struct efx_tx_buffer *tx_buffer;
        struct efx_tx_queue *tx_queue;
        struct xdp_frame *xdpf;
        dma_addr_t dma_addr;
        unsigned int len;
        int space;
        int cpu;
        int i;

        cpu = raw_smp_processor_id();

        if (!efx->xdp_tx_queue_count ||
            unlikely(cpu >= efx->xdp_tx_queue_count))
                return -EINVAL;

        tx_queue = efx->xdp_tx_queues[cpu];
        if (unlikely(!tx_queue))
                return -EINVAL;

        if (unlikely(n && !xdpfs))
                return -EINVAL;

        if (!n)
                return 0;

        /* Check for available space. We should never need multiple
         * descriptors per frame.
         */
        space = efx->txq_entries +
                tx_queue->read_count - tx_queue->insert_count;

        for (i = 0; i < n; i++) {
                xdpf = xdpfs[i];

                if (i >= space)
                        break;

                /* We'll want a descriptor for this tx. */
                prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

                len = xdpf->len;

                /* Map for DMA. */
                dma_addr = dma_map_single(&efx->pci_dev->dev,
                                          xdpf->data, len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
                        break;

                /* Create descriptor and set up for unmapping DMA. */
                tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
                tx_buffer->xdpf = xdpf;
                tx_buffer->flags = EFX_TX_BUF_XDP |
                                   EFX_TX_BUF_MAP_SINGLE;
                tx_buffer->dma_offset = 0;
                tx_buffer->unmap_len = len;
                tx_queue->tx_packets++;
        }

        /* Pass mapped frames to hardware. */
        if (flush && i > 0)
                efx_nic_push_buffers(tx_queue);

        if (i == 0)
                return -EIO;

        efx_xdp_return_frames(n - i, xdpfs + i);

        return i;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        unsigned index, type;

        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

        /* PTP "event" packet */
        if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
            unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
                return efx_ptp_tx(efx, skb);
        }

        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
                index -= efx->n_tx_channels;
                type |= EFX_TXQ_TYPE_HIGHPRI;
        }
        tx_queue = efx_get_tx_queue(efx, index, type);

        return __efx_enqueue_skb(tx_queue, skb);
}

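/* Complete a single packet's worth of descriptors: dequeue buffers up to
 * and including the one that carries the skb.  A completion that finds no
 * buffer in use is treated as spurious and schedules a reset.
 */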
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
        unsigned int pkts_compl = 0, bytes_compl = 0;
        unsigned int read_ptr;
        bool finished = false;

        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (!finished) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!efx_tx_buffer_in_use(buffer)) {
                        struct efx_nic *efx = tx_queue->efx;

                        netif_err(efx, hw, efx->net_dev,
                                  "TX queue %d spurious single TX completion\n",
                                  tx_queue->queue);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                /* Need to check the flag before dequeueing. */
                if (buffer->flags & EFX_TX_BUF_SKB)
                        finished = true;
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }

        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        EFX_WARN_ON_PARANOID(pkts_compl != 1);

        efx_xmit_done_check_empty(tx_queue);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
                                    tx_queue->channel->channel +
                                    ((tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct tc_mqprio_qopt *mqprio = type_data;
        unsigned tc, num_tc;

        if (type != TC_SETUP_QDISC_MQPRIO)
                return -EOPNOTSUPP;

        /* Only Siena supported highpri queues */
        if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
                return -EOPNOTSUPP;

        num_tc = mqprio->num_tc;

        if (num_tc > EFX_MAX_TX_TC)
                return -EINVAL;

        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

        if (num_tc == net_dev->num_tc)
                return 0;

        for (tc = 0; tc < num_tc; tc++) {
                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }

        net_dev->num_tc = num_tc;

        return netif_set_real_num_tx_queues(net_dev,
                                            max_t(int, num_tc, 1) *
                                            efx->n_tx_channels);
}