1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2005-2013 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/pci.h> 12 #include <linux/tcp.h> 13 #include <linux/ip.h> 14 #include <linux/in.h> 15 #include <linux/ipv6.h> 16 #include <linux/slab.h> 17 #include <net/ipv6.h> 18 #include <linux/if_ether.h> 19 #include <linux/highmem.h> 20 #include <linux/cache.h> 21 #include "net_driver.h" 22 #include "efx.h" 23 #include "io.h" 24 #include "nic.h" 25 #include "workarounds.h" 26 #include "ef10_regs.h" 27 28 #ifdef EFX_USE_PIO 29 30 #define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE 31 #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) 32 unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; 33 34 #endif /* EFX_USE_PIO */ 35 36 static inline unsigned int 37 efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue) 38 { 39 return tx_queue->insert_count & tx_queue->ptr_mask; 40 } 41 42 static inline struct efx_tx_buffer * 43 __efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) 44 { 45 return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)]; 46 } 47 48 static inline struct efx_tx_buffer * 49 efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) 50 { 51 struct efx_tx_buffer *buffer = 52 __efx_tx_queue_get_insert_buffer(tx_queue); 53 54 EFX_BUG_ON_PARANOID(buffer->len); 55 EFX_BUG_ON_PARANOID(buffer->flags); 56 EFX_BUG_ON_PARANOID(buffer->unmap_len); 57 58 return buffer; 59 } 60 61 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 62 struct efx_tx_buffer *buffer, 63 unsigned int *pkts_compl, 64 unsigned int *bytes_compl) 65 { 66 if (buffer->unmap_len) { 67 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; 68 dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset; 69 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) 70 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, 71 DMA_TO_DEVICE); 72 else 73 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, 74 DMA_TO_DEVICE); 75 buffer->unmap_len = 0; 76 } 77 78 if (buffer->flags & EFX_TX_BUF_SKB) { 79 (*pkts_compl)++; 80 (*bytes_compl) += buffer->skb->len; 81 dev_consume_skb_any((struct sk_buff *)buffer->skb); 82 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, 83 "TX queue %d transmission id %x complete\n", 84 tx_queue->queue, tx_queue->read_count); 85 } else if (buffer->flags & EFX_TX_BUF_HEAP) { 86 kfree(buffer->heap_buf); 87 } 88 89 buffer->len = 0; 90 buffer->flags = 0; 91 } 92 93 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 94 struct sk_buff *skb); 95 96 static inline unsigned 97 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) 98 { 99 /* Depending on the NIC revision, we can use descriptor 100 * lengths up to 8K or 8K-1. However, since PCI Express 101 * devices must split read requests at 4K boundaries, there is 102 * little benefit from using descriptors that cross those 103 * boundaries and we keep things simple by not doing so. 104 */ 105 unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1; 106 107 /* Work around hardware bug for unaligned buffers. */ 108 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) 109 len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); 110 111 return len; 112 } 113 114 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) 115 { 116 /* Header and payload descriptor for each output segment, plus 117 * one for every input fragment boundary within a segment 118 */ 119 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; 120 121 /* Possibly one more per segment for the alignment workaround, 122 * or for option descriptors 123 */ 124 if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0) 125 max_descs += EFX_TSO_MAX_SEGS; 126 127 /* Possibly more for PCIe page boundaries within input fragments */ 128 if (PAGE_SIZE > EFX_PAGE_SIZE) 129 max_descs += max_t(unsigned int, MAX_SKB_FRAGS, 130 DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); 131 132 return max_descs; 133 } 134 135 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) 136 { 137 /* We need to consider both queues that the net core sees as one */ 138 struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1); 139 struct efx_nic *efx = txq1->efx; 140 unsigned int fill_level; 141 142 fill_level = max(txq1->insert_count - txq1->old_read_count, 143 txq2->insert_count - txq2->old_read_count); 144 if (likely(fill_level < efx->txq_stop_thresh)) 145 return; 146 147 /* We used the stale old_read_count above, which gives us a 148 * pessimistic estimate of the fill level (which may even 149 * validly be >= efx->txq_entries). Now try again using 150 * read_count (more likely to be a cache miss). 151 * 152 * If we read read_count and then conditionally stop the 153 * queue, it is possible for the completion path to race with 154 * us and complete all outstanding descriptors in the middle, 155 * after which there will be no more completions to wake it. 156 * Therefore we stop the queue first, then read read_count 157 * (with a memory barrier to ensure the ordering), then 158 * restart the queue if the fill level turns out to be low 159 * enough. 160 */ 161 netif_tx_stop_queue(txq1->core_txq); 162 smp_mb(); 163 txq1->old_read_count = ACCESS_ONCE(txq1->read_count); 164 txq2->old_read_count = ACCESS_ONCE(txq2->read_count); 165 166 fill_level = max(txq1->insert_count - txq1->old_read_count, 167 txq2->insert_count - txq2->old_read_count); 168 EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries); 169 if (likely(fill_level < efx->txq_stop_thresh)) { 170 smp_mb(); 171 if (likely(!efx->loopback_selftest)) 172 netif_tx_start_queue(txq1->core_txq); 173 } 174 } 175 176 #ifdef EFX_USE_PIO 177 178 struct efx_short_copy_buffer { 179 int used; 180 u8 buf[L1_CACHE_BYTES]; 181 }; 182 183 /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. 184 * Advances piobuf pointer. Leaves additional data in the copy buffer. 185 */ 186 static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, 187 u8 *data, int len, 188 struct efx_short_copy_buffer *copy_buf) 189 { 190 int block_len = len & ~(sizeof(copy_buf->buf) - 1); 191 192 __iowrite64_copy(*piobuf, data, block_len >> 3); 193 *piobuf += block_len; 194 len -= block_len; 195 196 if (len) { 197 data += block_len; 198 BUG_ON(copy_buf->used); 199 BUG_ON(len > sizeof(copy_buf->buf)); 200 memcpy(copy_buf->buf, data, len); 201 copy_buf->used = len; 202 } 203 } 204 205 /* Copy to PIO, respecting dword alignment, popping data from copy buffer first. 206 * Advances piobuf pointer. Leaves additional data in the copy buffer. 207 */ 208 static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, 209 u8 *data, int len, 210 struct efx_short_copy_buffer *copy_buf) 211 { 212 if (copy_buf->used) { 213 /* if the copy buffer is partially full, fill it up and write */ 214 int copy_to_buf = 215 min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len); 216 217 memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf); 218 copy_buf->used += copy_to_buf; 219 220 /* if we didn't fill it up then we're done for now */ 221 if (copy_buf->used < sizeof(copy_buf->buf)) 222 return; 223 224 __iowrite64_copy(*piobuf, copy_buf->buf, 225 sizeof(copy_buf->buf) >> 3); 226 *piobuf += sizeof(copy_buf->buf); 227 data += copy_to_buf; 228 len -= copy_to_buf; 229 copy_buf->used = 0; 230 } 231 232 efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); 233 } 234 235 static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, 236 struct efx_short_copy_buffer *copy_buf) 237 { 238 /* if there's anything in it, write the whole buffer, including junk */ 239 if (copy_buf->used) 240 __iowrite64_copy(piobuf, copy_buf->buf, 241 sizeof(copy_buf->buf) >> 3); 242 } 243 244 /* Traverse skb structure and copy fragments in to PIO buffer. 245 * Advances piobuf pointer. 246 */ 247 static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, 248 u8 __iomem **piobuf, 249 struct efx_short_copy_buffer *copy_buf) 250 { 251 int i; 252 253 efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), 254 copy_buf); 255 256 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { 257 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 258 u8 *vaddr; 259 260 vaddr = kmap_atomic(skb_frag_page(f)); 261 262 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, 263 skb_frag_size(f), copy_buf); 264 kunmap_atomic(vaddr); 265 } 266 267 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list); 268 } 269 270 static struct efx_tx_buffer * 271 efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 272 { 273 struct efx_tx_buffer *buffer = 274 efx_tx_queue_get_insert_buffer(tx_queue); 275 u8 __iomem *piobuf = tx_queue->piobuf; 276 277 /* Copy to PIO buffer. Ensure the writes are padded to the end 278 * of a cache line, as this is required for write-combining to be 279 * effective on at least x86. 280 */ 281 282 if (skb_shinfo(skb)->nr_frags) { 283 /* The size of the copy buffer will ensure all writes 284 * are the size of a cache line. 285 */ 286 struct efx_short_copy_buffer copy_buf; 287 288 copy_buf.used = 0; 289 290 efx_skb_copy_bits_to_pio(tx_queue->efx, skb, 291 &piobuf, ©_buf); 292 efx_flush_copy_buffer(tx_queue->efx, piobuf, ©_buf); 293 } else { 294 /* Pad the write to the size of a cache line. 295 * We can do this because we know the skb_shared_info sruct is 296 * after the source, and the destination buffer is big enough. 297 */ 298 BUILD_BUG_ON(L1_CACHE_BYTES > 299 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 300 __iowrite64_copy(tx_queue->piobuf, skb->data, 301 ALIGN(skb->len, L1_CACHE_BYTES) >> 3); 302 } 303 304 EFX_POPULATE_QWORD_5(buffer->option, 305 ESF_DZ_TX_DESC_IS_OPT, 1, 306 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO, 307 ESF_DZ_TX_PIO_CONT, 0, 308 ESF_DZ_TX_PIO_BYTE_CNT, skb->len, 309 ESF_DZ_TX_PIO_BUF_ADDR, 310 tx_queue->piobuf_offset); 311 ++tx_queue->pio_packets; 312 ++tx_queue->insert_count; 313 return buffer; 314 } 315 #endif /* EFX_USE_PIO */ 316 317 /* 318 * Add a socket buffer to a TX queue 319 * 320 * This maps all fragments of a socket buffer for DMA and adds them to 321 * the TX queue. The queue's insert pointer will be incremented by 322 * the number of fragments in the socket buffer. 323 * 324 * If any DMA mapping fails, any mapped fragments will be unmapped, 325 * the queue's insert pointer will be restored to its original value. 326 * 327 * This function is split out from efx_hard_start_xmit to allow the 328 * loopback test to direct packets via specific TX queues. 329 * 330 * Returns NETDEV_TX_OK. 331 * You must hold netif_tx_lock() to call this function. 332 */ 333 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 334 { 335 struct efx_nic *efx = tx_queue->efx; 336 struct device *dma_dev = &efx->pci_dev->dev; 337 struct efx_tx_buffer *buffer; 338 unsigned int old_insert_count = tx_queue->insert_count; 339 skb_frag_t *fragment; 340 unsigned int len, unmap_len = 0; 341 dma_addr_t dma_addr, unmap_addr = 0; 342 unsigned int dma_len; 343 unsigned short dma_flags; 344 int i = 0; 345 346 EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); 347 348 if (skb_shinfo(skb)->gso_size) 349 return efx_enqueue_skb_tso(tx_queue, skb); 350 351 /* Get size of the initial fragment */ 352 len = skb_headlen(skb); 353 354 /* Pad if necessary */ 355 if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) { 356 EFX_BUG_ON_PARANOID(skb->data_len); 357 len = 32 + 1; 358 if (skb_pad(skb, len - skb->len)) 359 return NETDEV_TX_OK; 360 } 361 362 /* Consider using PIO for short packets */ 363 #ifdef EFX_USE_PIO 364 if (skb->len <= efx_piobuf_size && !skb->xmit_more && 365 efx_nic_may_tx_pio(tx_queue)) { 366 buffer = efx_enqueue_skb_pio(tx_queue, skb); 367 dma_flags = EFX_TX_BUF_OPTION; 368 goto finish_packet; 369 } 370 #endif 371 372 /* Map for DMA. Use dma_map_single rather than dma_map_page 373 * since this is more efficient on machines with sparse 374 * memory. 375 */ 376 dma_flags = EFX_TX_BUF_MAP_SINGLE; 377 dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); 378 379 /* Process all fragments */ 380 while (1) { 381 if (unlikely(dma_mapping_error(dma_dev, dma_addr))) 382 goto dma_err; 383 384 /* Store fields for marking in the per-fragment final 385 * descriptor */ 386 unmap_len = len; 387 unmap_addr = dma_addr; 388 389 /* Add to TX queue, splitting across DMA boundaries */ 390 do { 391 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 392 393 dma_len = efx_max_tx_len(efx, dma_addr); 394 if (likely(dma_len >= len)) 395 dma_len = len; 396 397 /* Fill out per descriptor fields */ 398 buffer->len = dma_len; 399 buffer->dma_addr = dma_addr; 400 buffer->flags = EFX_TX_BUF_CONT; 401 len -= dma_len; 402 dma_addr += dma_len; 403 ++tx_queue->insert_count; 404 } while (len); 405 406 /* Transfer ownership of the unmapping to the final buffer */ 407 buffer->flags = EFX_TX_BUF_CONT | dma_flags; 408 buffer->unmap_len = unmap_len; 409 buffer->dma_offset = buffer->dma_addr - unmap_addr; 410 unmap_len = 0; 411 412 /* Get address and size of next fragment */ 413 if (i >= skb_shinfo(skb)->nr_frags) 414 break; 415 fragment = &skb_shinfo(skb)->frags[i]; 416 len = skb_frag_size(fragment); 417 i++; 418 /* Map for DMA */ 419 dma_flags = 0; 420 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, 421 DMA_TO_DEVICE); 422 } 423 424 /* Transfer ownership of the skb to the final buffer */ 425 #ifdef EFX_USE_PIO 426 finish_packet: 427 #endif 428 buffer->skb = skb; 429 buffer->flags = EFX_TX_BUF_SKB | dma_flags; 430 431 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 432 433 efx_tx_maybe_stop_queue(tx_queue); 434 435 /* Pass off to hardware */ 436 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) 437 efx_nic_push_buffers(tx_queue); 438 439 tx_queue->tx_packets++; 440 441 return NETDEV_TX_OK; 442 443 dma_err: 444 netif_err(efx, tx_err, efx->net_dev, 445 " TX queue %d could not map skb with %d bytes %d " 446 "fragments for DMA\n", tx_queue->queue, skb->len, 447 skb_shinfo(skb)->nr_frags + 1); 448 449 /* Mark the packet as transmitted, and free the SKB ourselves */ 450 dev_kfree_skb_any(skb); 451 452 /* Work backwards until we hit the original insert pointer value */ 453 while (tx_queue->insert_count != old_insert_count) { 454 unsigned int pkts_compl = 0, bytes_compl = 0; 455 --tx_queue->insert_count; 456 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 457 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 458 } 459 460 /* Free the fragment we were mid-way through pushing */ 461 if (unmap_len) { 462 if (dma_flags & EFX_TX_BUF_MAP_SINGLE) 463 dma_unmap_single(dma_dev, unmap_addr, unmap_len, 464 DMA_TO_DEVICE); 465 else 466 dma_unmap_page(dma_dev, unmap_addr, unmap_len, 467 DMA_TO_DEVICE); 468 } 469 470 return NETDEV_TX_OK; 471 } 472 473 /* Remove packets from the TX queue 474 * 475 * This removes packets from the TX queue, up to and including the 476 * specified index. 477 */ 478 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, 479 unsigned int index, 480 unsigned int *pkts_compl, 481 unsigned int *bytes_compl) 482 { 483 struct efx_nic *efx = tx_queue->efx; 484 unsigned int stop_index, read_ptr; 485 486 stop_index = (index + 1) & tx_queue->ptr_mask; 487 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; 488 489 while (read_ptr != stop_index) { 490 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 491 492 if (!(buffer->flags & EFX_TX_BUF_OPTION) && 493 unlikely(buffer->len == 0)) { 494 netif_err(efx, tx_err, efx->net_dev, 495 "TX queue %d spurious TX completion id %x\n", 496 tx_queue->queue, read_ptr); 497 efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); 498 return; 499 } 500 501 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); 502 503 ++tx_queue->read_count; 504 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; 505 } 506 } 507 508 /* Initiate a packet transmission. We use one channel per CPU 509 * (sharing when we have more CPUs than channels). On Falcon, the TX 510 * completion events will be directed back to the CPU that transmitted 511 * the packet, which should be cache-efficient. 512 * 513 * Context: non-blocking. 514 * Note that returning anything other than NETDEV_TX_OK will cause the 515 * OS to free the skb. 516 */ 517 netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, 518 struct net_device *net_dev) 519 { 520 struct efx_nic *efx = netdev_priv(net_dev); 521 struct efx_tx_queue *tx_queue; 522 unsigned index, type; 523 524 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); 525 526 /* PTP "event" packet */ 527 if (unlikely(efx_xmit_with_hwtstamp(skb)) && 528 unlikely(efx_ptp_is_ptp_tx(efx, skb))) { 529 return efx_ptp_tx(efx, skb); 530 } 531 532 index = skb_get_queue_mapping(skb); 533 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; 534 if (index >= efx->n_tx_channels) { 535 index -= efx->n_tx_channels; 536 type |= EFX_TXQ_TYPE_HIGHPRI; 537 } 538 tx_queue = efx_get_tx_queue(efx, index, type); 539 540 return efx_enqueue_skb(tx_queue, skb); 541 } 542 543 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) 544 { 545 struct efx_nic *efx = tx_queue->efx; 546 547 /* Must be inverse of queue lookup in efx_hard_start_xmit() */ 548 tx_queue->core_txq = 549 netdev_get_tx_queue(efx->net_dev, 550 tx_queue->queue / EFX_TXQ_TYPES + 551 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 552 efx->n_tx_channels : 0)); 553 } 554 555 int efx_setup_tc(struct net_device *net_dev, u8 num_tc) 556 { 557 struct efx_nic *efx = netdev_priv(net_dev); 558 struct efx_channel *channel; 559 struct efx_tx_queue *tx_queue; 560 unsigned tc; 561 int rc; 562 563 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) 564 return -EINVAL; 565 566 if (num_tc == net_dev->num_tc) 567 return 0; 568 569 for (tc = 0; tc < num_tc; tc++) { 570 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; 571 net_dev->tc_to_txq[tc].count = efx->n_tx_channels; 572 } 573 574 if (num_tc > net_dev->num_tc) { 575 /* Initialise high-priority queues as necessary */ 576 efx_for_each_channel(channel, efx) { 577 efx_for_each_possible_channel_tx_queue(tx_queue, 578 channel) { 579 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) 580 continue; 581 if (!tx_queue->buffer) { 582 rc = efx_probe_tx_queue(tx_queue); 583 if (rc) 584 return rc; 585 } 586 if (!tx_queue->initialised) 587 efx_init_tx_queue(tx_queue); 588 efx_init_tx_queue_core_txq(tx_queue); 589 } 590 } 591 } else { 592 /* Reduce number of classes before number of queues */ 593 net_dev->num_tc = num_tc; 594 } 595 596 rc = netif_set_real_num_tx_queues(net_dev, 597 max_t(int, num_tc, 1) * 598 efx->n_tx_channels); 599 if (rc) 600 return rc; 601 602 /* Do not destroy high-priority queues when they become 603 * unused. We would have to flush them first, and it is 604 * fairly difficult to flush a subset of TX queues. Leave 605 * it to efx_fini_channels(). 606 */ 607 608 net_dev->num_tc = num_tc; 609 return 0; 610 } 611 612 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 613 { 614 unsigned fill_level; 615 struct efx_nic *efx = tx_queue->efx; 616 struct efx_tx_queue *txq2; 617 unsigned int pkts_compl = 0, bytes_compl = 0; 618 619 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 620 621 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 622 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 623 624 if (pkts_compl > 1) 625 ++tx_queue->merge_events; 626 627 /* See if we need to restart the netif queue. This memory 628 * barrier ensures that we write read_count (inside 629 * efx_dequeue_buffers()) before reading the queue status. 630 */ 631 smp_mb(); 632 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 633 likely(efx->port_enabled) && 634 likely(netif_device_present(efx->net_dev))) { 635 txq2 = efx_tx_queue_partner(tx_queue); 636 fill_level = max(tx_queue->insert_count - tx_queue->read_count, 637 txq2->insert_count - txq2->read_count); 638 if (fill_level <= efx->txq_wake_thresh) 639 netif_tx_wake_queue(tx_queue->core_txq); 640 } 641 642 /* Check whether the hardware queue is now empty */ 643 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { 644 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); 645 if (tx_queue->read_count == tx_queue->old_write_count) { 646 smp_mb(); 647 tx_queue->empty_read_count = 648 tx_queue->read_count | EFX_EMPTY_COUNT_VALID; 649 } 650 } 651 } 652 653 /* Size of page-based TSO header buffers. Larger blocks must be 654 * allocated from the heap. 655 */ 656 #define TSOH_STD_SIZE 128 657 #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) 658 659 /* At most half the descriptors in the queue at any time will refer to 660 * a TSO header buffer, since they must always be followed by a 661 * payload descriptor referring to an skb. 662 */ 663 static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue) 664 { 665 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE); 666 } 667 668 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) 669 { 670 struct efx_nic *efx = tx_queue->efx; 671 unsigned int entries; 672 int rc; 673 674 /* Create the smallest power-of-two aligned ring */ 675 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); 676 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); 677 tx_queue->ptr_mask = entries - 1; 678 679 netif_dbg(efx, probe, efx->net_dev, 680 "creating TX queue %d size %#x mask %#x\n", 681 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); 682 683 /* Allocate software ring */ 684 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), 685 GFP_KERNEL); 686 if (!tx_queue->buffer) 687 return -ENOMEM; 688 689 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) { 690 tx_queue->tsoh_page = 691 kcalloc(efx_tsoh_page_count(tx_queue), 692 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL); 693 if (!tx_queue->tsoh_page) { 694 rc = -ENOMEM; 695 goto fail1; 696 } 697 } 698 699 /* Allocate hardware ring */ 700 rc = efx_nic_probe_tx(tx_queue); 701 if (rc) 702 goto fail2; 703 704 return 0; 705 706 fail2: 707 kfree(tx_queue->tsoh_page); 708 tx_queue->tsoh_page = NULL; 709 fail1: 710 kfree(tx_queue->buffer); 711 tx_queue->buffer = NULL; 712 return rc; 713 } 714 715 void efx_init_tx_queue(struct efx_tx_queue *tx_queue) 716 { 717 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 718 "initialising TX queue %d\n", tx_queue->queue); 719 720 tx_queue->insert_count = 0; 721 tx_queue->write_count = 0; 722 tx_queue->old_write_count = 0; 723 tx_queue->read_count = 0; 724 tx_queue->old_read_count = 0; 725 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; 726 727 /* Set up TX descriptor ring */ 728 efx_nic_init_tx(tx_queue); 729 730 tx_queue->initialised = true; 731 } 732 733 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) 734 { 735 struct efx_tx_buffer *buffer; 736 737 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 738 "shutting down TX queue %d\n", tx_queue->queue); 739 740 if (!tx_queue->buffer) 741 return; 742 743 /* Free any buffers left in the ring */ 744 while (tx_queue->read_count != tx_queue->write_count) { 745 unsigned int pkts_compl = 0, bytes_compl = 0; 746 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; 747 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); 748 749 ++tx_queue->read_count; 750 } 751 netdev_tx_reset_queue(tx_queue->core_txq); 752 } 753 754 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 755 { 756 int i; 757 758 if (!tx_queue->buffer) 759 return; 760 761 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 762 "destroying TX queue %d\n", tx_queue->queue); 763 efx_nic_remove_tx(tx_queue); 764 765 if (tx_queue->tsoh_page) { 766 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++) 767 efx_nic_free_buffer(tx_queue->efx, 768 &tx_queue->tsoh_page[i]); 769 kfree(tx_queue->tsoh_page); 770 tx_queue->tsoh_page = NULL; 771 } 772 773 kfree(tx_queue->buffer); 774 tx_queue->buffer = NULL; 775 } 776 777 778 /* Efx TCP segmentation acceleration. 779 * 780 * Why? Because by doing it here in the driver we can go significantly 781 * faster than the GSO. 782 * 783 * Requires TX checksum offload support. 784 */ 785 786 #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 787 788 /** 789 * struct tso_state - TSO state for an SKB 790 * @out_len: Remaining length in current segment 791 * @seqnum: Current sequence number 792 * @ipv4_id: Current IPv4 ID, host endian 793 * @packet_space: Remaining space in current packet 794 * @dma_addr: DMA address of current position 795 * @in_len: Remaining length in current SKB fragment 796 * @unmap_len: Length of SKB fragment 797 * @unmap_addr: DMA address of SKB fragment 798 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0 799 * @protocol: Network protocol (after any VLAN header) 800 * @ip_off: Offset of IP header 801 * @tcp_off: Offset of TCP header 802 * @header_len: Number of bytes of header 803 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload 804 * @header_dma_addr: Header DMA address, when using option descriptors 805 * @header_unmap_len: Header DMA mapped length, or 0 if not using option 806 * descriptors 807 * 808 * The state used during segmentation. It is put into this data structure 809 * just to make it easy to pass into inline functions. 810 */ 811 struct tso_state { 812 /* Output position */ 813 unsigned out_len; 814 unsigned seqnum; 815 u16 ipv4_id; 816 unsigned packet_space; 817 818 /* Input position */ 819 dma_addr_t dma_addr; 820 unsigned in_len; 821 unsigned unmap_len; 822 dma_addr_t unmap_addr; 823 unsigned short dma_flags; 824 825 __be16 protocol; 826 unsigned int ip_off; 827 unsigned int tcp_off; 828 unsigned header_len; 829 unsigned int ip_base_len; 830 dma_addr_t header_dma_addr; 831 unsigned int header_unmap_len; 832 }; 833 834 835 /* 836 * Verify that our various assumptions about sk_buffs and the conditions 837 * under which TSO will be attempted hold true. Return the protocol number. 838 */ 839 static __be16 efx_tso_check_protocol(struct sk_buff *skb) 840 { 841 __be16 protocol = skb->protocol; 842 843 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != 844 protocol); 845 if (protocol == htons(ETH_P_8021Q)) { 846 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 847 protocol = veh->h_vlan_encapsulated_proto; 848 } 849 850 if (protocol == htons(ETH_P_IP)) { 851 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); 852 } else { 853 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); 854 EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); 855 } 856 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) 857 + (tcp_hdr(skb)->doff << 2u)) > 858 skb_headlen(skb)); 859 860 return protocol; 861 } 862 863 static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, 864 struct efx_tx_buffer *buffer, unsigned int len) 865 { 866 u8 *result; 867 868 EFX_BUG_ON_PARANOID(buffer->len); 869 EFX_BUG_ON_PARANOID(buffer->flags); 870 EFX_BUG_ON_PARANOID(buffer->unmap_len); 871 872 if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) { 873 unsigned index = 874 (tx_queue->insert_count & tx_queue->ptr_mask) / 2; 875 struct efx_buffer *page_buf = 876 &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; 877 unsigned offset = 878 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN; 879 880 if (unlikely(!page_buf->addr) && 881 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, 882 GFP_ATOMIC)) 883 return NULL; 884 885 result = (u8 *)page_buf->addr + offset; 886 buffer->dma_addr = page_buf->dma_addr + offset; 887 buffer->flags = EFX_TX_BUF_CONT; 888 } else { 889 tx_queue->tso_long_headers++; 890 891 buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC); 892 if (unlikely(!buffer->heap_buf)) 893 return NULL; 894 result = (u8 *)buffer->heap_buf + NET_IP_ALIGN; 895 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; 896 } 897 898 buffer->len = len; 899 900 return result; 901 } 902 903 /** 904 * efx_tx_queue_insert - push descriptors onto the TX queue 905 * @tx_queue: Efx TX queue 906 * @dma_addr: DMA address of fragment 907 * @len: Length of fragment 908 * @final_buffer: The final buffer inserted into the queue 909 * 910 * Push descriptors onto the TX queue. 911 */ 912 static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 913 dma_addr_t dma_addr, unsigned len, 914 struct efx_tx_buffer **final_buffer) 915 { 916 struct efx_tx_buffer *buffer; 917 struct efx_nic *efx = tx_queue->efx; 918 unsigned dma_len; 919 920 EFX_BUG_ON_PARANOID(len <= 0); 921 922 while (1) { 923 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 924 ++tx_queue->insert_count; 925 926 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 927 tx_queue->read_count >= 928 efx->txq_entries); 929 930 buffer->dma_addr = dma_addr; 931 932 dma_len = efx_max_tx_len(efx, dma_addr); 933 934 /* If there is enough space to send then do so */ 935 if (dma_len >= len) 936 break; 937 938 buffer->len = dma_len; 939 buffer->flags = EFX_TX_BUF_CONT; 940 dma_addr += dma_len; 941 len -= dma_len; 942 } 943 944 EFX_BUG_ON_PARANOID(!len); 945 buffer->len = len; 946 *final_buffer = buffer; 947 } 948 949 950 /* 951 * Put a TSO header into the TX queue. 952 * 953 * This is special-cased because we know that it is small enough to fit in 954 * a single fragment, and we know it doesn't cross a page boundary. It 955 * also allows us to not worry about end-of-packet etc. 956 */ 957 static int efx_tso_put_header(struct efx_tx_queue *tx_queue, 958 struct efx_tx_buffer *buffer, u8 *header) 959 { 960 if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) { 961 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, 962 header, buffer->len, 963 DMA_TO_DEVICE); 964 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, 965 buffer->dma_addr))) { 966 kfree(buffer->heap_buf); 967 buffer->len = 0; 968 buffer->flags = 0; 969 return -ENOMEM; 970 } 971 buffer->unmap_len = buffer->len; 972 buffer->dma_offset = 0; 973 buffer->flags |= EFX_TX_BUF_MAP_SINGLE; 974 } 975 976 ++tx_queue->insert_count; 977 return 0; 978 } 979 980 981 /* Remove buffers put into a tx_queue. None of the buffers must have 982 * an skb attached. 983 */ 984 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, 985 unsigned int insert_count) 986 { 987 struct efx_tx_buffer *buffer; 988 989 /* Work backwards until we hit the original insert pointer value */ 990 while (tx_queue->insert_count != insert_count) { 991 --tx_queue->insert_count; 992 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); 993 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); 994 } 995 } 996 997 998 /* Parse the SKB header and initialise state. */ 999 static int tso_start(struct tso_state *st, struct efx_nic *efx, 1000 const struct sk_buff *skb) 1001 { 1002 bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; 1003 struct device *dma_dev = &efx->pci_dev->dev; 1004 unsigned int header_len, in_len; 1005 dma_addr_t dma_addr; 1006 1007 st->ip_off = skb_network_header(skb) - skb->data; 1008 st->tcp_off = skb_transport_header(skb) - skb->data; 1009 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); 1010 in_len = skb_headlen(skb) - header_len; 1011 st->header_len = header_len; 1012 st->in_len = in_len; 1013 if (st->protocol == htons(ETH_P_IP)) { 1014 st->ip_base_len = st->header_len - st->ip_off; 1015 st->ipv4_id = ntohs(ip_hdr(skb)->id); 1016 } else { 1017 st->ip_base_len = st->header_len - st->tcp_off; 1018 st->ipv4_id = 0; 1019 } 1020 st->seqnum = ntohl(tcp_hdr(skb)->seq); 1021 1022 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 1023 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 1024 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 1025 1026 st->out_len = skb->len - header_len; 1027 1028 if (!use_opt_desc) { 1029 st->header_unmap_len = 0; 1030 1031 if (likely(in_len == 0)) { 1032 st->dma_flags = 0; 1033 st->unmap_len = 0; 1034 return 0; 1035 } 1036 1037 dma_addr = dma_map_single(dma_dev, skb->data + header_len, 1038 in_len, DMA_TO_DEVICE); 1039 st->dma_flags = EFX_TX_BUF_MAP_SINGLE; 1040 st->dma_addr = dma_addr; 1041 st->unmap_addr = dma_addr; 1042 st->unmap_len = in_len; 1043 } else { 1044 dma_addr = dma_map_single(dma_dev, skb->data, 1045 skb_headlen(skb), DMA_TO_DEVICE); 1046 st->header_dma_addr = dma_addr; 1047 st->header_unmap_len = skb_headlen(skb); 1048 st->dma_flags = 0; 1049 st->dma_addr = dma_addr + header_len; 1050 st->unmap_len = 0; 1051 } 1052 1053 return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0; 1054 } 1055 1056 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 1057 skb_frag_t *frag) 1058 { 1059 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, 1060 skb_frag_size(frag), DMA_TO_DEVICE); 1061 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 1062 st->dma_flags = 0; 1063 st->unmap_len = skb_frag_size(frag); 1064 st->in_len = skb_frag_size(frag); 1065 st->dma_addr = st->unmap_addr; 1066 return 0; 1067 } 1068 return -ENOMEM; 1069 } 1070 1071 1072 /** 1073 * tso_fill_packet_with_fragment - form descriptors for the current fragment 1074 * @tx_queue: Efx TX queue 1075 * @skb: Socket buffer 1076 * @st: TSO state 1077 * 1078 * Form descriptors for the current fragment, until we reach the end 1079 * of fragment or end-of-packet. 1080 */ 1081 static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 1082 const struct sk_buff *skb, 1083 struct tso_state *st) 1084 { 1085 struct efx_tx_buffer *buffer; 1086 int n; 1087 1088 if (st->in_len == 0) 1089 return; 1090 if (st->packet_space == 0) 1091 return; 1092 1093 EFX_BUG_ON_PARANOID(st->in_len <= 0); 1094 EFX_BUG_ON_PARANOID(st->packet_space <= 0); 1095 1096 n = min(st->in_len, st->packet_space); 1097 1098 st->packet_space -= n; 1099 st->out_len -= n; 1100 st->in_len -= n; 1101 1102 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); 1103 1104 if (st->out_len == 0) { 1105 /* Transfer ownership of the skb */ 1106 buffer->skb = skb; 1107 buffer->flags = EFX_TX_BUF_SKB; 1108 } else if (st->packet_space != 0) { 1109 buffer->flags = EFX_TX_BUF_CONT; 1110 } 1111 1112 if (st->in_len == 0) { 1113 /* Transfer ownership of the DMA mapping */ 1114 buffer->unmap_len = st->unmap_len; 1115 buffer->dma_offset = buffer->unmap_len - buffer->len; 1116 buffer->flags |= st->dma_flags; 1117 st->unmap_len = 0; 1118 } 1119 1120 st->dma_addr += n; 1121 } 1122 1123 1124 /** 1125 * tso_start_new_packet - generate a new header and prepare for the new packet 1126 * @tx_queue: Efx TX queue 1127 * @skb: Socket buffer 1128 * @st: TSO state 1129 * 1130 * Generate a new header and prepare for the new packet. Return 0 on 1131 * success, or -%ENOMEM if failed to alloc header. 1132 */ 1133 static int tso_start_new_packet(struct efx_tx_queue *tx_queue, 1134 const struct sk_buff *skb, 1135 struct tso_state *st) 1136 { 1137 struct efx_tx_buffer *buffer = 1138 efx_tx_queue_get_insert_buffer(tx_queue); 1139 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size; 1140 u8 tcp_flags_clear; 1141 1142 if (!is_last) { 1143 st->packet_space = skb_shinfo(skb)->gso_size; 1144 tcp_flags_clear = 0x09; /* mask out FIN and PSH */ 1145 } else { 1146 st->packet_space = st->out_len; 1147 tcp_flags_clear = 0x00; 1148 } 1149 1150 if (!st->header_unmap_len) { 1151 /* Allocate and insert a DMA-mapped header buffer. */ 1152 struct tcphdr *tsoh_th; 1153 unsigned ip_length; 1154 u8 *header; 1155 int rc; 1156 1157 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); 1158 if (!header) 1159 return -ENOMEM; 1160 1161 tsoh_th = (struct tcphdr *)(header + st->tcp_off); 1162 1163 /* Copy and update the headers. */ 1164 memcpy(header, skb->data, st->header_len); 1165 1166 tsoh_th->seq = htonl(st->seqnum); 1167 ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear; 1168 1169 ip_length = st->ip_base_len + st->packet_space; 1170 1171 if (st->protocol == htons(ETH_P_IP)) { 1172 struct iphdr *tsoh_iph = 1173 (struct iphdr *)(header + st->ip_off); 1174 1175 tsoh_iph->tot_len = htons(ip_length); 1176 tsoh_iph->id = htons(st->ipv4_id); 1177 } else { 1178 struct ipv6hdr *tsoh_iph = 1179 (struct ipv6hdr *)(header + st->ip_off); 1180 1181 tsoh_iph->payload_len = htons(ip_length); 1182 } 1183 1184 rc = efx_tso_put_header(tx_queue, buffer, header); 1185 if (unlikely(rc)) 1186 return rc; 1187 } else { 1188 /* Send the original headers with a TSO option descriptor 1189 * in front 1190 */ 1191 u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear; 1192 1193 buffer->flags = EFX_TX_BUF_OPTION; 1194 buffer->len = 0; 1195 buffer->unmap_len = 0; 1196 EFX_POPULATE_QWORD_5(buffer->option, 1197 ESF_DZ_TX_DESC_IS_OPT, 1, 1198 ESF_DZ_TX_OPTION_TYPE, 1199 ESE_DZ_TX_OPTION_DESC_TSO, 1200 ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, 1201 ESF_DZ_TX_TSO_IP_ID, st->ipv4_id, 1202 ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum); 1203 ++tx_queue->insert_count; 1204 1205 /* We mapped the headers in tso_start(). Unmap them 1206 * when the last segment is completed. 1207 */ 1208 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 1209 buffer->dma_addr = st->header_dma_addr; 1210 buffer->len = st->header_len; 1211 if (is_last) { 1212 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE; 1213 buffer->unmap_len = st->header_unmap_len; 1214 buffer->dma_offset = 0; 1215 /* Ensure we only unmap them once in case of a 1216 * later DMA mapping error and rollback 1217 */ 1218 st->header_unmap_len = 0; 1219 } else { 1220 buffer->flags = EFX_TX_BUF_CONT; 1221 buffer->unmap_len = 0; 1222 } 1223 ++tx_queue->insert_count; 1224 } 1225 1226 st->seqnum += skb_shinfo(skb)->gso_size; 1227 1228 /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 1229 ++st->ipv4_id; 1230 1231 ++tx_queue->tso_packets; 1232 1233 ++tx_queue->tx_packets; 1234 1235 return 0; 1236 } 1237 1238 1239 /** 1240 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer 1241 * @tx_queue: Efx TX queue 1242 * @skb: Socket buffer 1243 * 1244 * Context: You must hold netif_tx_lock() to call this function. 1245 * 1246 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if 1247 * @skb was not enqueued. In all cases @skb is consumed. Return 1248 * %NETDEV_TX_OK. 1249 */ 1250 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 1251 struct sk_buff *skb) 1252 { 1253 struct efx_nic *efx = tx_queue->efx; 1254 unsigned int old_insert_count = tx_queue->insert_count; 1255 int frag_i, rc; 1256 struct tso_state state; 1257 1258 /* Find the packet protocol and sanity-check it */ 1259 state.protocol = efx_tso_check_protocol(skb); 1260 1261 EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count); 1262 1263 rc = tso_start(&state, efx, skb); 1264 if (rc) 1265 goto mem_err; 1266 1267 if (likely(state.in_len == 0)) { 1268 /* Grab the first payload fragment. */ 1269 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); 1270 frag_i = 0; 1271 rc = tso_get_fragment(&state, efx, 1272 skb_shinfo(skb)->frags + frag_i); 1273 if (rc) 1274 goto mem_err; 1275 } else { 1276 /* Payload starts in the header area. */ 1277 frag_i = -1; 1278 } 1279 1280 if (tso_start_new_packet(tx_queue, skb, &state) < 0) 1281 goto mem_err; 1282 1283 while (1) { 1284 tso_fill_packet_with_fragment(tx_queue, skb, &state); 1285 1286 /* Move onto the next fragment? */ 1287 if (state.in_len == 0) { 1288 if (++frag_i >= skb_shinfo(skb)->nr_frags) 1289 /* End of payload reached. */ 1290 break; 1291 rc = tso_get_fragment(&state, efx, 1292 skb_shinfo(skb)->frags + frag_i); 1293 if (rc) 1294 goto mem_err; 1295 } 1296 1297 /* Start at new packet? */ 1298 if (state.packet_space == 0 && 1299 tso_start_new_packet(tx_queue, skb, &state) < 0) 1300 goto mem_err; 1301 } 1302 1303 netdev_tx_sent_queue(tx_queue->core_txq, skb->len); 1304 1305 efx_tx_maybe_stop_queue(tx_queue); 1306 1307 /* Pass off to hardware */ 1308 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) 1309 efx_nic_push_buffers(tx_queue); 1310 1311 tx_queue->tso_bursts++; 1312 return NETDEV_TX_OK; 1313 1314 mem_err: 1315 netif_err(efx, tx_err, efx->net_dev, 1316 "Out of memory for TSO headers, or DMA mapping error\n"); 1317 dev_kfree_skb_any(skb); 1318 1319 /* Free the DMA mapping we were in the process of writing out */ 1320 if (state.unmap_len) { 1321 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE) 1322 dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, 1323 state.unmap_len, DMA_TO_DEVICE); 1324 else 1325 dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr, 1326 state.unmap_len, DMA_TO_DEVICE); 1327 } 1328 1329 /* Free the header DMA mapping, if using option descriptors */ 1330 if (state.header_unmap_len) 1331 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr, 1332 state.header_unmap_len, DMA_TO_DEVICE); 1333 1334 efx_enqueue_unwind(tx_queue, old_insert_count); 1335 return NETDEV_TX_OK; 1336 } 1337