/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround */
	if (EFX_WORKAROUND_5391(efx))
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped, and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
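 *
 * The skb is always consumed: if DMA mapping fails it is freed here and
 * %NETDEV_TX_OK is still returned, so the net core does not retry it.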
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
			buffer = &tx_queue->buffer[insert_ptr];
			EFX_BUG_ON_PARANOID(buffer->flags);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
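		/* Earlier descriptors for this fragment keep unmap_len == 0,
		 * so efx_dequeue_buffer() performs the unmap exactly once,
		 * from this final descriptor.
		 */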
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
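 *
 * The pkts_compl and bytes_compl counters are incremented for each
 * completed skb so that the caller can report them to
 * netdev_tx_completed_queue() for BQL accounting.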
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
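 *
 * For example, with 4K pages TSOH_PER_PAGE is 32, so a 1024-entry ring
 * needs at most 512 header buffers and therefore
 * DIV_ROUND_UP(512, 32) = 16 pages (see efx_tsoh_page_count() below).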
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
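 * Zero when the architecture handles unaligned accesses efficiently;
 * otherwise NET_IP_ALIGN, so that the IP header following the 14-byte
 * Ethernet header ends up 4-byte aligned.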
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
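 *
 * The checks are EFX_BUG_ON_PARANOID assertions, so they cost nothing in
 * non-debug builds; a single VLAN tag is skipped over when determining
 * the protocol.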
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue: Efx TX queue
 * @dma_addr: DMA address of fragment
 * @len: Length of fragment
 * @final_buffer: The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, insert_ptr;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->flags);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
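 *
 * Headers taken from the per-queue page pool are already DMA-mapped;
 * only headers allocated from the heap are mapped here.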
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}


/* Parse the SKB header and initialise state.
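 * Records the IP and TCP header offsets, total header length, starting
 * TCP sequence number and (for IPv4) the starting IP ID.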
 */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->dma_flags = 0;
}

static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}

static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
				 const struct sk_buff *skb)
{
	int hl = st->header_len;
	int len = skb_headlen(skb) - hl;

	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
					len, DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->unmap_len = len;
		st->in_len = len;
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
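 *
 * The descriptor that completes the skb takes ownership of the skb, and
 * the last descriptor of each input fragment takes ownership of the DMA
 * unmapping for that fragment.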
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 * @st: TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;
	int rc;

	/* Allocate and insert a DMA-mapped header buffer. */
	header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
	if (!header)
		return -ENOMEM;

	tsoh_th = (struct tcphdr *)(header + st->tcp_off);

	/* Copy and update the headers.
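	 * memcpy() copies the original headers, then only the per-segment
	 * fields are rewritten: TCP sequence number and FIN/PSH flags, and
	 * the IPv4 total length and ID (or IPv6 payload length).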
	 */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		st->packet_space = skb_shinfo(skb)->gso_size;
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		st->packet_space = st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}
	ip_length = st->ip_base_len + st->packet_space;

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + st->ip_off);

		tsoh_iph->payload_len = htons(ip_length);
	}

	rc = efx_tso_put_header(tx_queue, buffer, header);
	if (unlikely(rc))
		return rc;

	++tx_queue->tso_packets;

	return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue: Efx TX queue
 * @skb: Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO.  In all cases @skb is
 * consumed and %NETDEV_TX_OK is returned, even on error.
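 * On a memory allocation or DMA-mapping failure the descriptors built so
 * far are unwound and the skb is dropped.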
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return NETDEV_TX_OK;
}