// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"

/* Number of pages needed for the copy-buffer array: the ring has
 * (ptr_mask + 1) entries and each page holds (PAGE_SIZE >> EFX_TX_CB_ORDER)
 * copy buffers, i.e. one (1 << EFX_TX_CB_ORDER)-byte buffer per entry.
 */
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

/* Allocate the software and hardware state for a TX queue.
 *
 * Sizes the descriptor ring (power of two, at least EFX_MIN_DMAQ_SIZE),
 * allocates the software buffer ring and copy-buffer page array, then
 * probes the hardware ring.  Returns 0 on success or a negative errno;
 * on failure all partial allocations are released.
 */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring, determine TXQ type */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	/* Register this queue in the channel's per-type lookup table */
	tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

/* Reset the software state of a TX queue and initialise the hardware ring. */
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	/* Reset all producer/consumer counters to an empty ring */
	tx_queue->insert_count = 0;
	tx_queue->notify_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_pending = false;
	/* MAC TX timestamping is only enabled on the PTP channel */
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
	tx_queue->tso_version = 0;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

/* Shut down a TX queue: release every buffer still on the ring and
 * reset the netdev byte-queue-limit state.
 */
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	tx_queue->initialised = false;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		unsigned int efv_pkts_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
				   &efv_pkts_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_pending = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

/* Free the software and hardware resources allocated by
 * efx_probe_tx_queue(), and unregister the queue from its channel.
 */
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}

/* Release one completed TX buffer: unmap its DMA (if it owns a mapping),
 * complete and free any attached skb or XDP frame, and accumulate
 * completed packet/byte counts into the caller's counters.
 */
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl,
			unsigned int *efv_pkts_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		/* Unmap with the API matching how the buffer was mapped */
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		/* EFV (representor) completions are counted separately
		 * and contribute no bytes here.
		 */
		if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) {
			EFX_WARN_ON_PARANOID(!efv_pkts_compl);
			(*efv_pkts_compl)++;
		} else {
			EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
			(*pkts_compl)++;
			(*bytes_compl) += skb->len;
		}

		/* Deliver a hardware TX timestamp if one was captured,
		 * then clear it so it is only reported once.
		 */
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	/* Mark the slot free for reuse */
	buffer->len = 0;
	buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl,
				unsigned int *efv_pkts_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	/* Consume entries from read_count up to and including @index */
	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		/* A completion for an unused slot means hardware and
		 * driver disagree about ring state; schedule a reset
		 * to recover rather than corrupt accounting.
		 */
		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
				   efv_pkts_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* If the queue has been fully drained (read caught up with write),
 * latch read_count into empty_read_count with EFX_EMPTY_COUNT_VALID set.
 */
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Process TX completions up to and including @index: dequeue the buffers,
 * update per-queue statistics, and wake the core netdev queue if it was
 * stopped and the fill level has dropped below the wake threshold.
 *
 * Returns the total number of completed packets (netdev + EFV).
 */
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	unsigned int efv_pkts_compl = 0;
	struct efx_nic *efx = tx_queue->efx;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
			    &efv_pkts_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	/* More than one packet per completion event counts as a merge */
	if (pkts_compl + efv_pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue. This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = efx_channel_tx_fill_level(tx_queue->channel);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);

	return pkts_compl + efv_pkts_compl;
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	unsigned int efv_pkts_compl = 0;
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
				   &efv_pkts_compl);
	}
}

/* Create continuation descriptors for a DMA-contiguous chunk, splitting
 * it as required by the NIC's per-descriptor length limit.  Returns the
 * last buffer written, so the caller can attach skb/unmap state to it.
 */
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

		/* tx_limit_len is optional; without it the whole
		 * remainder fits in a single descriptor.
		 */
		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Length of the headers preceding the TSO payload: offset of the
 * transport header plus the TCP header length (doff is in 32-bit words).
 * Uses the inner headers for encapsulated packets.
 */
int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_header(skb) -
				skb->data +
				(inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);
	return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 *
 * @segment_count is non-zero for TSO packets, in which case the header
 * gets its own descriptor when it does not cover the whole linear area.
 * Returns 0 on success or -EIO on a DMA mapping failure (the caller is
 * expected to unwind via efx_enqueue_unwind()).
 */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header in to a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

/* Worst-case number of ring descriptors needed to transmit one SKB */
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
						EFX_PAGE_SIZE));

	return max_descs;
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	/* Segment the GSO skb in software */
	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	/* The original skb is no longer needed once segmented */
	dev_consume_skb_any(skb);

	/* Enqueue each resulting segment individually */
	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}