// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring, determine TXQ type */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
	return 0;

fail2:
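	/* Unwind in reverse order of allocation: only the hardware ring
	 * probe failed, so just the cb_page array and the software ring
	 * remain to be freed.
	 */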
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->notify_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_pending = false;
	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
	tx_queue->tso_version = 0;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_pending = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

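/* Free all resources allocated by efx_probe_tx_queue(). This is normally
 * called after efx_fini_tx_queue() has drained any buffers left in the ring.
 */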
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}

void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			struct efx_tx_buffer *buffer,
			unsigned int *pkts_compl,
			unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
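		/* The skb completed normally, so use the "consume" variant;
		 * dev_kfree_skb_any() would be reported as a drop by kernel
		 * drop-monitor tracing.
		 */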
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed.
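			 * The barrier below orders the preceding read_count
			 * updates before the store to empty_read_count, so a
			 * paired reader in the xmit path sees a consistent
			 * pair of values.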
			 */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue. This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = efx_channel_tx_fill_level(tx_queue->channel);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_xmit_done_check_empty(tx_queue);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers may have an skb attached.
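 * This is used to unwind a partially-built descriptor chain, e.g. after a
 * DMA mapping failure while enqueuing a packet.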
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
				       dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_header(skb) -
				skb->data +
				(inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_header(skb) - skb->data +
				(tcp_hdr(skb)->doff << 2u);
	return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
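 * Returns 0 on success or -EIO if a DMA mapping fails; in the failure case
 * the caller is responsible for unwinding any descriptors already created,
 * e.g. via efx_enqueue_unwind().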
 */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
		    unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header into a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
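			 * Only this buffer carries EFX_TX_BUF_SKB, so the
			 * skb is completed (and freed) exactly once.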
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

/*
 * Fall back to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
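 * On success the original skb has been consumed and each software-generated
 * segment has been handed to efx_enqueue_skb() individually.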
430740acc15SEdward Cree */ 431740acc15SEdward Cree int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb) 432740acc15SEdward Cree { 433740acc15SEdward Cree struct sk_buff *segments, *next; 434740acc15SEdward Cree 435740acc15SEdward Cree segments = skb_gso_segment(skb, 0); 436740acc15SEdward Cree if (IS_ERR(segments)) 437740acc15SEdward Cree return PTR_ERR(segments); 438740acc15SEdward Cree 439740acc15SEdward Cree dev_consume_skb_any(skb); 440740acc15SEdward Cree 441740acc15SEdward Cree skb_list_walk_safe(segments, skb, next) { 442740acc15SEdward Cree skb_mark_not_on_list(skb); 443740acc15SEdward Cree efx_enqueue_skb(tx_queue, skb); 444740acc15SEdward Cree } 445740acc15SEdward Cree 446740acc15SEdward Cree return 0; 447740acc15SEdward Cree } 448