1cdedef59SAnirudh Venkataramanan // SPDX-License-Identifier: GPL-2.0 2cdedef59SAnirudh Venkataramanan /* Copyright (c) 2018, Intel Corporation. */ 3cdedef59SAnirudh Venkataramanan 4cdedef59SAnirudh Venkataramanan /* The driver transmit and receive code */ 5cdedef59SAnirudh Venkataramanan 6cdedef59SAnirudh Venkataramanan #include <linux/prefetch.h> 7cdedef59SAnirudh Venkataramanan #include <linux/mm.h> 8cdedef59SAnirudh Venkataramanan #include "ice.h" 9cdedef59SAnirudh Venkataramanan 102b245cb2SAnirudh Venkataramanan #define ICE_RX_HDR_SIZE 256 112b245cb2SAnirudh Venkataramanan 12cdedef59SAnirudh Venkataramanan /** 13cdedef59SAnirudh Venkataramanan * ice_unmap_and_free_tx_buf - Release a Tx buffer 14cdedef59SAnirudh Venkataramanan * @ring: the ring that owns the buffer 15cdedef59SAnirudh Venkataramanan * @tx_buf: the buffer to free 16cdedef59SAnirudh Venkataramanan */ 17cdedef59SAnirudh Venkataramanan static void 18cdedef59SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) 19cdedef59SAnirudh Venkataramanan { 20cdedef59SAnirudh Venkataramanan if (tx_buf->skb) { 21cdedef59SAnirudh Venkataramanan dev_kfree_skb_any(tx_buf->skb); 22cdedef59SAnirudh Venkataramanan if (dma_unmap_len(tx_buf, len)) 23cdedef59SAnirudh Venkataramanan dma_unmap_single(ring->dev, 24cdedef59SAnirudh Venkataramanan dma_unmap_addr(tx_buf, dma), 25cdedef59SAnirudh Venkataramanan dma_unmap_len(tx_buf, len), 26cdedef59SAnirudh Venkataramanan DMA_TO_DEVICE); 27cdedef59SAnirudh Venkataramanan } else if (dma_unmap_len(tx_buf, len)) { 28cdedef59SAnirudh Venkataramanan dma_unmap_page(ring->dev, 29cdedef59SAnirudh Venkataramanan dma_unmap_addr(tx_buf, dma), 30cdedef59SAnirudh Venkataramanan dma_unmap_len(tx_buf, len), 31cdedef59SAnirudh Venkataramanan DMA_TO_DEVICE); 32cdedef59SAnirudh Venkataramanan } 33cdedef59SAnirudh Venkataramanan 34cdedef59SAnirudh Venkataramanan tx_buf->next_to_watch = NULL; 35cdedef59SAnirudh Venkataramanan tx_buf->skb = NULL; 
36cdedef59SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, 0); 37cdedef59SAnirudh Venkataramanan /* tx_buf must be completely set up in the transmit path */ 38cdedef59SAnirudh Venkataramanan } 39cdedef59SAnirudh Venkataramanan 40cdedef59SAnirudh Venkataramanan static struct netdev_queue *txring_txq(const struct ice_ring *ring) 41cdedef59SAnirudh Venkataramanan { 42cdedef59SAnirudh Venkataramanan return netdev_get_tx_queue(ring->netdev, ring->q_index); 43cdedef59SAnirudh Venkataramanan } 44cdedef59SAnirudh Venkataramanan 45cdedef59SAnirudh Venkataramanan /** 46cdedef59SAnirudh Venkataramanan * ice_clean_tx_ring - Free any empty Tx buffers 47cdedef59SAnirudh Venkataramanan * @tx_ring: ring to be cleaned 48cdedef59SAnirudh Venkataramanan */ 49cdedef59SAnirudh Venkataramanan void ice_clean_tx_ring(struct ice_ring *tx_ring) 50cdedef59SAnirudh Venkataramanan { 51cdedef59SAnirudh Venkataramanan u16 i; 52cdedef59SAnirudh Venkataramanan 53cdedef59SAnirudh Venkataramanan /* ring already cleared, nothing to do */ 54cdedef59SAnirudh Venkataramanan if (!tx_ring->tx_buf) 55cdedef59SAnirudh Venkataramanan return; 56cdedef59SAnirudh Venkataramanan 57cdedef59SAnirudh Venkataramanan /* Free all the Tx ring sk_bufss */ 58cdedef59SAnirudh Venkataramanan for (i = 0; i < tx_ring->count; i++) 59cdedef59SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); 60cdedef59SAnirudh Venkataramanan 61c6dfd690SBruce Allan memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); 62cdedef59SAnirudh Venkataramanan 63cdedef59SAnirudh Venkataramanan /* Zero out the descriptor ring */ 64cdedef59SAnirudh Venkataramanan memset(tx_ring->desc, 0, tx_ring->size); 65cdedef59SAnirudh Venkataramanan 66cdedef59SAnirudh Venkataramanan tx_ring->next_to_use = 0; 67cdedef59SAnirudh Venkataramanan tx_ring->next_to_clean = 0; 68cdedef59SAnirudh Venkataramanan 69cdedef59SAnirudh Venkataramanan if (!tx_ring->netdev) 70cdedef59SAnirudh Venkataramanan return; 
71cdedef59SAnirudh Venkataramanan 72cdedef59SAnirudh Venkataramanan /* cleanup Tx queue statistics */ 73cdedef59SAnirudh Venkataramanan netdev_tx_reset_queue(txring_txq(tx_ring)); 74cdedef59SAnirudh Venkataramanan } 75cdedef59SAnirudh Venkataramanan 76cdedef59SAnirudh Venkataramanan /** 77cdedef59SAnirudh Venkataramanan * ice_free_tx_ring - Free Tx resources per queue 78cdedef59SAnirudh Venkataramanan * @tx_ring: Tx descriptor ring for a specific queue 79cdedef59SAnirudh Venkataramanan * 80cdedef59SAnirudh Venkataramanan * Free all transmit software resources 81cdedef59SAnirudh Venkataramanan */ 82cdedef59SAnirudh Venkataramanan void ice_free_tx_ring(struct ice_ring *tx_ring) 83cdedef59SAnirudh Venkataramanan { 84cdedef59SAnirudh Venkataramanan ice_clean_tx_ring(tx_ring); 85cdedef59SAnirudh Venkataramanan devm_kfree(tx_ring->dev, tx_ring->tx_buf); 86cdedef59SAnirudh Venkataramanan tx_ring->tx_buf = NULL; 87cdedef59SAnirudh Venkataramanan 88cdedef59SAnirudh Venkataramanan if (tx_ring->desc) { 89cdedef59SAnirudh Venkataramanan dmam_free_coherent(tx_ring->dev, tx_ring->size, 90cdedef59SAnirudh Venkataramanan tx_ring->desc, tx_ring->dma); 91cdedef59SAnirudh Venkataramanan tx_ring->desc = NULL; 92cdedef59SAnirudh Venkataramanan } 93cdedef59SAnirudh Venkataramanan } 94cdedef59SAnirudh Venkataramanan 95cdedef59SAnirudh Venkataramanan /** 962b245cb2SAnirudh Venkataramanan * ice_clean_tx_irq - Reclaim resources after transmit completes 972b245cb2SAnirudh Venkataramanan * @vsi: the VSI we care about 982b245cb2SAnirudh Venkataramanan * @tx_ring: Tx ring to clean 992b245cb2SAnirudh Venkataramanan * @napi_budget: Used to determine if we are in netpoll 1002b245cb2SAnirudh Venkataramanan * 1012b245cb2SAnirudh Venkataramanan * Returns true if there's any budget left (e.g. 
the clean is finished) 1022b245cb2SAnirudh Venkataramanan */ 1032b245cb2SAnirudh Venkataramanan static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, 1042b245cb2SAnirudh Venkataramanan int napi_budget) 1052b245cb2SAnirudh Venkataramanan { 1062b245cb2SAnirudh Venkataramanan unsigned int total_bytes = 0, total_pkts = 0; 1072b245cb2SAnirudh Venkataramanan unsigned int budget = vsi->work_lmt; 1082b245cb2SAnirudh Venkataramanan s16 i = tx_ring->next_to_clean; 1092b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 1102b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 1112b245cb2SAnirudh Venkataramanan 1122b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 1132b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 1142b245cb2SAnirudh Venkataramanan i -= tx_ring->count; 1152b245cb2SAnirudh Venkataramanan 1162b245cb2SAnirudh Venkataramanan do { 1172b245cb2SAnirudh Venkataramanan struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 1182b245cb2SAnirudh Venkataramanan 1192b245cb2SAnirudh Venkataramanan /* if next_to_watch is not set then there is no work pending */ 1202b245cb2SAnirudh Venkataramanan if (!eop_desc) 1212b245cb2SAnirudh Venkataramanan break; 1222b245cb2SAnirudh Venkataramanan 1232b245cb2SAnirudh Venkataramanan smp_rmb(); /* prevent any other reads prior to eop_desc */ 1242b245cb2SAnirudh Venkataramanan 1252b245cb2SAnirudh Venkataramanan /* if the descriptor isn't done, no work yet to do */ 1262b245cb2SAnirudh Venkataramanan if (!(eop_desc->cmd_type_offset_bsz & 1272b245cb2SAnirudh Venkataramanan cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 1282b245cb2SAnirudh Venkataramanan break; 1292b245cb2SAnirudh Venkataramanan 1302b245cb2SAnirudh Venkataramanan /* clear next_to_watch to prevent false hangs */ 1312b245cb2SAnirudh Venkataramanan tx_buf->next_to_watch = NULL; 1322b245cb2SAnirudh Venkataramanan 1332b245cb2SAnirudh Venkataramanan /* update the statistics for this packet */ 1342b245cb2SAnirudh 
Venkataramanan total_bytes += tx_buf->bytecount; 1352b245cb2SAnirudh Venkataramanan total_pkts += tx_buf->gso_segs; 1362b245cb2SAnirudh Venkataramanan 1372b245cb2SAnirudh Venkataramanan /* free the skb */ 1382b245cb2SAnirudh Venkataramanan napi_consume_skb(tx_buf->skb, napi_budget); 1392b245cb2SAnirudh Venkataramanan 1402b245cb2SAnirudh Venkataramanan /* unmap skb header data */ 1412b245cb2SAnirudh Venkataramanan dma_unmap_single(tx_ring->dev, 1422b245cb2SAnirudh Venkataramanan dma_unmap_addr(tx_buf, dma), 1432b245cb2SAnirudh Venkataramanan dma_unmap_len(tx_buf, len), 1442b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 1452b245cb2SAnirudh Venkataramanan 1462b245cb2SAnirudh Venkataramanan /* clear tx_buf data */ 1472b245cb2SAnirudh Venkataramanan tx_buf->skb = NULL; 1482b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, 0); 1492b245cb2SAnirudh Venkataramanan 1502b245cb2SAnirudh Venkataramanan /* unmap remaining buffers */ 1512b245cb2SAnirudh Venkataramanan while (tx_desc != eop_desc) { 1522b245cb2SAnirudh Venkataramanan tx_buf++; 1532b245cb2SAnirudh Venkataramanan tx_desc++; 1542b245cb2SAnirudh Venkataramanan i++; 1552b245cb2SAnirudh Venkataramanan if (unlikely(!i)) { 1562b245cb2SAnirudh Venkataramanan i -= tx_ring->count; 1572b245cb2SAnirudh Venkataramanan tx_buf = tx_ring->tx_buf; 1582b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 1592b245cb2SAnirudh Venkataramanan } 1602b245cb2SAnirudh Venkataramanan 1612b245cb2SAnirudh Venkataramanan /* unmap any remaining paged data */ 1622b245cb2SAnirudh Venkataramanan if (dma_unmap_len(tx_buf, len)) { 1632b245cb2SAnirudh Venkataramanan dma_unmap_page(tx_ring->dev, 1642b245cb2SAnirudh Venkataramanan dma_unmap_addr(tx_buf, dma), 1652b245cb2SAnirudh Venkataramanan dma_unmap_len(tx_buf, len), 1662b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 1672b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, 0); 1682b245cb2SAnirudh Venkataramanan } 1692b245cb2SAnirudh Venkataramanan } 
1702b245cb2SAnirudh Venkataramanan 1712b245cb2SAnirudh Venkataramanan /* move us one more past the eop_desc for start of next pkt */ 1722b245cb2SAnirudh Venkataramanan tx_buf++; 1732b245cb2SAnirudh Venkataramanan tx_desc++; 1742b245cb2SAnirudh Venkataramanan i++; 1752b245cb2SAnirudh Venkataramanan if (unlikely(!i)) { 1762b245cb2SAnirudh Venkataramanan i -= tx_ring->count; 1772b245cb2SAnirudh Venkataramanan tx_buf = tx_ring->tx_buf; 1782b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 1792b245cb2SAnirudh Venkataramanan } 1802b245cb2SAnirudh Venkataramanan 1812b245cb2SAnirudh Venkataramanan prefetch(tx_desc); 1822b245cb2SAnirudh Venkataramanan 1832b245cb2SAnirudh Venkataramanan /* update budget accounting */ 1842b245cb2SAnirudh Venkataramanan budget--; 1852b245cb2SAnirudh Venkataramanan } while (likely(budget)); 1862b245cb2SAnirudh Venkataramanan 1872b245cb2SAnirudh Venkataramanan i += tx_ring->count; 1882b245cb2SAnirudh Venkataramanan tx_ring->next_to_clean = i; 1892b245cb2SAnirudh Venkataramanan u64_stats_update_begin(&tx_ring->syncp); 1902b245cb2SAnirudh Venkataramanan tx_ring->stats.bytes += total_bytes; 1912b245cb2SAnirudh Venkataramanan tx_ring->stats.pkts += total_pkts; 1922b245cb2SAnirudh Venkataramanan u64_stats_update_end(&tx_ring->syncp); 1932b245cb2SAnirudh Venkataramanan tx_ring->q_vector->tx.total_bytes += total_bytes; 1942b245cb2SAnirudh Venkataramanan tx_ring->q_vector->tx.total_pkts += total_pkts; 1952b245cb2SAnirudh Venkataramanan 1962b245cb2SAnirudh Venkataramanan netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, 1972b245cb2SAnirudh Venkataramanan total_bytes); 1982b245cb2SAnirudh Venkataramanan 1992b245cb2SAnirudh Venkataramanan #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) 2002b245cb2SAnirudh Venkataramanan if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && 2012b245cb2SAnirudh Venkataramanan (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 2022b245cb2SAnirudh Venkataramanan /* Make sure that anybody 
stopping the queue after this 2032b245cb2SAnirudh Venkataramanan * sees the new next_to_clean. 2042b245cb2SAnirudh Venkataramanan */ 2052b245cb2SAnirudh Venkataramanan smp_mb(); 2062b245cb2SAnirudh Venkataramanan if (__netif_subqueue_stopped(tx_ring->netdev, 2072b245cb2SAnirudh Venkataramanan tx_ring->q_index) && 2082b245cb2SAnirudh Venkataramanan !test_bit(__ICE_DOWN, vsi->state)) { 2092b245cb2SAnirudh Venkataramanan netif_wake_subqueue(tx_ring->netdev, 2102b245cb2SAnirudh Venkataramanan tx_ring->q_index); 2112b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 2122b245cb2SAnirudh Venkataramanan } 2132b245cb2SAnirudh Venkataramanan } 2142b245cb2SAnirudh Venkataramanan 2152b245cb2SAnirudh Venkataramanan return !!budget; 2162b245cb2SAnirudh Venkataramanan } 2172b245cb2SAnirudh Venkataramanan 2182b245cb2SAnirudh Venkataramanan /** 219cdedef59SAnirudh Venkataramanan * ice_setup_tx_ring - Allocate the Tx descriptors 220d337f2afSAnirudh Venkataramanan * @tx_ring: the Tx ring to set up 221cdedef59SAnirudh Venkataramanan * 222cdedef59SAnirudh Venkataramanan * Return 0 on success, negative on error 223cdedef59SAnirudh Venkataramanan */ 224cdedef59SAnirudh Venkataramanan int ice_setup_tx_ring(struct ice_ring *tx_ring) 225cdedef59SAnirudh Venkataramanan { 226cdedef59SAnirudh Venkataramanan struct device *dev = tx_ring->dev; 227cdedef59SAnirudh Venkataramanan 228cdedef59SAnirudh Venkataramanan if (!dev) 229cdedef59SAnirudh Venkataramanan return -ENOMEM; 230cdedef59SAnirudh Venkataramanan 231cdedef59SAnirudh Venkataramanan /* warn if we are about to overwrite the pointer */ 232cdedef59SAnirudh Venkataramanan WARN_ON(tx_ring->tx_buf); 233c6dfd690SBruce Allan tx_ring->tx_buf = 234c6dfd690SBruce Allan devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count, 235c6dfd690SBruce Allan GFP_KERNEL); 236cdedef59SAnirudh Venkataramanan if (!tx_ring->tx_buf) 237cdedef59SAnirudh Venkataramanan return -ENOMEM; 238cdedef59SAnirudh Venkataramanan 239cdedef59SAnirudh 
Venkataramanan /* round up to nearest 4K */ 240c6dfd690SBruce Allan tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), 241c6dfd690SBruce Allan 4096); 242cdedef59SAnirudh Venkataramanan tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, 243cdedef59SAnirudh Venkataramanan GFP_KERNEL); 244cdedef59SAnirudh Venkataramanan if (!tx_ring->desc) { 245cdedef59SAnirudh Venkataramanan dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 246cdedef59SAnirudh Venkataramanan tx_ring->size); 247cdedef59SAnirudh Venkataramanan goto err; 248cdedef59SAnirudh Venkataramanan } 249cdedef59SAnirudh Venkataramanan 250cdedef59SAnirudh Venkataramanan tx_ring->next_to_use = 0; 251cdedef59SAnirudh Venkataramanan tx_ring->next_to_clean = 0; 252b3969fd7SSudheer Mogilappagari tx_ring->tx_stats.prev_pkt = -1; 253cdedef59SAnirudh Venkataramanan return 0; 254cdedef59SAnirudh Venkataramanan 255cdedef59SAnirudh Venkataramanan err: 256cdedef59SAnirudh Venkataramanan devm_kfree(dev, tx_ring->tx_buf); 257cdedef59SAnirudh Venkataramanan tx_ring->tx_buf = NULL; 258cdedef59SAnirudh Venkataramanan return -ENOMEM; 259cdedef59SAnirudh Venkataramanan } 260cdedef59SAnirudh Venkataramanan 261cdedef59SAnirudh Venkataramanan /** 262cdedef59SAnirudh Venkataramanan * ice_clean_rx_ring - Free Rx buffers 263cdedef59SAnirudh Venkataramanan * @rx_ring: ring to be cleaned 264cdedef59SAnirudh Venkataramanan */ 265cdedef59SAnirudh Venkataramanan void ice_clean_rx_ring(struct ice_ring *rx_ring) 266cdedef59SAnirudh Venkataramanan { 267cdedef59SAnirudh Venkataramanan struct device *dev = rx_ring->dev; 268cdedef59SAnirudh Venkataramanan u16 i; 269cdedef59SAnirudh Venkataramanan 270cdedef59SAnirudh Venkataramanan /* ring already cleared, nothing to do */ 271cdedef59SAnirudh Venkataramanan if (!rx_ring->rx_buf) 272cdedef59SAnirudh Venkataramanan return; 273cdedef59SAnirudh Venkataramanan 274cdedef59SAnirudh Venkataramanan /* Free all the Rx ring sk_buffs */ 
275cdedef59SAnirudh Venkataramanan for (i = 0; i < rx_ring->count; i++) { 276cdedef59SAnirudh Venkataramanan struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; 277cdedef59SAnirudh Venkataramanan 278cdedef59SAnirudh Venkataramanan if (rx_buf->skb) { 279cdedef59SAnirudh Venkataramanan dev_kfree_skb(rx_buf->skb); 280cdedef59SAnirudh Venkataramanan rx_buf->skb = NULL; 281cdedef59SAnirudh Venkataramanan } 282cdedef59SAnirudh Venkataramanan if (!rx_buf->page) 283cdedef59SAnirudh Venkataramanan continue; 284cdedef59SAnirudh Venkataramanan 285cdedef59SAnirudh Venkataramanan dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE); 286cdedef59SAnirudh Venkataramanan __free_pages(rx_buf->page, 0); 287cdedef59SAnirudh Venkataramanan 288cdedef59SAnirudh Venkataramanan rx_buf->page = NULL; 289cdedef59SAnirudh Venkataramanan rx_buf->page_offset = 0; 290cdedef59SAnirudh Venkataramanan } 291cdedef59SAnirudh Venkataramanan 292c6dfd690SBruce Allan memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); 293cdedef59SAnirudh Venkataramanan 294cdedef59SAnirudh Venkataramanan /* Zero out the descriptor ring */ 295cdedef59SAnirudh Venkataramanan memset(rx_ring->desc, 0, rx_ring->size); 296cdedef59SAnirudh Venkataramanan 297cdedef59SAnirudh Venkataramanan rx_ring->next_to_alloc = 0; 298cdedef59SAnirudh Venkataramanan rx_ring->next_to_clean = 0; 299cdedef59SAnirudh Venkataramanan rx_ring->next_to_use = 0; 300cdedef59SAnirudh Venkataramanan } 301cdedef59SAnirudh Venkataramanan 302cdedef59SAnirudh Venkataramanan /** 303cdedef59SAnirudh Venkataramanan * ice_free_rx_ring - Free Rx resources 304cdedef59SAnirudh Venkataramanan * @rx_ring: ring to clean the resources from 305cdedef59SAnirudh Venkataramanan * 306cdedef59SAnirudh Venkataramanan * Free all receive software resources 307cdedef59SAnirudh Venkataramanan */ 308cdedef59SAnirudh Venkataramanan void ice_free_rx_ring(struct ice_ring *rx_ring) 309cdedef59SAnirudh Venkataramanan { 310cdedef59SAnirudh Venkataramanan 
ice_clean_rx_ring(rx_ring); 311cdedef59SAnirudh Venkataramanan devm_kfree(rx_ring->dev, rx_ring->rx_buf); 312cdedef59SAnirudh Venkataramanan rx_ring->rx_buf = NULL; 313cdedef59SAnirudh Venkataramanan 314cdedef59SAnirudh Venkataramanan if (rx_ring->desc) { 315cdedef59SAnirudh Venkataramanan dmam_free_coherent(rx_ring->dev, rx_ring->size, 316cdedef59SAnirudh Venkataramanan rx_ring->desc, rx_ring->dma); 317cdedef59SAnirudh Venkataramanan rx_ring->desc = NULL; 318cdedef59SAnirudh Venkataramanan } 319cdedef59SAnirudh Venkataramanan } 320cdedef59SAnirudh Venkataramanan 321cdedef59SAnirudh Venkataramanan /** 322cdedef59SAnirudh Venkataramanan * ice_setup_rx_ring - Allocate the Rx descriptors 323d337f2afSAnirudh Venkataramanan * @rx_ring: the Rx ring to set up 324cdedef59SAnirudh Venkataramanan * 325cdedef59SAnirudh Venkataramanan * Return 0 on success, negative on error 326cdedef59SAnirudh Venkataramanan */ 327cdedef59SAnirudh Venkataramanan int ice_setup_rx_ring(struct ice_ring *rx_ring) 328cdedef59SAnirudh Venkataramanan { 329cdedef59SAnirudh Venkataramanan struct device *dev = rx_ring->dev; 330cdedef59SAnirudh Venkataramanan 331cdedef59SAnirudh Venkataramanan if (!dev) 332cdedef59SAnirudh Venkataramanan return -ENOMEM; 333cdedef59SAnirudh Venkataramanan 334cdedef59SAnirudh Venkataramanan /* warn if we are about to overwrite the pointer */ 335cdedef59SAnirudh Venkataramanan WARN_ON(rx_ring->rx_buf); 336c6dfd690SBruce Allan rx_ring->rx_buf = 337c6dfd690SBruce Allan devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, 338c6dfd690SBruce Allan GFP_KERNEL); 339cdedef59SAnirudh Venkataramanan if (!rx_ring->rx_buf) 340cdedef59SAnirudh Venkataramanan return -ENOMEM; 341cdedef59SAnirudh Venkataramanan 342cdedef59SAnirudh Venkataramanan /* round up to nearest 4K */ 343cdedef59SAnirudh Venkataramanan rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc); 344cdedef59SAnirudh Venkataramanan rx_ring->size = ALIGN(rx_ring->size, 4096); 345cdedef59SAnirudh 
Venkataramanan rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, 346cdedef59SAnirudh Venkataramanan GFP_KERNEL); 347cdedef59SAnirudh Venkataramanan if (!rx_ring->desc) { 348cdedef59SAnirudh Venkataramanan dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 349cdedef59SAnirudh Venkataramanan rx_ring->size); 350cdedef59SAnirudh Venkataramanan goto err; 351cdedef59SAnirudh Venkataramanan } 352cdedef59SAnirudh Venkataramanan 353cdedef59SAnirudh Venkataramanan rx_ring->next_to_use = 0; 354cdedef59SAnirudh Venkataramanan rx_ring->next_to_clean = 0; 355cdedef59SAnirudh Venkataramanan return 0; 356cdedef59SAnirudh Venkataramanan 357cdedef59SAnirudh Venkataramanan err: 358cdedef59SAnirudh Venkataramanan devm_kfree(dev, rx_ring->rx_buf); 359cdedef59SAnirudh Venkataramanan rx_ring->rx_buf = NULL; 360cdedef59SAnirudh Venkataramanan return -ENOMEM; 361cdedef59SAnirudh Venkataramanan } 362cdedef59SAnirudh Venkataramanan 363cdedef59SAnirudh Venkataramanan /** 364cdedef59SAnirudh Venkataramanan * ice_release_rx_desc - Store the new tail and head values 365cdedef59SAnirudh Venkataramanan * @rx_ring: ring to bump 366cdedef59SAnirudh Venkataramanan * @val: new head index 367cdedef59SAnirudh Venkataramanan */ 368cdedef59SAnirudh Venkataramanan static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) 369cdedef59SAnirudh Venkataramanan { 370cdedef59SAnirudh Venkataramanan rx_ring->next_to_use = val; 371cdedef59SAnirudh Venkataramanan 372cdedef59SAnirudh Venkataramanan /* update next to alloc since we have filled the ring */ 373cdedef59SAnirudh Venkataramanan rx_ring->next_to_alloc = val; 374cdedef59SAnirudh Venkataramanan 375cdedef59SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w 376cdedef59SAnirudh Venkataramanan * know there are new descriptors to fetch. (Only 377cdedef59SAnirudh Venkataramanan * applicable for weak-ordered memory model archs, 378cdedef59SAnirudh Venkataramanan * such as IA-64). 
379cdedef59SAnirudh Venkataramanan */ 380cdedef59SAnirudh Venkataramanan wmb(); 381cdedef59SAnirudh Venkataramanan writel(val, rx_ring->tail); 382cdedef59SAnirudh Venkataramanan } 383cdedef59SAnirudh Venkataramanan 384cdedef59SAnirudh Venkataramanan /** 385cdedef59SAnirudh Venkataramanan * ice_alloc_mapped_page - recycle or make a new page 386cdedef59SAnirudh Venkataramanan * @rx_ring: ring to use 387cdedef59SAnirudh Venkataramanan * @bi: rx_buf struct to modify 388cdedef59SAnirudh Venkataramanan * 389cdedef59SAnirudh Venkataramanan * Returns true if the page was successfully allocated or 390cdedef59SAnirudh Venkataramanan * reused. 391cdedef59SAnirudh Venkataramanan */ 392cdedef59SAnirudh Venkataramanan static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, 393cdedef59SAnirudh Venkataramanan struct ice_rx_buf *bi) 394cdedef59SAnirudh Venkataramanan { 395cdedef59SAnirudh Venkataramanan struct page *page = bi->page; 396cdedef59SAnirudh Venkataramanan dma_addr_t dma; 397cdedef59SAnirudh Venkataramanan 398cdedef59SAnirudh Venkataramanan /* since we are recycling buffers we should seldom need to alloc */ 3992b245cb2SAnirudh Venkataramanan if (likely(page)) { 4002b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.page_reuse_count++; 401cdedef59SAnirudh Venkataramanan return true; 4022b245cb2SAnirudh Venkataramanan } 403cdedef59SAnirudh Venkataramanan 404cdedef59SAnirudh Venkataramanan /* alloc new page for storage */ 405cdedef59SAnirudh Venkataramanan page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); 4062b245cb2SAnirudh Venkataramanan if (unlikely(!page)) { 4072b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.alloc_page_failed++; 408cdedef59SAnirudh Venkataramanan return false; 4092b245cb2SAnirudh Venkataramanan } 410cdedef59SAnirudh Venkataramanan 411cdedef59SAnirudh Venkataramanan /* map page for use */ 412cdedef59SAnirudh Venkataramanan dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 413cdedef59SAnirudh Venkataramanan 414cdedef59SAnirudh 
Venkataramanan /* if mapping failed free memory back to system since 415cdedef59SAnirudh Venkataramanan * there isn't much point in holding memory we can't use 416cdedef59SAnirudh Venkataramanan */ 417cdedef59SAnirudh Venkataramanan if (dma_mapping_error(rx_ring->dev, dma)) { 418cdedef59SAnirudh Venkataramanan __free_pages(page, 0); 4192b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.alloc_page_failed++; 420cdedef59SAnirudh Venkataramanan return false; 421cdedef59SAnirudh Venkataramanan } 422cdedef59SAnirudh Venkataramanan 423cdedef59SAnirudh Venkataramanan bi->dma = dma; 424cdedef59SAnirudh Venkataramanan bi->page = page; 425cdedef59SAnirudh Venkataramanan bi->page_offset = 0; 426cdedef59SAnirudh Venkataramanan 427cdedef59SAnirudh Venkataramanan return true; 428cdedef59SAnirudh Venkataramanan } 429cdedef59SAnirudh Venkataramanan 430cdedef59SAnirudh Venkataramanan /** 431cdedef59SAnirudh Venkataramanan * ice_alloc_rx_bufs - Replace used receive buffers 432cdedef59SAnirudh Venkataramanan * @rx_ring: ring to place buffers on 433cdedef59SAnirudh Venkataramanan * @cleaned_count: number of buffers to replace 434cdedef59SAnirudh Venkataramanan * 435cdedef59SAnirudh Venkataramanan * Returns false if all allocations were successful, true if any fail 436cdedef59SAnirudh Venkataramanan */ 437cdedef59SAnirudh Venkataramanan bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) 438cdedef59SAnirudh Venkataramanan { 439cdedef59SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 440cdedef59SAnirudh Venkataramanan u16 ntu = rx_ring->next_to_use; 441cdedef59SAnirudh Venkataramanan struct ice_rx_buf *bi; 442cdedef59SAnirudh Venkataramanan 443cdedef59SAnirudh Venkataramanan /* do nothing if no valid netdev defined */ 444cdedef59SAnirudh Venkataramanan if (!rx_ring->netdev || !cleaned_count) 445cdedef59SAnirudh Venkataramanan return false; 446cdedef59SAnirudh Venkataramanan 447cdedef59SAnirudh Venkataramanan /* get the RX descriptor and buffer based on 
next_to_use */ 448cdedef59SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, ntu); 449cdedef59SAnirudh Venkataramanan bi = &rx_ring->rx_buf[ntu]; 450cdedef59SAnirudh Venkataramanan 451cdedef59SAnirudh Venkataramanan do { 452cdedef59SAnirudh Venkataramanan if (!ice_alloc_mapped_page(rx_ring, bi)) 453cdedef59SAnirudh Venkataramanan goto no_bufs; 454cdedef59SAnirudh Venkataramanan 455cdedef59SAnirudh Venkataramanan /* Refresh the desc even if buffer_addrs didn't change 456cdedef59SAnirudh Venkataramanan * because each write-back erases this info. 457cdedef59SAnirudh Venkataramanan */ 458cdedef59SAnirudh Venkataramanan rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 459cdedef59SAnirudh Venkataramanan 460cdedef59SAnirudh Venkataramanan rx_desc++; 461cdedef59SAnirudh Venkataramanan bi++; 462cdedef59SAnirudh Venkataramanan ntu++; 463cdedef59SAnirudh Venkataramanan if (unlikely(ntu == rx_ring->count)) { 464cdedef59SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, 0); 465cdedef59SAnirudh Venkataramanan bi = rx_ring->rx_buf; 466cdedef59SAnirudh Venkataramanan ntu = 0; 467cdedef59SAnirudh Venkataramanan } 468cdedef59SAnirudh Venkataramanan 469cdedef59SAnirudh Venkataramanan /* clear the status bits for the next_to_use descriptor */ 470cdedef59SAnirudh Venkataramanan rx_desc->wb.status_error0 = 0; 471cdedef59SAnirudh Venkataramanan 472cdedef59SAnirudh Venkataramanan cleaned_count--; 473cdedef59SAnirudh Venkataramanan } while (cleaned_count); 474cdedef59SAnirudh Venkataramanan 475cdedef59SAnirudh Venkataramanan if (rx_ring->next_to_use != ntu) 476cdedef59SAnirudh Venkataramanan ice_release_rx_desc(rx_ring, ntu); 477cdedef59SAnirudh Venkataramanan 478cdedef59SAnirudh Venkataramanan return false; 479cdedef59SAnirudh Venkataramanan 480cdedef59SAnirudh Venkataramanan no_bufs: 481cdedef59SAnirudh Venkataramanan if (rx_ring->next_to_use != ntu) 482cdedef59SAnirudh Venkataramanan ice_release_rx_desc(rx_ring, ntu); 483cdedef59SAnirudh Venkataramanan 
484cdedef59SAnirudh Venkataramanan /* make sure to come back via polling to try again after 485cdedef59SAnirudh Venkataramanan * allocation failure 486cdedef59SAnirudh Venkataramanan */ 487cdedef59SAnirudh Venkataramanan return true; 488cdedef59SAnirudh Venkataramanan } 4892b245cb2SAnirudh Venkataramanan 4902b245cb2SAnirudh Venkataramanan /** 4912b245cb2SAnirudh Venkataramanan * ice_page_is_reserved - check if reuse is possible 4922b245cb2SAnirudh Venkataramanan * @page: page struct to check 4932b245cb2SAnirudh Venkataramanan */ 4942b245cb2SAnirudh Venkataramanan static bool ice_page_is_reserved(struct page *page) 4952b245cb2SAnirudh Venkataramanan { 4962b245cb2SAnirudh Venkataramanan return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); 4972b245cb2SAnirudh Venkataramanan } 4982b245cb2SAnirudh Venkataramanan 4992b245cb2SAnirudh Venkataramanan /** 5002b245cb2SAnirudh Venkataramanan * ice_add_rx_frag - Add contents of Rx buffer to sk_buff 5012b245cb2SAnirudh Venkataramanan * @rx_buf: buffer containing page to add 5022b245cb2SAnirudh Venkataramanan * @rx_desc: descriptor containing length of buffer written by hardware 5032b245cb2SAnirudh Venkataramanan * @skb: sk_buf to place the data into 5042b245cb2SAnirudh Venkataramanan * 5052b245cb2SAnirudh Venkataramanan * This function will add the data contained in rx_buf->page to the skb. 5062b245cb2SAnirudh Venkataramanan * This is done either through a direct copy if the data in the buffer is 5072b245cb2SAnirudh Venkataramanan * less than the skb header size, otherwise it will just attach the page as 5082b245cb2SAnirudh Venkataramanan * a frag to the skb. 5092b245cb2SAnirudh Venkataramanan * 5102b245cb2SAnirudh Venkataramanan * The function will then update the page offset if necessary and return 5112b245cb2SAnirudh Venkataramanan * true if the buffer can be reused by the adapter. 
5122b245cb2SAnirudh Venkataramanan */ 5132b245cb2SAnirudh Venkataramanan static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf, 5142b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc, 5152b245cb2SAnirudh Venkataramanan struct sk_buff *skb) 5162b245cb2SAnirudh Venkataramanan { 5172b245cb2SAnirudh Venkataramanan #if (PAGE_SIZE < 8192) 5182b245cb2SAnirudh Venkataramanan unsigned int truesize = ICE_RXBUF_2048; 5192b245cb2SAnirudh Venkataramanan #else 5202b245cb2SAnirudh Venkataramanan unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; 5212b245cb2SAnirudh Venkataramanan unsigned int truesize; 5222b245cb2SAnirudh Venkataramanan #endif /* PAGE_SIZE < 8192) */ 5232b245cb2SAnirudh Venkataramanan 5242b245cb2SAnirudh Venkataramanan struct page *page; 5252b245cb2SAnirudh Venkataramanan unsigned int size; 5262b245cb2SAnirudh Venkataramanan 5272b245cb2SAnirudh Venkataramanan size = le16_to_cpu(rx_desc->wb.pkt_len) & 5282b245cb2SAnirudh Venkataramanan ICE_RX_FLX_DESC_PKT_LEN_M; 5292b245cb2SAnirudh Venkataramanan 5302b245cb2SAnirudh Venkataramanan page = rx_buf->page; 5312b245cb2SAnirudh Venkataramanan 5322b245cb2SAnirudh Venkataramanan #if (PAGE_SIZE >= 8192) 5332b245cb2SAnirudh Venkataramanan truesize = ALIGN(size, L1_CACHE_BYTES); 5342b245cb2SAnirudh Venkataramanan #endif /* PAGE_SIZE >= 8192) */ 5352b245cb2SAnirudh Venkataramanan 5362b245cb2SAnirudh Venkataramanan /* will the data fit in the skb we allocated? 
if so, just 5372b245cb2SAnirudh Venkataramanan * copy it as it is pretty small anyway 5382b245cb2SAnirudh Venkataramanan */ 5392b245cb2SAnirudh Venkataramanan if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { 5402b245cb2SAnirudh Venkataramanan unsigned char *va = page_address(page) + rx_buf->page_offset; 5412b245cb2SAnirudh Venkataramanan 5422b245cb2SAnirudh Venkataramanan memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 5432b245cb2SAnirudh Venkataramanan 5442b245cb2SAnirudh Venkataramanan /* page is not reserved, we can reuse buffer as-is */ 5452b245cb2SAnirudh Venkataramanan if (likely(!ice_page_is_reserved(page))) 5462b245cb2SAnirudh Venkataramanan return true; 5472b245cb2SAnirudh Venkataramanan 5482b245cb2SAnirudh Venkataramanan /* this page cannot be reused so discard it */ 5492b245cb2SAnirudh Venkataramanan __free_pages(page, 0); 5502b245cb2SAnirudh Venkataramanan return false; 5512b245cb2SAnirudh Venkataramanan } 5522b245cb2SAnirudh Venkataramanan 5532b245cb2SAnirudh Venkataramanan skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 5542b245cb2SAnirudh Venkataramanan rx_buf->page_offset, size, truesize); 5552b245cb2SAnirudh Venkataramanan 5562b245cb2SAnirudh Venkataramanan /* avoid re-using remote pages */ 5572b245cb2SAnirudh Venkataramanan if (unlikely(ice_page_is_reserved(page))) 5582b245cb2SAnirudh Venkataramanan return false; 5592b245cb2SAnirudh Venkataramanan 5602b245cb2SAnirudh Venkataramanan #if (PAGE_SIZE < 8192) 5612b245cb2SAnirudh Venkataramanan /* if we are only owner of page we can reuse it */ 5622b245cb2SAnirudh Venkataramanan if (unlikely(page_count(page) != 1)) 5632b245cb2SAnirudh Venkataramanan return false; 5642b245cb2SAnirudh Venkataramanan 5652b245cb2SAnirudh Venkataramanan /* flip page offset to other buffer */ 5662b245cb2SAnirudh Venkataramanan rx_buf->page_offset ^= truesize; 5672b245cb2SAnirudh Venkataramanan #else 5682b245cb2SAnirudh Venkataramanan /* move offset up to the next cache line */ 5692b245cb2SAnirudh 
Venkataramanan rx_buf->page_offset += truesize; 5702b245cb2SAnirudh Venkataramanan 5712b245cb2SAnirudh Venkataramanan if (rx_buf->page_offset > last_offset) 5722b245cb2SAnirudh Venkataramanan return false; 5732b245cb2SAnirudh Venkataramanan #endif /* PAGE_SIZE < 8192) */ 5742b245cb2SAnirudh Venkataramanan 5752b245cb2SAnirudh Venkataramanan /* Even if we own the page, we are not allowed to use atomic_set() 5762b245cb2SAnirudh Venkataramanan * This would break get_page_unless_zero() users. 5772b245cb2SAnirudh Venkataramanan */ 5782b245cb2SAnirudh Venkataramanan get_page(rx_buf->page); 5792b245cb2SAnirudh Venkataramanan 5802b245cb2SAnirudh Venkataramanan return true; 5812b245cb2SAnirudh Venkataramanan } 5822b245cb2SAnirudh Venkataramanan 5832b245cb2SAnirudh Venkataramanan /** 5842b245cb2SAnirudh Venkataramanan * ice_reuse_rx_page - page flip buffer and store it back on the ring 585d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to store buffers on 5862b245cb2SAnirudh Venkataramanan * @old_buf: donor buffer to have page reused 5872b245cb2SAnirudh Venkataramanan * 5882b245cb2SAnirudh Venkataramanan * Synchronizes page for reuse by the adapter 5892b245cb2SAnirudh Venkataramanan */ 5902b245cb2SAnirudh Venkataramanan static void ice_reuse_rx_page(struct ice_ring *rx_ring, 5912b245cb2SAnirudh Venkataramanan struct ice_rx_buf *old_buf) 5922b245cb2SAnirudh Venkataramanan { 5932b245cb2SAnirudh Venkataramanan u16 nta = rx_ring->next_to_alloc; 5942b245cb2SAnirudh Venkataramanan struct ice_rx_buf *new_buf; 5952b245cb2SAnirudh Venkataramanan 5962b245cb2SAnirudh Venkataramanan new_buf = &rx_ring->rx_buf[nta]; 5972b245cb2SAnirudh Venkataramanan 5982b245cb2SAnirudh Venkataramanan /* update, and store next to alloc */ 5992b245cb2SAnirudh Venkataramanan nta++; 6002b245cb2SAnirudh Venkataramanan rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; 6012b245cb2SAnirudh Venkataramanan 6022b245cb2SAnirudh Venkataramanan /* transfer page from old buffer to new buffer */ 6032b245cb2SAnirudh Venkataramanan *new_buf = *old_buf; 6042b245cb2SAnirudh Venkataramanan } 6052b245cb2SAnirudh Venkataramanan 6062b245cb2SAnirudh Venkataramanan /** 6072b245cb2SAnirudh Venkataramanan * ice_fetch_rx_buf - Allocate skb and populate it 608d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 6092b245cb2SAnirudh Venkataramanan * @rx_desc: descriptor containing info written by hardware 6102b245cb2SAnirudh Venkataramanan * 6112b245cb2SAnirudh Venkataramanan * This function allocates an skb on the fly, and populates it with the page 6122b245cb2SAnirudh Venkataramanan * data from the current receive descriptor, taking care to set up the skb 6132b245cb2SAnirudh Venkataramanan * correctly, as well as handling calling the page recycle function if 6142b245cb2SAnirudh Venkataramanan * necessary. 6152b245cb2SAnirudh Venkataramanan */ 6162b245cb2SAnirudh Venkataramanan static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring, 6172b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc) 6182b245cb2SAnirudh Venkataramanan { 6192b245cb2SAnirudh Venkataramanan struct ice_rx_buf *rx_buf; 6202b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 6212b245cb2SAnirudh Venkataramanan struct page *page; 6222b245cb2SAnirudh Venkataramanan 6232b245cb2SAnirudh Venkataramanan rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; 6242b245cb2SAnirudh Venkataramanan page = rx_buf->page; 6252b245cb2SAnirudh Venkataramanan prefetchw(page); 6262b245cb2SAnirudh Venkataramanan 6272b245cb2SAnirudh Venkataramanan skb = rx_buf->skb; 6282b245cb2SAnirudh Venkataramanan 6292b245cb2SAnirudh Venkataramanan if (likely(!skb)) { 6302b245cb2SAnirudh Venkataramanan u8 *page_addr = page_address(page) + rx_buf->page_offset; 6312b245cb2SAnirudh Venkataramanan 6322b245cb2SAnirudh Venkataramanan /* prefetch first cache line of 
first page */ 6332b245cb2SAnirudh Venkataramanan prefetch(page_addr); 6342b245cb2SAnirudh Venkataramanan #if L1_CACHE_BYTES < 128 6352b245cb2SAnirudh Venkataramanan prefetch((void *)(page_addr + L1_CACHE_BYTES)); 6362b245cb2SAnirudh Venkataramanan #endif /* L1_CACHE_BYTES */ 6372b245cb2SAnirudh Venkataramanan 6382b245cb2SAnirudh Venkataramanan /* allocate a skb to store the frags */ 6392b245cb2SAnirudh Venkataramanan skb = __napi_alloc_skb(&rx_ring->q_vector->napi, 6402b245cb2SAnirudh Venkataramanan ICE_RX_HDR_SIZE, 6412b245cb2SAnirudh Venkataramanan GFP_ATOMIC | __GFP_NOWARN); 6422b245cb2SAnirudh Venkataramanan if (unlikely(!skb)) { 6432b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.alloc_buf_failed++; 6442b245cb2SAnirudh Venkataramanan return NULL; 6452b245cb2SAnirudh Venkataramanan } 6462b245cb2SAnirudh Venkataramanan 6472b245cb2SAnirudh Venkataramanan /* we will be copying header into skb->data in 6482b245cb2SAnirudh Venkataramanan * pskb_may_pull so it is in our interest to prefetch 6492b245cb2SAnirudh Venkataramanan * it now to avoid a possible cache miss 6502b245cb2SAnirudh Venkataramanan */ 6512b245cb2SAnirudh Venkataramanan prefetchw(skb->data); 6522b245cb2SAnirudh Venkataramanan 6532b245cb2SAnirudh Venkataramanan skb_record_rx_queue(skb, rx_ring->q_index); 6542b245cb2SAnirudh Venkataramanan } else { 6552b245cb2SAnirudh Venkataramanan /* we are reusing so sync this buffer for CPU use */ 6562b245cb2SAnirudh Venkataramanan dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 6572b245cb2SAnirudh Venkataramanan rx_buf->page_offset, 6582b245cb2SAnirudh Venkataramanan ICE_RXBUF_2048, 6592b245cb2SAnirudh Venkataramanan DMA_FROM_DEVICE); 6602b245cb2SAnirudh Venkataramanan 6612b245cb2SAnirudh Venkataramanan rx_buf->skb = NULL; 6622b245cb2SAnirudh Venkataramanan } 6632b245cb2SAnirudh Venkataramanan 6642b245cb2SAnirudh Venkataramanan /* pull page into skb */ 6652b245cb2SAnirudh Venkataramanan if (ice_add_rx_frag(rx_buf, rx_desc, skb)) { 6662b245cb2SAnirudh 
Venkataramanan /* hand second half of page back to the ring */ 6672b245cb2SAnirudh Venkataramanan ice_reuse_rx_page(rx_ring, rx_buf); 6682b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.page_reuse_count++; 6692b245cb2SAnirudh Venkataramanan } else { 6702b245cb2SAnirudh Venkataramanan /* we are not reusing the buffer so unmap it */ 6712b245cb2SAnirudh Venkataramanan dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE, 6722b245cb2SAnirudh Venkataramanan DMA_FROM_DEVICE); 6732b245cb2SAnirudh Venkataramanan } 6742b245cb2SAnirudh Venkataramanan 6752b245cb2SAnirudh Venkataramanan /* clear contents of buffer_info */ 6762b245cb2SAnirudh Venkataramanan rx_buf->page = NULL; 6772b245cb2SAnirudh Venkataramanan 6782b245cb2SAnirudh Venkataramanan return skb; 6792b245cb2SAnirudh Venkataramanan } 6802b245cb2SAnirudh Venkataramanan 6812b245cb2SAnirudh Venkataramanan /** 6822b245cb2SAnirudh Venkataramanan * ice_pull_tail - ice specific version of skb_pull_tail 6832b245cb2SAnirudh Venkataramanan * @skb: pointer to current skb being adjusted 6842b245cb2SAnirudh Venkataramanan * 6852b245cb2SAnirudh Venkataramanan * This function is an ice specific version of __pskb_pull_tail. The 6862b245cb2SAnirudh Venkataramanan * main difference between this version and the original function is that 6872b245cb2SAnirudh Venkataramanan * this function can make several assumptions about the state of things 6882b245cb2SAnirudh Venkataramanan * that allow for significant optimizations versus the standard function. 6892b245cb2SAnirudh Venkataramanan * As a result we can do things like drop a frag and maintain an accurate 6902b245cb2SAnirudh Venkataramanan * truesize for the skb. 
6912b245cb2SAnirudh Venkataramanan */ 6922b245cb2SAnirudh Venkataramanan static void ice_pull_tail(struct sk_buff *skb) 6932b245cb2SAnirudh Venkataramanan { 6942b245cb2SAnirudh Venkataramanan struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 6952b245cb2SAnirudh Venkataramanan unsigned int pull_len; 6962b245cb2SAnirudh Venkataramanan unsigned char *va; 6972b245cb2SAnirudh Venkataramanan 6982b245cb2SAnirudh Venkataramanan /* it is valid to use page_address instead of kmap since we are 6992b245cb2SAnirudh Venkataramanan * working with pages allocated out of the lomem pool per 7002b245cb2SAnirudh Venkataramanan * alloc_page(GFP_ATOMIC) 7012b245cb2SAnirudh Venkataramanan */ 7022b245cb2SAnirudh Venkataramanan va = skb_frag_address(frag); 7032b245cb2SAnirudh Venkataramanan 7042b245cb2SAnirudh Venkataramanan /* we need the header to contain the greater of either ETH_HLEN or 7052b245cb2SAnirudh Venkataramanan * 60 bytes if the skb->len is less than 60 for skb_pad. 7062b245cb2SAnirudh Venkataramanan */ 7072b245cb2SAnirudh Venkataramanan pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE); 7082b245cb2SAnirudh Venkataramanan 7092b245cb2SAnirudh Venkataramanan /* align pull length to size of long to optimize memcpy performance */ 7102b245cb2SAnirudh Venkataramanan skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); 7112b245cb2SAnirudh Venkataramanan 7122b245cb2SAnirudh Venkataramanan /* update all of the pointers */ 7132b245cb2SAnirudh Venkataramanan skb_frag_size_sub(frag, pull_len); 7142b245cb2SAnirudh Venkataramanan frag->page_offset += pull_len; 7152b245cb2SAnirudh Venkataramanan skb->data_len -= pull_len; 7162b245cb2SAnirudh Venkataramanan skb->tail += pull_len; 7172b245cb2SAnirudh Venkataramanan } 7182b245cb2SAnirudh Venkataramanan 7192b245cb2SAnirudh Venkataramanan /** 7202b245cb2SAnirudh Venkataramanan * ice_cleanup_headers - Correct empty headers 7212b245cb2SAnirudh Venkataramanan * @skb: pointer to current skb being fixed 7222b245cb2SAnirudh 
Venkataramanan * 7232b245cb2SAnirudh Venkataramanan * Also address the case where we are pulling data in on pages only 7242b245cb2SAnirudh Venkataramanan * and as such no data is present in the skb header. 7252b245cb2SAnirudh Venkataramanan * 7262b245cb2SAnirudh Venkataramanan * In addition if skb is not at least 60 bytes we need to pad it so that 7272b245cb2SAnirudh Venkataramanan * it is large enough to qualify as a valid Ethernet frame. 7282b245cb2SAnirudh Venkataramanan * 7292b245cb2SAnirudh Venkataramanan * Returns true if an error was encountered and skb was freed. 7302b245cb2SAnirudh Venkataramanan */ 7312b245cb2SAnirudh Venkataramanan static bool ice_cleanup_headers(struct sk_buff *skb) 7322b245cb2SAnirudh Venkataramanan { 7332b245cb2SAnirudh Venkataramanan /* place header in linear portion of buffer */ 7342b245cb2SAnirudh Venkataramanan if (skb_is_nonlinear(skb)) 7352b245cb2SAnirudh Venkataramanan ice_pull_tail(skb); 7362b245cb2SAnirudh Venkataramanan 7372b245cb2SAnirudh Venkataramanan /* if eth_skb_pad returns an error the skb was freed */ 7382b245cb2SAnirudh Venkataramanan if (eth_skb_pad(skb)) 7392b245cb2SAnirudh Venkataramanan return true; 7402b245cb2SAnirudh Venkataramanan 7412b245cb2SAnirudh Venkataramanan return false; 7422b245cb2SAnirudh Venkataramanan } 7432b245cb2SAnirudh Venkataramanan 7442b245cb2SAnirudh Venkataramanan /** 7452b245cb2SAnirudh Venkataramanan * ice_test_staterr - tests bits in Rx descriptor status and error fields 7462b245cb2SAnirudh Venkataramanan * @rx_desc: pointer to receive descriptor (in le64 format) 7472b245cb2SAnirudh Venkataramanan * @stat_err_bits: value to mask 7482b245cb2SAnirudh Venkataramanan * 7492b245cb2SAnirudh Venkataramanan * This function does some fast chicanery in order to return the 7502b245cb2SAnirudh Venkataramanan * value of the mask which is really only used for boolean tests. 
7512b245cb2SAnirudh Venkataramanan * The status_error_len doesn't need to be shifted because it begins 7522b245cb2SAnirudh Venkataramanan * at offset zero. 7532b245cb2SAnirudh Venkataramanan */ 7542b245cb2SAnirudh Venkataramanan static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, 7552b245cb2SAnirudh Venkataramanan const u16 stat_err_bits) 7562b245cb2SAnirudh Venkataramanan { 7572b245cb2SAnirudh Venkataramanan return !!(rx_desc->wb.status_error0 & 7582b245cb2SAnirudh Venkataramanan cpu_to_le16(stat_err_bits)); 7592b245cb2SAnirudh Venkataramanan } 7602b245cb2SAnirudh Venkataramanan 7612b245cb2SAnirudh Venkataramanan /** 7622b245cb2SAnirudh Venkataramanan * ice_is_non_eop - process handling of non-EOP buffers 7632b245cb2SAnirudh Venkataramanan * @rx_ring: Rx ring being processed 7642b245cb2SAnirudh Venkataramanan * @rx_desc: Rx descriptor for current buffer 7652b245cb2SAnirudh Venkataramanan * @skb: Current socket buffer containing buffer in progress 7662b245cb2SAnirudh Venkataramanan * 7672b245cb2SAnirudh Venkataramanan * This function updates next to clean. If the buffer is an EOP buffer 7682b245cb2SAnirudh Venkataramanan * this function exits returning false, otherwise it will place the 7692b245cb2SAnirudh Venkataramanan * sk_buff in the next buffer to be chained and return true indicating 7702b245cb2SAnirudh Venkataramanan * that this is in fact a non-EOP buffer. 7712b245cb2SAnirudh Venkataramanan */ 7722b245cb2SAnirudh Venkataramanan static bool ice_is_non_eop(struct ice_ring *rx_ring, 7732b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc, 7742b245cb2SAnirudh Venkataramanan struct sk_buff *skb) 7752b245cb2SAnirudh Venkataramanan { 7762b245cb2SAnirudh Venkataramanan u32 ntc = rx_ring->next_to_clean + 1; 7772b245cb2SAnirudh Venkataramanan 7782b245cb2SAnirudh Venkataramanan /* fetch, update, and store next to clean */ 7792b245cb2SAnirudh Venkataramanan ntc = (ntc < rx_ring->count) ? 
ntc : 0; 7802b245cb2SAnirudh Venkataramanan rx_ring->next_to_clean = ntc; 7812b245cb2SAnirudh Venkataramanan 7822b245cb2SAnirudh Venkataramanan prefetch(ICE_RX_DESC(rx_ring, ntc)); 7832b245cb2SAnirudh Venkataramanan 7842b245cb2SAnirudh Venkataramanan /* if we are the last buffer then there is nothing else to do */ 7852b245cb2SAnirudh Venkataramanan #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 7862b245cb2SAnirudh Venkataramanan if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) 7872b245cb2SAnirudh Venkataramanan return false; 7882b245cb2SAnirudh Venkataramanan 7892b245cb2SAnirudh Venkataramanan /* place skb in next buffer to be received */ 7902b245cb2SAnirudh Venkataramanan rx_ring->rx_buf[ntc].skb = skb; 7912b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.non_eop_descs++; 7922b245cb2SAnirudh Venkataramanan 7932b245cb2SAnirudh Venkataramanan return true; 7942b245cb2SAnirudh Venkataramanan } 7952b245cb2SAnirudh Venkataramanan 7962b245cb2SAnirudh Venkataramanan /** 797d76a60baSAnirudh Venkataramanan * ice_ptype_to_htype - get a hash type 798d76a60baSAnirudh Venkataramanan * @ptype: the ptype value from the descriptor 799d76a60baSAnirudh Venkataramanan * 800d76a60baSAnirudh Venkataramanan * Returns a hash type to be used by skb_set_hash 801d76a60baSAnirudh Venkataramanan */ 802d76a60baSAnirudh Venkataramanan static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype) 803d76a60baSAnirudh Venkataramanan { 804d76a60baSAnirudh Venkataramanan return PKT_HASH_TYPE_NONE; 805d76a60baSAnirudh Venkataramanan } 806d76a60baSAnirudh Venkataramanan 807d76a60baSAnirudh Venkataramanan /** 808d76a60baSAnirudh Venkataramanan * ice_rx_hash - set the hash value in the skb 809d76a60baSAnirudh Venkataramanan * @rx_ring: descriptor ring 810d76a60baSAnirudh Venkataramanan * @rx_desc: specific descriptor 811d76a60baSAnirudh Venkataramanan * @skb: pointer to current skb 812d76a60baSAnirudh Venkataramanan * @rx_ptype: the ptype value from the descriptor 
813d76a60baSAnirudh Venkataramanan */ 814d76a60baSAnirudh Venkataramanan static void 815d76a60baSAnirudh Venkataramanan ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, 816d76a60baSAnirudh Venkataramanan struct sk_buff *skb, u8 rx_ptype) 817d76a60baSAnirudh Venkataramanan { 818d76a60baSAnirudh Venkataramanan struct ice_32b_rx_flex_desc_nic *nic_mdid; 819d76a60baSAnirudh Venkataramanan u32 hash; 820d76a60baSAnirudh Venkataramanan 821d76a60baSAnirudh Venkataramanan if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) 822d76a60baSAnirudh Venkataramanan return; 823d76a60baSAnirudh Venkataramanan 824d76a60baSAnirudh Venkataramanan if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) 825d76a60baSAnirudh Venkataramanan return; 826d76a60baSAnirudh Venkataramanan 827d76a60baSAnirudh Venkataramanan nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; 828d76a60baSAnirudh Venkataramanan hash = le32_to_cpu(nic_mdid->rss_hash); 829d76a60baSAnirudh Venkataramanan skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); 830d76a60baSAnirudh Venkataramanan } 831d76a60baSAnirudh Venkataramanan 832d76a60baSAnirudh Venkataramanan /** 833d76a60baSAnirudh Venkataramanan * ice_rx_csum - Indicate in skb if checksum is good 834d76a60baSAnirudh Venkataramanan * @vsi: the VSI we care about 835d76a60baSAnirudh Venkataramanan * @skb: skb currently being received and modified 836d76a60baSAnirudh Venkataramanan * @rx_desc: the receive descriptor 837d76a60baSAnirudh Venkataramanan * @ptype: the packet type decoded by hardware 838d76a60baSAnirudh Venkataramanan * 839d76a60baSAnirudh Venkataramanan * skb->protocol must be set before this function is called 840d76a60baSAnirudh Venkataramanan */ 841d76a60baSAnirudh Venkataramanan static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, 842d76a60baSAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc, u8 ptype) 843d76a60baSAnirudh Venkataramanan { 844d76a60baSAnirudh Venkataramanan struct ice_rx_ptype_decoded decoded; 
845d76a60baSAnirudh Venkataramanan u32 rx_error, rx_status; 846d76a60baSAnirudh Venkataramanan bool ipv4, ipv6; 847d76a60baSAnirudh Venkataramanan 848d76a60baSAnirudh Venkataramanan rx_status = le16_to_cpu(rx_desc->wb.status_error0); 849d76a60baSAnirudh Venkataramanan rx_error = rx_status; 850d76a60baSAnirudh Venkataramanan 851d76a60baSAnirudh Venkataramanan decoded = ice_decode_rx_desc_ptype(ptype); 852d76a60baSAnirudh Venkataramanan 853d76a60baSAnirudh Venkataramanan /* Start with CHECKSUM_NONE and by default csum_level = 0 */ 854d76a60baSAnirudh Venkataramanan skb->ip_summed = CHECKSUM_NONE; 855d76a60baSAnirudh Venkataramanan skb_checksum_none_assert(skb); 856d76a60baSAnirudh Venkataramanan 857d76a60baSAnirudh Venkataramanan /* check if Rx checksum is enabled */ 858d76a60baSAnirudh Venkataramanan if (!(vsi->netdev->features & NETIF_F_RXCSUM)) 859d76a60baSAnirudh Venkataramanan return; 860d76a60baSAnirudh Venkataramanan 861d76a60baSAnirudh Venkataramanan /* check if HW has decoded the packet and checksum */ 862d76a60baSAnirudh Venkataramanan if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) 863d76a60baSAnirudh Venkataramanan return; 864d76a60baSAnirudh Venkataramanan 865d76a60baSAnirudh Venkataramanan if (!(decoded.known && decoded.outer_ip)) 866d76a60baSAnirudh Venkataramanan return; 867d76a60baSAnirudh Venkataramanan 868d76a60baSAnirudh Venkataramanan ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && 869d76a60baSAnirudh Venkataramanan (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); 870d76a60baSAnirudh Venkataramanan ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && 871d76a60baSAnirudh Venkataramanan (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); 872d76a60baSAnirudh Venkataramanan 873d76a60baSAnirudh Venkataramanan if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | 874d76a60baSAnirudh Venkataramanan BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) 875d76a60baSAnirudh Venkataramanan goto checksum_fail; 876d76a60baSAnirudh 
Venkataramanan else if (ipv6 && (rx_status & 877d76a60baSAnirudh Venkataramanan (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) 878d76a60baSAnirudh Venkataramanan goto checksum_fail; 879d76a60baSAnirudh Venkataramanan 880d76a60baSAnirudh Venkataramanan /* check for L4 errors and handle packets that were not able to be 881d76a60baSAnirudh Venkataramanan * checksummed due to arrival speed 882d76a60baSAnirudh Venkataramanan */ 883d76a60baSAnirudh Venkataramanan if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) 884d76a60baSAnirudh Venkataramanan goto checksum_fail; 885d76a60baSAnirudh Venkataramanan 886d76a60baSAnirudh Venkataramanan /* Only report checksum unnecessary for TCP, UDP, or SCTP */ 887d76a60baSAnirudh Venkataramanan switch (decoded.inner_prot) { 888d76a60baSAnirudh Venkataramanan case ICE_RX_PTYPE_INNER_PROT_TCP: 889d76a60baSAnirudh Venkataramanan case ICE_RX_PTYPE_INNER_PROT_UDP: 890d76a60baSAnirudh Venkataramanan case ICE_RX_PTYPE_INNER_PROT_SCTP: 891d76a60baSAnirudh Venkataramanan skb->ip_summed = CHECKSUM_UNNECESSARY; 892d76a60baSAnirudh Venkataramanan default: 893d76a60baSAnirudh Venkataramanan break; 894d76a60baSAnirudh Venkataramanan } 895d76a60baSAnirudh Venkataramanan return; 896d76a60baSAnirudh Venkataramanan 897d76a60baSAnirudh Venkataramanan checksum_fail: 898d76a60baSAnirudh Venkataramanan vsi->back->hw_csum_rx_error++; 899d76a60baSAnirudh Venkataramanan } 900d76a60baSAnirudh Venkataramanan 901d76a60baSAnirudh Venkataramanan /** 902d76a60baSAnirudh Venkataramanan * ice_process_skb_fields - Populate skb header fields from Rx descriptor 903d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring packet is being transacted on 904d76a60baSAnirudh Venkataramanan * @rx_desc: pointer to the EOP Rx descriptor 905d76a60baSAnirudh Venkataramanan * @skb: pointer to current skb being populated 906d76a60baSAnirudh Venkataramanan * @ptype: the packet type decoded by hardware 907d76a60baSAnirudh Venkataramanan * 908d76a60baSAnirudh Venkataramanan * 
This function checks the ring, descriptor, and packet information in 909d76a60baSAnirudh Venkataramanan * order to populate the hash, checksum, VLAN, protocol, and 910d76a60baSAnirudh Venkataramanan * other fields within the skb. 911d76a60baSAnirudh Venkataramanan */ 912d76a60baSAnirudh Venkataramanan static void ice_process_skb_fields(struct ice_ring *rx_ring, 913d76a60baSAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc, 914d76a60baSAnirudh Venkataramanan struct sk_buff *skb, u8 ptype) 915d76a60baSAnirudh Venkataramanan { 916d76a60baSAnirudh Venkataramanan ice_rx_hash(rx_ring, rx_desc, skb, ptype); 917d76a60baSAnirudh Venkataramanan 918d76a60baSAnirudh Venkataramanan /* modifies the skb - consumes the enet header */ 919d76a60baSAnirudh Venkataramanan skb->protocol = eth_type_trans(skb, rx_ring->netdev); 920d76a60baSAnirudh Venkataramanan 921d76a60baSAnirudh Venkataramanan ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype); 922d76a60baSAnirudh Venkataramanan } 923d76a60baSAnirudh Venkataramanan 924d76a60baSAnirudh Venkataramanan /** 9252b245cb2SAnirudh Venkataramanan * ice_receive_skb - Send a completed packet up the stack 926d337f2afSAnirudh Venkataramanan * @rx_ring: Rx ring in play 9272b245cb2SAnirudh Venkataramanan * @skb: packet to send up 9282b245cb2SAnirudh Venkataramanan * @vlan_tag: vlan tag for packet 9292b245cb2SAnirudh Venkataramanan * 9302b245cb2SAnirudh Venkataramanan * This function sends the completed packet (via. 
skb) up the stack using 9312b245cb2SAnirudh Venkataramanan * gro receive functions (with/without vlan tag) 9322b245cb2SAnirudh Venkataramanan */ 9332b245cb2SAnirudh Venkataramanan static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, 9342b245cb2SAnirudh Venkataramanan u16 vlan_tag) 9352b245cb2SAnirudh Venkataramanan { 9362b245cb2SAnirudh Venkataramanan if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 9372b245cb2SAnirudh Venkataramanan (vlan_tag & VLAN_VID_MASK)) { 9382b245cb2SAnirudh Venkataramanan __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 9392b245cb2SAnirudh Venkataramanan } 9402b245cb2SAnirudh Venkataramanan napi_gro_receive(&rx_ring->q_vector->napi, skb); 9412b245cb2SAnirudh Venkataramanan } 9422b245cb2SAnirudh Venkataramanan 9432b245cb2SAnirudh Venkataramanan /** 9442b245cb2SAnirudh Venkataramanan * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 945d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 9462b245cb2SAnirudh Venkataramanan * @budget: Total limit on number of packets to process 9472b245cb2SAnirudh Venkataramanan * 9482b245cb2SAnirudh Venkataramanan * This function provides a "bounce buffer" approach to Rx interrupt 9492b245cb2SAnirudh Venkataramanan * processing. The advantage to this is that on systems that have 9502b245cb2SAnirudh Venkataramanan * expensive overhead for IOMMU access this provides a means of avoiding 9512b245cb2SAnirudh Venkataramanan * it by maintaining the mapping of the page to the system. 
9522b245cb2SAnirudh Venkataramanan * 9532b245cb2SAnirudh Venkataramanan * Returns amount of work completed 9542b245cb2SAnirudh Venkataramanan */ 9552b245cb2SAnirudh Venkataramanan static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) 9562b245cb2SAnirudh Venkataramanan { 9572b245cb2SAnirudh Venkataramanan unsigned int total_rx_bytes = 0, total_rx_pkts = 0; 9582b245cb2SAnirudh Venkataramanan u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 9592b245cb2SAnirudh Venkataramanan bool failure = false; 9602b245cb2SAnirudh Venkataramanan 9612b245cb2SAnirudh Venkataramanan /* start the loop to process RX packets bounded by 'budget' */ 9622b245cb2SAnirudh Venkataramanan while (likely(total_rx_pkts < (unsigned int)budget)) { 9632b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 9642b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 9652b245cb2SAnirudh Venkataramanan u16 stat_err_bits; 9662b245cb2SAnirudh Venkataramanan u16 vlan_tag = 0; 967d76a60baSAnirudh Venkataramanan u8 rx_ptype; 9682b245cb2SAnirudh Venkataramanan 9692b245cb2SAnirudh Venkataramanan /* return some buffers to hardware, one at a time is too slow */ 9702b245cb2SAnirudh Venkataramanan if (cleaned_count >= ICE_RX_BUF_WRITE) { 9712b245cb2SAnirudh Venkataramanan failure = failure || 9722b245cb2SAnirudh Venkataramanan ice_alloc_rx_bufs(rx_ring, cleaned_count); 9732b245cb2SAnirudh Venkataramanan cleaned_count = 0; 9742b245cb2SAnirudh Venkataramanan } 9752b245cb2SAnirudh Venkataramanan 9762b245cb2SAnirudh Venkataramanan /* get the RX desc from RX ring based on 'next_to_clean' */ 9772b245cb2SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 9782b245cb2SAnirudh Venkataramanan 9792b245cb2SAnirudh Venkataramanan /* status_error_len will always be zero for unused descriptors 9802b245cb2SAnirudh Venkataramanan * because it's cleared in cleanup, and overlaps with hdr_addr 9812b245cb2SAnirudh Venkataramanan * which is always zero because packet split isn't used, if the 
9822b245cb2SAnirudh Venkataramanan * hardware wrote DD then it will be non-zero 9832b245cb2SAnirudh Venkataramanan */ 9842b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 9852b245cb2SAnirudh Venkataramanan if (!ice_test_staterr(rx_desc, stat_err_bits)) 9862b245cb2SAnirudh Venkataramanan break; 9872b245cb2SAnirudh Venkataramanan 9882b245cb2SAnirudh Venkataramanan /* This memory barrier is needed to keep us from reading 9892b245cb2SAnirudh Venkataramanan * any other fields out of the rx_desc until we know the 9902b245cb2SAnirudh Venkataramanan * DD bit is set. 9912b245cb2SAnirudh Venkataramanan */ 9922b245cb2SAnirudh Venkataramanan dma_rmb(); 9932b245cb2SAnirudh Venkataramanan 9942b245cb2SAnirudh Venkataramanan /* allocate (if needed) and populate skb */ 9952b245cb2SAnirudh Venkataramanan skb = ice_fetch_rx_buf(rx_ring, rx_desc); 9962b245cb2SAnirudh Venkataramanan if (!skb) 9972b245cb2SAnirudh Venkataramanan break; 9982b245cb2SAnirudh Venkataramanan 9992b245cb2SAnirudh Venkataramanan cleaned_count++; 10002b245cb2SAnirudh Venkataramanan 10012b245cb2SAnirudh Venkataramanan /* skip if it is NOP desc */ 10022b245cb2SAnirudh Venkataramanan if (ice_is_non_eop(rx_ring, rx_desc, skb)) 10032b245cb2SAnirudh Venkataramanan continue; 10042b245cb2SAnirudh Venkataramanan 10052b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 10062b245cb2SAnirudh Venkataramanan if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 10072b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 10082b245cb2SAnirudh Venkataramanan continue; 10092b245cb2SAnirudh Venkataramanan } 10102b245cb2SAnirudh Venkataramanan 1011d76a60baSAnirudh Venkataramanan rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 1012d76a60baSAnirudh Venkataramanan ICE_RX_FLEX_DESC_PTYPE_M; 1013d76a60baSAnirudh Venkataramanan 10142b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 10152b245cb2SAnirudh Venkataramanan if 
(ice_test_staterr(rx_desc, stat_err_bits)) 10162b245cb2SAnirudh Venkataramanan vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 10172b245cb2SAnirudh Venkataramanan 10182b245cb2SAnirudh Venkataramanan /* correct empty headers and pad skb if needed (to make valid 10192b245cb2SAnirudh Venkataramanan * ethernet frame 10202b245cb2SAnirudh Venkataramanan */ 10212b245cb2SAnirudh Venkataramanan if (ice_cleanup_headers(skb)) { 10222b245cb2SAnirudh Venkataramanan skb = NULL; 10232b245cb2SAnirudh Venkataramanan continue; 10242b245cb2SAnirudh Venkataramanan } 10252b245cb2SAnirudh Venkataramanan 10262b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 10272b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 10282b245cb2SAnirudh Venkataramanan 1029d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 1030d76a60baSAnirudh Venkataramanan ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1031d76a60baSAnirudh Venkataramanan 10322b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 10332b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 10342b245cb2SAnirudh Venkataramanan 10352b245cb2SAnirudh Venkataramanan /* update budget accounting */ 10362b245cb2SAnirudh Venkataramanan total_rx_pkts++; 10372b245cb2SAnirudh Venkataramanan } 10382b245cb2SAnirudh Venkataramanan 10392b245cb2SAnirudh Venkataramanan /* update queue and vector specific stats */ 10402b245cb2SAnirudh Venkataramanan u64_stats_update_begin(&rx_ring->syncp); 10412b245cb2SAnirudh Venkataramanan rx_ring->stats.pkts += total_rx_pkts; 10422b245cb2SAnirudh Venkataramanan rx_ring->stats.bytes += total_rx_bytes; 10432b245cb2SAnirudh Venkataramanan u64_stats_update_end(&rx_ring->syncp); 10442b245cb2SAnirudh Venkataramanan rx_ring->q_vector->rx.total_pkts += total_rx_pkts; 10452b245cb2SAnirudh Venkataramanan rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 10462b245cb2SAnirudh Venkataramanan 10472b245cb2SAnirudh Venkataramanan /* 
guarantee a trip back through this routine if there was a failure */ 10482b245cb2SAnirudh Venkataramanan return failure ? budget : (int)total_rx_pkts; 10492b245cb2SAnirudh Venkataramanan } 10502b245cb2SAnirudh Venkataramanan 10512b245cb2SAnirudh Venkataramanan /** 105263f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 105363f545edSBrett Creeley * @itr_idx: interrupt throttling index 105463f545edSBrett Creeley * @reg_itr: interrupt throttling value adjusted based on ITR granularity 105563f545edSBrett Creeley */ 105663f545edSBrett Creeley static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr) 105763f545edSBrett Creeley { 105863f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 105963f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 106063f545edSBrett Creeley (reg_itr << GLINT_DYN_CTL_INTERVAL_S); 106163f545edSBrett Creeley } 106263f545edSBrett Creeley 106363f545edSBrett Creeley /** 106463f545edSBrett Creeley * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt 106563f545edSBrett Creeley * @vsi: the VSI associated with the q_vector 106663f545edSBrett Creeley * @q_vector: q_vector for which ITR is being updated and interrupt enabled 106763f545edSBrett Creeley */ 106863f545edSBrett Creeley static void 106963f545edSBrett Creeley ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) 107063f545edSBrett Creeley { 107163f545edSBrett Creeley struct ice_hw *hw = &vsi->back->hw; 107263f545edSBrett Creeley struct ice_ring_container *rc; 107363f545edSBrett Creeley u32 itr_val; 107463f545edSBrett Creeley 107563f545edSBrett Creeley /* This block of logic allows us to get away with only updating 107663f545edSBrett Creeley * one ITR value with each interrupt. The idea is to perform a 107763f545edSBrett Creeley * pseudo-lazy update with the following criteria. 107863f545edSBrett Creeley * 107963f545edSBrett Creeley * 1. 
/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @vsi: the VSI associated with the q_vector
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 *
 * Syncs at most ONE of the Tx/Rx ITR values to hardware per call, then
 * writes GLINT_DYN_CTL to re-enable the vector (unless the VSI is down).
 */
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_ring_container *rc;
	u32 itr_val;

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		rc = &q_vector->rx;
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
		rc->current_itr = rc->target_itr;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		/* The unsigned deltas are safe here: reaching this branch
		 * means rx.target >= rx.current (first branch not taken),
		 * and the Tx delta is only evaluated when
		 * tx.target >= tx.current (|| short-circuits otherwise).
		 */
		rc = &q_vector->tx;
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
		rc->current_itr = rc->target_itr;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		rc = &q_vector->rx;
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
		rc->current_itr = rc->target_itr;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	}

	/* don't touch hardware if the interface is going down */
	if (!test_bit(__ICE_DOWN, vsi->state)) {
		int vector = vsi->hw_base_vector + q_vector->v_idx;

		wr32(hw, GLINT_DYN_CTL(vector), itr_val);
	}
}
11362b245cb2SAnirudh Venkataramanan */ 11372b245cb2SAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->tx) 11382b245cb2SAnirudh Venkataramanan if (!ice_clean_tx_irq(vsi, ring, budget)) 11392b245cb2SAnirudh Venkataramanan clean_complete = false; 11402b245cb2SAnirudh Venkataramanan 11412b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 11422b245cb2SAnirudh Venkataramanan if (budget <= 0) 11432b245cb2SAnirudh Venkataramanan return budget; 11442b245cb2SAnirudh Venkataramanan 11452b245cb2SAnirudh Venkataramanan /* We attempt to distribute budget to each Rx queue fairly, but don't 11462b245cb2SAnirudh Venkataramanan * allow the budget to go below 1 because that would exit polling early. 11472b245cb2SAnirudh Venkataramanan */ 11482b245cb2SAnirudh Venkataramanan if (q_vector->num_ring_rx) 11492b245cb2SAnirudh Venkataramanan budget_per_ring = max(budget / q_vector->num_ring_rx, 1); 11502b245cb2SAnirudh Venkataramanan 11512b245cb2SAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->rx) { 11522b245cb2SAnirudh Venkataramanan int cleaned; 11532b245cb2SAnirudh Venkataramanan 11542b245cb2SAnirudh Venkataramanan cleaned = ice_clean_rx_irq(ring, budget_per_ring); 11552b245cb2SAnirudh Venkataramanan work_done += cleaned; 11562b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 11572b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 11582b245cb2SAnirudh Venkataramanan clean_complete = false; 11592b245cb2SAnirudh Venkataramanan } 11602b245cb2SAnirudh Venkataramanan 11612b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 11622b245cb2SAnirudh Venkataramanan if (!clean_complete) 11632b245cb2SAnirudh Venkataramanan return budget; 11642b245cb2SAnirudh Venkataramanan 11650bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 11660bcd952fSJesse Brandeburg * poll us due to busy-polling 
11670bcd952fSJesse Brandeburg */ 11680bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) 11692b245cb2SAnirudh Venkataramanan if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 117063f545edSBrett Creeley ice_update_ena_itr(vsi, q_vector); 1171e0c9fd9bSDave Ertman 1172e0c9fd9bSDave Ertman return min(work_done, budget - 1); 11732b245cb2SAnirudh Venkataramanan } 11742b245cb2SAnirudh Venkataramanan 11752b245cb2SAnirudh Venkataramanan /* helper function for building cmd/type/offset */ 11762b245cb2SAnirudh Venkataramanan static __le64 11772b245cb2SAnirudh Venkataramanan build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) 11782b245cb2SAnirudh Venkataramanan { 11792b245cb2SAnirudh Venkataramanan return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | 11802b245cb2SAnirudh Venkataramanan (td_cmd << ICE_TXD_QW1_CMD_S) | 11812b245cb2SAnirudh Venkataramanan (td_offset << ICE_TXD_QW1_OFFSET_S) | 11822b245cb2SAnirudh Venkataramanan ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | 11832b245cb2SAnirudh Venkataramanan (td_tag << ICE_TXD_QW1_L2TAG1_S)); 11842b245cb2SAnirudh Venkataramanan } 11852b245cb2SAnirudh Venkataramanan 11862b245cb2SAnirudh Venkataramanan /** 1187d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 11882b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 11892b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 11902b245cb2SAnirudh Venkataramanan * 11912b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 11922b245cb2SAnirudh Venkataramanan */ 11932b245cb2SAnirudh Venkataramanan static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 11942b245cb2SAnirudh Venkataramanan { 11952b245cb2SAnirudh Venkataramanan netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 11962b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 11972b245cb2SAnirudh Venkataramanan smp_mb(); 11982b245cb2SAnirudh 
Venkataramanan 11992b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. */ 12002b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 12012b245cb2SAnirudh Venkataramanan return -EBUSY; 12022b245cb2SAnirudh Venkataramanan 12032b245cb2SAnirudh Venkataramanan /* A reprieve! - use start_subqueue because it doesn't call schedule */ 12042b245cb2SAnirudh Venkataramanan netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 12052b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 12062b245cb2SAnirudh Venkataramanan return 0; 12072b245cb2SAnirudh Venkataramanan } 12082b245cb2SAnirudh Venkataramanan 12092b245cb2SAnirudh Venkataramanan /** 1210d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 12112b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 12122b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 12132b245cb2SAnirudh Venkataramanan * 12142b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 12152b245cb2SAnirudh Venkataramanan */ 12162b245cb2SAnirudh Venkataramanan static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 12172b245cb2SAnirudh Venkataramanan { 12182b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 12192b245cb2SAnirudh Venkataramanan return 0; 1220d337f2afSAnirudh Venkataramanan 12212b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 12222b245cb2SAnirudh Venkataramanan } 12232b245cb2SAnirudh Venkataramanan 12242b245cb2SAnirudh Venkataramanan /** 12252b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 12262b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 12272b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1228d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 12292b245cb2SAnirudh Venkataramanan * 
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor. On success it bumps
 * next_to_use and rings the doorbell; on a DMA mapping error it unwinds
 * every mapping made so far and leaves the ring untouched.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	struct skb_frag_struct *frag;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	/* data_len covers only the paged frags; size starts as the linear
	 * (head) portion and is reused for each frag as we walk them
	 */
	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	/* hardware VLAN insertion: tag travels in the descriptor, not skb */
	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			 ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	/* walk head + frags; loop exits via break once data_len hits zero */
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset, max_data, td_tag);

			tx_desc++;
			i++;

			/* wrap the ring index and descriptor pointer */
			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		/* no more frags: the final descriptor is written after the
		 * loop so EOP/RS bits can be added to it
		 */
		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		/* only the frag's tx_buf records the mapping; first->skb
		 * keeps ownership of the skb itself
		 */
		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* stop the queue now if a worst-case packet would no longer fit */
	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet; skip the doorbell when the stack has
	 * signalled more frames are coming (xmit_more batching)
	 */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;

dma_error:
	/* clear dma mappings for failed tx_buf map: walk backwards from the
	 * failing buffer to `first`, unmapping each one
	 */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	/* ring state is restored; nothing was handed to hardware */
	tx_ring->next_to_use = i;
}
/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	/* only frames the stack asked us to finish need offload */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size; the MACLEN field is in 2-byte
	 * words, hence the divide by 2
	 */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	/* tunnelled packets are not supported by this path */
	if (skb->encapsulation)
		return -1;

	/* Enable IP checksum offloads */
	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IP)) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (protocol == htons(ETH_P_IPV6)) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		/* skip any extension headers to find the real L4 protocol */
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		/* neither IPv4 nor IPv6: cannot offload */
		return -1;
	}

	/* compute inner L3 header size; IPLEN is in 4-byte words */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads; L4_LEN is in 4-byte words */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		/* doff is already in 32-bit words, use it directly */
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		/* unknown L4: TSO cannot proceed, otherwise fall back to a
		 * software checksum and report "no offload" (0)
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}
the HW, such as VLAN, DCB, etc. 1489d76a60baSAnirudh Venkataramanan * 1490d76a60baSAnirudh Venkataramanan * Returns error code indicate the frame should be dropped upon error and the 1491d76a60baSAnirudh Venkataramanan * otherwise returns 0 to indicate the flags has been set properly. 1492d76a60baSAnirudh Venkataramanan */ 1493d76a60baSAnirudh Venkataramanan static int 1494d76a60baSAnirudh Venkataramanan ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) 1495d76a60baSAnirudh Venkataramanan { 1496d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1497d76a60baSAnirudh Venkataramanan __be16 protocol = skb->protocol; 1498d76a60baSAnirudh Venkataramanan 1499d76a60baSAnirudh Venkataramanan if (protocol == htons(ETH_P_8021Q) && 1500d76a60baSAnirudh Venkataramanan !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 1501d76a60baSAnirudh Venkataramanan /* when HW VLAN acceleration is turned off by the user the 1502d76a60baSAnirudh Venkataramanan * stack sets the protocol to 8021q so that the driver 1503d76a60baSAnirudh Venkataramanan * can take any steps required to support the SW only 1504d76a60baSAnirudh Venkataramanan * VLAN handling. In our case the driver doesn't need 1505d76a60baSAnirudh Venkataramanan * to take any further steps so just set the protocol 1506d76a60baSAnirudh Venkataramanan * to the encapsulated ethertype. 
1507d76a60baSAnirudh Venkataramanan */ 1508d76a60baSAnirudh Venkataramanan skb->protocol = vlan_get_protocol(skb); 1509d76a60baSAnirudh Venkataramanan goto out; 1510d76a60baSAnirudh Venkataramanan } 1511d76a60baSAnirudh Venkataramanan 1512d76a60baSAnirudh Venkataramanan /* if we have a HW VLAN tag being added, default to the HW one */ 1513d76a60baSAnirudh Venkataramanan if (skb_vlan_tag_present(skb)) { 1514d76a60baSAnirudh Venkataramanan first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 1515d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 1516d76a60baSAnirudh Venkataramanan } else if (protocol == htons(ETH_P_8021Q)) { 1517d76a60baSAnirudh Venkataramanan struct vlan_hdr *vhdr, _vhdr; 1518d76a60baSAnirudh Venkataramanan 1519d76a60baSAnirudh Venkataramanan /* for SW VLAN, check the next protocol and store the tag */ 1520d76a60baSAnirudh Venkataramanan vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN, 1521d76a60baSAnirudh Venkataramanan sizeof(_vhdr), 1522d76a60baSAnirudh Venkataramanan &_vhdr); 1523d76a60baSAnirudh Venkataramanan if (!vhdr) 1524d76a60baSAnirudh Venkataramanan return -EINVAL; 1525d76a60baSAnirudh Venkataramanan 1526d76a60baSAnirudh Venkataramanan first->tx_flags |= ntohs(vhdr->h_vlan_TCI) << 1527d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1528d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_SW_VLAN; 1529d76a60baSAnirudh Venkataramanan } 1530d76a60baSAnirudh Venkataramanan 1531d76a60baSAnirudh Venkataramanan out: 1532d76a60baSAnirudh Venkataramanan return 0; 1533d76a60baSAnirudh Venkataramanan } 1534d76a60baSAnirudh Venkataramanan 1535d76a60baSAnirudh Venkataramanan /** 1536d76a60baSAnirudh Venkataramanan * ice_tso - computes mss and TSO length to prepare for TSO 1537d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1538d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1539d76a60baSAnirudh Venkataramanan * 
/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen, l4_start;
	int err;

	/* TSO only applies to frames needing checksum completion */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* headers are modified below, so they must be writable */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields; hardware recomputes the
	 * per-segment lengths/checksums, so zero them here
	 */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of transport header */
	l4_start = l4.hdr - skb->data;

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* compute length of segmentation header (L2+L3+L4 headers);
	 * doff is in 32-bit words, hence the * 4
	 */
	off->header_len = (l4.tcp->doff * 4) + l4_start;

	/* update gso_segs and bytecount: each extra segment repeats the
	 * headers on the wire, so account for them in the byte count
	 */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
		       (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
		       (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
		       (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}
descriptors needed for Tx 16082b245cb2SAnirudh Venkataramanan * @size: transmit request size in bytes 16092b245cb2SAnirudh Venkataramanan * 16102b245cb2SAnirudh Venkataramanan * Due to hardware alignment restrictions (4K alignment), we need to 16112b245cb2SAnirudh Venkataramanan * assume that we can have no more than 12K of data per descriptor, even 16122b245cb2SAnirudh Venkataramanan * though each descriptor can take up to 16K - 1 bytes of aligned memory. 16132b245cb2SAnirudh Venkataramanan * Thus, we need to divide by 12K. But division is slow! Instead, 16142b245cb2SAnirudh Venkataramanan * we decompose the operation into shifts and one relatively cheap 16152b245cb2SAnirudh Venkataramanan * multiply operation. 16162b245cb2SAnirudh Venkataramanan * 16172b245cb2SAnirudh Venkataramanan * To divide by 12K, we first divide by 4K, then divide by 3: 16182b245cb2SAnirudh Venkataramanan * To divide by 4K, shift right by 12 bits 16192b245cb2SAnirudh Venkataramanan * To divide by 3, multiply by 85, then divide by 256 16202b245cb2SAnirudh Venkataramanan * (Divide by 256 is done by shifting right by 8 bits) 16212b245cb2SAnirudh Venkataramanan * Finally, we add one to round up. Because 256 isn't an exact multiple of 16222b245cb2SAnirudh Venkataramanan * 3, we'll underestimate near each multiple of 12K. This is actually more 16232b245cb2SAnirudh Venkataramanan * accurate as we have 4K - 1 of wiggle room that we can fit into the last 16242b245cb2SAnirudh Venkataramanan * segment. For our purposes this is accurate out to 1M which is orders of 16252b245cb2SAnirudh Venkataramanan * magnitude greater than our largest possible GSO size. 
16262b245cb2SAnirudh Venkataramanan * 16272b245cb2SAnirudh Venkataramanan * This would then be implemented as: 1628c585ea42SBrett Creeley * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 16292b245cb2SAnirudh Venkataramanan * 16302b245cb2SAnirudh Venkataramanan * Since multiplication and division are commutative, we can reorder 16312b245cb2SAnirudh Venkataramanan * operations into: 1632c585ea42SBrett Creeley * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 16332b245cb2SAnirudh Venkataramanan */ 16342b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size) 16352b245cb2SAnirudh Venkataramanan { 1636c585ea42SBrett Creeley return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 16372b245cb2SAnirudh Venkataramanan } 16382b245cb2SAnirudh Venkataramanan 16392b245cb2SAnirudh Venkataramanan /** 1640d337f2afSAnirudh Venkataramanan * ice_xmit_desc_count - calculate number of Tx descriptors needed 16412b245cb2SAnirudh Venkataramanan * @skb: send buffer 16422b245cb2SAnirudh Venkataramanan * 16432b245cb2SAnirudh Venkataramanan * Returns number of data descriptors needed for this skb. 
16442b245cb2SAnirudh Venkataramanan */ 16452b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 16462b245cb2SAnirudh Venkataramanan { 16472b245cb2SAnirudh Venkataramanan const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 16482b245cb2SAnirudh Venkataramanan unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 16492b245cb2SAnirudh Venkataramanan unsigned int count = 0, size = skb_headlen(skb); 16502b245cb2SAnirudh Venkataramanan 16512b245cb2SAnirudh Venkataramanan for (;;) { 16522b245cb2SAnirudh Venkataramanan count += ice_txd_use_count(size); 16532b245cb2SAnirudh Venkataramanan 16542b245cb2SAnirudh Venkataramanan if (!nr_frags--) 16552b245cb2SAnirudh Venkataramanan break; 16562b245cb2SAnirudh Venkataramanan 16572b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag++); 16582b245cb2SAnirudh Venkataramanan } 16592b245cb2SAnirudh Venkataramanan 16602b245cb2SAnirudh Venkataramanan return count; 16612b245cb2SAnirudh Venkataramanan } 16622b245cb2SAnirudh Venkataramanan 16632b245cb2SAnirudh Venkataramanan /** 16642b245cb2SAnirudh Venkataramanan * __ice_chk_linearize - Check if there are more than 8 buffers per packet 16652b245cb2SAnirudh Venkataramanan * @skb: send buffer 16662b245cb2SAnirudh Venkataramanan * 16672b245cb2SAnirudh Venkataramanan * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 16682b245cb2SAnirudh Venkataramanan * and so we need to figure out the cases where we need to linearize the skb. 16692b245cb2SAnirudh Venkataramanan * 16702b245cb2SAnirudh Venkataramanan * For TSO we need to count the TSO header and segment payload separately. 
16712b245cb2SAnirudh Venkataramanan * As such we need to check cases where we have 7 fragments or more as we 16722b245cb2SAnirudh Venkataramanan * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 16732b245cb2SAnirudh Venkataramanan * the segment payload in the first descriptor, and another 7 for the 16742b245cb2SAnirudh Venkataramanan * fragments. 16752b245cb2SAnirudh Venkataramanan */ 16762b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb) 16772b245cb2SAnirudh Venkataramanan { 16782b245cb2SAnirudh Venkataramanan const struct skb_frag_struct *frag, *stale; 16792b245cb2SAnirudh Venkataramanan int nr_frags, sum; 16802b245cb2SAnirudh Venkataramanan 16812b245cb2SAnirudh Venkataramanan /* no need to check if number of frags is less than 7 */ 16822b245cb2SAnirudh Venkataramanan nr_frags = skb_shinfo(skb)->nr_frags; 16832b245cb2SAnirudh Venkataramanan if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 16842b245cb2SAnirudh Venkataramanan return false; 16852b245cb2SAnirudh Venkataramanan 16862b245cb2SAnirudh Venkataramanan /* We need to walk through the list and validate that each group 16872b245cb2SAnirudh Venkataramanan * of 6 fragments totals at least gso_size. 16882b245cb2SAnirudh Venkataramanan */ 16892b245cb2SAnirudh Venkataramanan nr_frags -= ICE_MAX_BUF_TXD - 2; 16902b245cb2SAnirudh Venkataramanan frag = &skb_shinfo(skb)->frags[0]; 16912b245cb2SAnirudh Venkataramanan 16922b245cb2SAnirudh Venkataramanan /* Initialize size to the negative value of gso_size minus 1. We 16932b245cb2SAnirudh Venkataramanan * use this as the worst case scenerio in which the frag ahead 16942b245cb2SAnirudh Venkataramanan * of us only provides one byte which is why we are limited to 6 16952b245cb2SAnirudh Venkataramanan * descriptors for a single transmit as the header and previous 16962b245cb2SAnirudh Venkataramanan * fragment are already consuming 2 descriptors. 
16972b245cb2SAnirudh Venkataramanan */ 16982b245cb2SAnirudh Venkataramanan sum = 1 - skb_shinfo(skb)->gso_size; 16992b245cb2SAnirudh Venkataramanan 17002b245cb2SAnirudh Venkataramanan /* Add size of frags 0 through 4 to create our initial sum */ 17012b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 17022b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 17032b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 17042b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 17052b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 17062b245cb2SAnirudh Venkataramanan 17072b245cb2SAnirudh Venkataramanan /* Walk through fragments adding latest fragment, testing it, and 17082b245cb2SAnirudh Venkataramanan * then removing stale fragments from the sum. 17092b245cb2SAnirudh Venkataramanan */ 17102b245cb2SAnirudh Venkataramanan stale = &skb_shinfo(skb)->frags[0]; 17112b245cb2SAnirudh Venkataramanan for (;;) { 17122b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 17132b245cb2SAnirudh Venkataramanan 17142b245cb2SAnirudh Venkataramanan /* if sum is negative we failed to make sufficient progress */ 17152b245cb2SAnirudh Venkataramanan if (sum < 0) 17162b245cb2SAnirudh Venkataramanan return true; 17172b245cb2SAnirudh Venkataramanan 17182b245cb2SAnirudh Venkataramanan if (!nr_frags--) 17192b245cb2SAnirudh Venkataramanan break; 17202b245cb2SAnirudh Venkataramanan 17212b245cb2SAnirudh Venkataramanan sum -= skb_frag_size(stale++); 17222b245cb2SAnirudh Venkataramanan } 17232b245cb2SAnirudh Venkataramanan 17242b245cb2SAnirudh Venkataramanan return false; 17252b245cb2SAnirudh Venkataramanan } 17262b245cb2SAnirudh Venkataramanan 17272b245cb2SAnirudh Venkataramanan /** 17282b245cb2SAnirudh Venkataramanan * ice_chk_linearize - Check if there are more than 8 fragments per packet 17292b245cb2SAnirudh Venkataramanan * @skb: send buffer 17302b245cb2SAnirudh Venkataramanan * @count: number of buffers used 17312b245cb2SAnirudh Venkataramanan * 
17322b245cb2SAnirudh Venkataramanan * Note: Our HW can't scatter-gather more than 8 fragments to build 17332b245cb2SAnirudh Venkataramanan * a packet on the wire and so we need to figure out the cases where we 17342b245cb2SAnirudh Venkataramanan * need to linearize the skb. 17352b245cb2SAnirudh Venkataramanan */ 17362b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 17372b245cb2SAnirudh Venkataramanan { 17382b245cb2SAnirudh Venkataramanan /* Both TSO and single send will work if count is less than 8 */ 17392b245cb2SAnirudh Venkataramanan if (likely(count < ICE_MAX_BUF_TXD)) 17402b245cb2SAnirudh Venkataramanan return false; 17412b245cb2SAnirudh Venkataramanan 17422b245cb2SAnirudh Venkataramanan if (skb_is_gso(skb)) 17432b245cb2SAnirudh Venkataramanan return __ice_chk_linearize(skb); 17442b245cb2SAnirudh Venkataramanan 17452b245cb2SAnirudh Venkataramanan /* we can support up to 8 data buffers for a single send */ 17462b245cb2SAnirudh Venkataramanan return count != ICE_MAX_BUF_TXD; 17472b245cb2SAnirudh Venkataramanan } 17482b245cb2SAnirudh Venkataramanan 17492b245cb2SAnirudh Venkataramanan /** 17502b245cb2SAnirudh Venkataramanan * ice_xmit_frame_ring - Sends buffer on Tx ring 17512b245cb2SAnirudh Venkataramanan * @skb: send buffer 17522b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 17532b245cb2SAnirudh Venkataramanan * 17542b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 17552b245cb2SAnirudh Venkataramanan */ 17562b245cb2SAnirudh Venkataramanan static netdev_tx_t 17572b245cb2SAnirudh Venkataramanan ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) 17582b245cb2SAnirudh Venkataramanan { 1759d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params offload = { 0 }; 17602b245cb2SAnirudh Venkataramanan struct ice_tx_buf *first; 17612b245cb2SAnirudh Venkataramanan unsigned int count; 1762d76a60baSAnirudh Venkataramanan int tso, csum; 
17632b245cb2SAnirudh Venkataramanan 17642b245cb2SAnirudh Venkataramanan count = ice_xmit_desc_count(skb); 17652b245cb2SAnirudh Venkataramanan if (ice_chk_linearize(skb, count)) { 17662b245cb2SAnirudh Venkataramanan if (__skb_linearize(skb)) 17672b245cb2SAnirudh Venkataramanan goto out_drop; 17682b245cb2SAnirudh Venkataramanan count = ice_txd_use_count(skb->len); 17692b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_linearize++; 17702b245cb2SAnirudh Venkataramanan } 17712b245cb2SAnirudh Venkataramanan 17722b245cb2SAnirudh Venkataramanan /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 17732b245cb2SAnirudh Venkataramanan * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 17742b245cb2SAnirudh Venkataramanan * + 4 desc gap to avoid the cache line where head is, 17752b245cb2SAnirudh Venkataramanan * + 1 desc for context descriptor, 17762b245cb2SAnirudh Venkataramanan * otherwise try next time 17772b245cb2SAnirudh Venkataramanan */ 1778c585ea42SBrett Creeley if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 1779c585ea42SBrett Creeley ICE_DESCS_FOR_CTX_DESC)) { 17802b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_busy++; 17812b245cb2SAnirudh Venkataramanan return NETDEV_TX_BUSY; 17822b245cb2SAnirudh Venkataramanan } 17832b245cb2SAnirudh Venkataramanan 1784d76a60baSAnirudh Venkataramanan offload.tx_ring = tx_ring; 1785d76a60baSAnirudh Venkataramanan 17862b245cb2SAnirudh Venkataramanan /* record the location of the first descriptor for this packet */ 17872b245cb2SAnirudh Venkataramanan first = &tx_ring->tx_buf[tx_ring->next_to_use]; 17882b245cb2SAnirudh Venkataramanan first->skb = skb; 17892b245cb2SAnirudh Venkataramanan first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 17902b245cb2SAnirudh Venkataramanan first->gso_segs = 1; 1791d76a60baSAnirudh Venkataramanan first->tx_flags = 0; 17922b245cb2SAnirudh Venkataramanan 1793d76a60baSAnirudh Venkataramanan /* prepare the VLAN tagging flags for Tx */ 1794d76a60baSAnirudh 
Venkataramanan if (ice_tx_prepare_vlan_flags(tx_ring, first)) 1795d76a60baSAnirudh Venkataramanan goto out_drop; 1796d76a60baSAnirudh Venkataramanan 1797d76a60baSAnirudh Venkataramanan /* set up TSO offload */ 1798d76a60baSAnirudh Venkataramanan tso = ice_tso(first, &offload); 1799d76a60baSAnirudh Venkataramanan if (tso < 0) 1800d76a60baSAnirudh Venkataramanan goto out_drop; 1801d76a60baSAnirudh Venkataramanan 1802d76a60baSAnirudh Venkataramanan /* always set up Tx checksum offload */ 1803d76a60baSAnirudh Venkataramanan csum = ice_tx_csum(first, &offload); 1804d76a60baSAnirudh Venkataramanan if (csum < 0) 1805d76a60baSAnirudh Venkataramanan goto out_drop; 1806d76a60baSAnirudh Venkataramanan 1807d76a60baSAnirudh Venkataramanan if (tso || offload.cd_tunnel_params) { 1808d76a60baSAnirudh Venkataramanan struct ice_tx_ctx_desc *cdesc; 1809d76a60baSAnirudh Venkataramanan int i = tx_ring->next_to_use; 1810d76a60baSAnirudh Venkataramanan 1811d76a60baSAnirudh Venkataramanan /* grab the next descriptor */ 1812d76a60baSAnirudh Venkataramanan cdesc = ICE_TX_CTX_DESC(tx_ring, i); 1813d76a60baSAnirudh Venkataramanan i++; 1814d76a60baSAnirudh Venkataramanan tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 1815d76a60baSAnirudh Venkataramanan 1816d76a60baSAnirudh Venkataramanan /* setup context descriptor */ 1817d76a60baSAnirudh Venkataramanan cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 1818d76a60baSAnirudh Venkataramanan cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 1819d76a60baSAnirudh Venkataramanan cdesc->rsvd = cpu_to_le16(0); 1820d76a60baSAnirudh Venkataramanan cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 1821d76a60baSAnirudh Venkataramanan } 1822d76a60baSAnirudh Venkataramanan 1823d76a60baSAnirudh Venkataramanan ice_tx_map(tx_ring, first, &offload); 18242b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 18252b245cb2SAnirudh Venkataramanan 18262b245cb2SAnirudh Venkataramanan out_drop: 18272b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 18282b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 18292b245cb2SAnirudh Venkataramanan } 18302b245cb2SAnirudh Venkataramanan 18312b245cb2SAnirudh Venkataramanan /** 18322b245cb2SAnirudh Venkataramanan * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 18332b245cb2SAnirudh Venkataramanan * @skb: send buffer 18342b245cb2SAnirudh Venkataramanan * @netdev: network interface device structure 18352b245cb2SAnirudh Venkataramanan * 18362b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 18372b245cb2SAnirudh Venkataramanan */ 18382b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 18392b245cb2SAnirudh Venkataramanan { 18402b245cb2SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 18412b245cb2SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 18422b245cb2SAnirudh Venkataramanan struct ice_ring *tx_ring; 18432b245cb2SAnirudh Venkataramanan 18442b245cb2SAnirudh Venkataramanan tx_ring = vsi->tx_rings[skb->queue_mapping]; 18452b245cb2SAnirudh Venkataramanan 18462b245cb2SAnirudh Venkataramanan /* hardware can't handle really short frames, hardware padding works 
18472b245cb2SAnirudh Venkataramanan * beyond this point 18482b245cb2SAnirudh Venkataramanan */ 18492b245cb2SAnirudh Venkataramanan if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 18502b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 18512b245cb2SAnirudh Venkataramanan 18522b245cb2SAnirudh Venkataramanan return ice_xmit_frame_ring(skb, tx_ring); 18532b245cb2SAnirudh Venkataramanan } 1854