// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE 256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}
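/* Note: tx_buf->skb and tx_buf->raw_buf appear to share storage (an
 * anonymous union in ice_txrx.h), so the tx_buf->skb test above also
 * covers XDP buffers; ice_ring_is_xdp() then selects the matching free
 * routine.
 */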
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
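	/* Index trick used by the loop below, as a worked example: with
	 * count = 256 and next_to_clean = 10, i starts at 10 - 256 = -246
	 * and counts up, so ring wrap-around is detected with a simple
	 * "if (unlikely(!i))" rather than a compare against tx_ring->count.
	 */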
	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}
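/* The smp_mb() in ice_clean_tx_irq() above presumably pairs with a barrier
 * in the transmit path's queue-stop check: either the xmit side observes
 * the updated next_to_clean and keeps the queue running, or this side
 * observes the stopped state and wakes the queue, so a stop cannot be
 * missed on both sides.
 */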
/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}
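/* Sizing example for the ALIGN() above, assuming the 16 byte ice_tx_desc:
 * 512 descriptors need 512 * 16 = 8192 bytes (two full 4 KiB pages as-is),
 * while 128 descriptors need 2048 bytes and get rounded up to one page.
 */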
/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_umem) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}
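/* The WRITE_ONCE() of xdp_prog above pairs with the READ_ONCE() in
 * ice_clean_rx_irq(): the Rx poll path samples the program pointer under
 * rcu_read_lock() while it may be swapped at runtime, so both sides must
 * use single, untorn accesses.
 */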
/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value into the data buffer for this ring.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	int err, result = ICE_XDP_PASS;
	struct ice_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	return result;
}
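/* XDP_TX above selects the Tx ring indexed by smp_processor_id(). This
 * assumes one XDP Tx ring was allocated per CPU and relies on NAPI poll
 * running with bottom halves disabled, so the ring needs no extra locking.
 */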
/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}
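/* ice_xdp_xmit() is wired up as the driver's ndo_xdp_xmit hook, so a
 * redirect target using this netdev sees partial-success semantics: the
 * return value n - drops counts frames queued, failed frames have already
 * been handed back via xdp_return_frame_rx_napi(), and the tail is bumped
 * only when the caller passes XDP_XMIT_FLUSH.
 */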
/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
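/* The page_ref_add(page, USHRT_MAX - 1) above front-loads the page
 * refcount so the hot path can track ownership in the driver-local
 * pagecnt_bias field instead of doing an atomic get_page()/put_page()
 * per packet; the accumulated bias is settled in one call to
 * __page_frag_cache_drain() when the page is finally released.
 */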
/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}
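/* Worked example for the PAGE_SIZE < 8192 branch: with 4 KiB pages and a
 * 2048 byte truesize, page_offset ^= 2048 ping-pongs the offset between 0
 * and 2048, so the two halves of the page alternate between hardware and
 * stack ownership.
 */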
/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
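/* A recycled page lands in the next_to_alloc slot and is picked up again
 * by ice_alloc_mapped_page()'s likely(page) fast path, so in steady state
 * the ring feeds on its own pages and fresh page allocations stay rare.
 */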
/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}
/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}
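/* Buffer layout assumed by ice_build_skb(), for reference: headroom
 * (ICE_SKB_PAD) starts at xdp->data_hard_start, the frame follows at
 * xdp->data, and build_skb() places struct skb_shared_info at the end of
 * truesize, so the skb wraps the existing half-page without copying
 * packet data.
 */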
/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}
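/* Note on the ALIGN() in the memcpy above: the copy may read up to
 * sizeof(long) - 1 bytes past headlen, which stays inside the Rx buffer,
 * and headlen is capped at ICE_RX_HDR_SIZE (a multiple of sizeof(long)),
 * so the rounded length cannot overrun the skb's linear area either.
 */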
/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}
/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
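/* Frames larger than one Rx buffer span several descriptors. Stashing the
 * in-progress skb in the next buffer slot lets the next loop iteration in
 * ice_clean_rx_irq() retrieve it through ice_get_rx_buf() and append the
 * following fragment with ice_add_rx_frag().
 */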
9812b245cb2SAnirudh Venkataramanan * 9822b245cb2SAnirudh Venkataramanan * Returns amount of work completed 9832b245cb2SAnirudh Venkataramanan */ 9842b245cb2SAnirudh Venkataramanan static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) 9852b245cb2SAnirudh Venkataramanan { 9862b245cb2SAnirudh Venkataramanan unsigned int total_rx_bytes = 0, total_rx_pkts = 0; 9872b245cb2SAnirudh Venkataramanan u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 988efc2214bSMaciej Fijalkowski unsigned int xdp_res, xdp_xmit = 0; 989efc2214bSMaciej Fijalkowski struct bpf_prog *xdp_prog = NULL; 990efc2214bSMaciej Fijalkowski struct xdp_buff xdp; 991cb7db356SBrett Creeley bool failure; 9922b245cb2SAnirudh Venkataramanan 993efc2214bSMaciej Fijalkowski xdp.rxq = &rx_ring->xdp_rxq; 994efc2214bSMaciej Fijalkowski 995f9867df6SAnirudh Venkataramanan /* start the loop to process Rx packets bounded by 'budget' */ 9962b245cb2SAnirudh Venkataramanan while (likely(total_rx_pkts < (unsigned int)budget)) { 9972b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 9986c869cb7SMaciej Fijalkowski struct ice_rx_buf *rx_buf; 9992b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 10006c869cb7SMaciej Fijalkowski unsigned int size; 10012b245cb2SAnirudh Venkataramanan u16 stat_err_bits; 10022b245cb2SAnirudh Venkataramanan u16 vlan_tag = 0; 1003d76a60baSAnirudh Venkataramanan u8 rx_ptype; 10042b245cb2SAnirudh Venkataramanan 1005f9867df6SAnirudh Venkataramanan /* get the Rx desc from Rx ring based on 'next_to_clean' */ 10062b245cb2SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 10072b245cb2SAnirudh Venkataramanan 10082b245cb2SAnirudh Venkataramanan /* status_error_len will always be zero for unused descriptors 10092b245cb2SAnirudh Venkataramanan * because it's cleared in cleanup, and overlaps with hdr_addr 10102b245cb2SAnirudh Venkataramanan * which is always zero because packet split isn't used, if the 10112b245cb2SAnirudh Venkataramanan * hardware wrote DD then it will be non-zero 10122b245cb2SAnirudh Venkataramanan */ 10132b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 10142b245cb2SAnirudh Venkataramanan if (!ice_test_staterr(rx_desc, stat_err_bits)) 10152b245cb2SAnirudh Venkataramanan break; 10162b245cb2SAnirudh Venkataramanan 10172b245cb2SAnirudh Venkataramanan /* This memory barrier is needed to keep us from reading 10182b245cb2SAnirudh Venkataramanan * any other fields out of the rx_desc until we know the 10192b245cb2SAnirudh Venkataramanan * DD bit is set. 
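 * (dma_rmb() rather than a full rmb() is sufficient here: we only need to
 * order our descriptor reads against the device's writes to coherent DMA
 * memory.)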
10202b245cb2SAnirudh Venkataramanan */ 10212b245cb2SAnirudh Venkataramanan dma_rmb(); 10222b245cb2SAnirudh Venkataramanan 10236c869cb7SMaciej Fijalkowski size = le16_to_cpu(rx_desc->wb.pkt_len) & 10246c869cb7SMaciej Fijalkowski ICE_RX_FLX_DESC_PKT_LEN_M; 10252b245cb2SAnirudh Venkataramanan 1026ac6f733aSMitch Williams /* retrieve a buffer from the ring */ 1027712edbbbSMaciej Fijalkowski rx_buf = ice_get_rx_buf(rx_ring, &skb, size); 1028ac6f733aSMitch Williams 1029efc2214bSMaciej Fijalkowski if (!size) { 1030efc2214bSMaciej Fijalkowski xdp.data = NULL; 1031efc2214bSMaciej Fijalkowski xdp.data_end = NULL; 1032aaf27254SMaciej Fijalkowski xdp.data_hard_start = NULL; 1033aaf27254SMaciej Fijalkowski xdp.data_meta = NULL; 1034efc2214bSMaciej Fijalkowski goto construct_skb; 1035efc2214bSMaciej Fijalkowski } 1036efc2214bSMaciej Fijalkowski 1037efc2214bSMaciej Fijalkowski xdp.data = page_address(rx_buf->page) + rx_buf->page_offset; 1038efc2214bSMaciej Fijalkowski xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring); 1039aaf27254SMaciej Fijalkowski xdp.data_meta = xdp.data; 1040efc2214bSMaciej Fijalkowski xdp.data_end = xdp.data + size; 1041efc2214bSMaciej Fijalkowski 1042efc2214bSMaciej Fijalkowski rcu_read_lock(); 1043efc2214bSMaciej Fijalkowski xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1044efc2214bSMaciej Fijalkowski if (!xdp_prog) { 1045efc2214bSMaciej Fijalkowski rcu_read_unlock(); 1046efc2214bSMaciej Fijalkowski goto construct_skb; 1047efc2214bSMaciej Fijalkowski } 1048efc2214bSMaciej Fijalkowski 1049efc2214bSMaciej Fijalkowski xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); 1050efc2214bSMaciej Fijalkowski rcu_read_unlock(); 105159bb0808SMaciej Fijalkowski if (!xdp_res) 105259bb0808SMaciej Fijalkowski goto construct_skb; 1053efc2214bSMaciej Fijalkowski if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 10547237f5b0SMaciej Fijalkowski unsigned int truesize; 10557237f5b0SMaciej Fijalkowski 10567237f5b0SMaciej Fijalkowski #if (PAGE_SIZE < 8192) 10577237f5b0SMaciej Fijalkowski truesize = ice_rx_pg_size(rx_ring) / 2; 10587237f5b0SMaciej Fijalkowski #else 105959bb0808SMaciej Fijalkowski truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + 106059bb0808SMaciej Fijalkowski size); 10617237f5b0SMaciej Fijalkowski #endif 1062efc2214bSMaciej Fijalkowski xdp_xmit |= xdp_res; 10637237f5b0SMaciej Fijalkowski ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 1064efc2214bSMaciej Fijalkowski } else { 1065efc2214bSMaciej Fijalkowski rx_buf->pagecnt_bias++; 1066efc2214bSMaciej Fijalkowski } 1067efc2214bSMaciej Fijalkowski total_rx_bytes += size; 1068efc2214bSMaciej Fijalkowski total_rx_pkts++; 1069efc2214bSMaciej Fijalkowski 1070efc2214bSMaciej Fijalkowski cleaned_count++; 1071efc2214bSMaciej Fijalkowski ice_put_rx_buf(rx_ring, rx_buf); 1072efc2214bSMaciej Fijalkowski continue; 1073efc2214bSMaciej Fijalkowski construct_skb: 10741f45ebe0SMitch Williams if (skb) { 10757237f5b0SMaciej Fijalkowski ice_add_rx_frag(rx_ring, rx_buf, skb, size); 10761f45ebe0SMitch Williams } else if (likely(xdp.data)) { 10771f45ebe0SMitch Williams if (ice_ring_uses_build_skb(rx_ring)) 1078aaf27254SMaciej Fijalkowski skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1079712edbbbSMaciej Fijalkowski else 1080efc2214bSMaciej Fijalkowski skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 10811f45ebe0SMitch Williams } 1082712edbbbSMaciej Fijalkowski /* exit if we failed to retrieve a buffer */ 1083712edbbbSMaciej Fijalkowski if (!skb) { 1084712edbbbSMaciej Fijalkowski rx_ring->rx_stats.alloc_buf_failed++; 1085ac6f733aSMitch Williams if (rx_buf) 
1086712edbbbSMaciej Fijalkowski rx_buf->pagecnt_bias++; 10872b245cb2SAnirudh Venkataramanan break; 1088712edbbbSMaciej Fijalkowski } 10892b245cb2SAnirudh Venkataramanan 10901d032bc7SMaciej Fijalkowski ice_put_rx_buf(rx_ring, rx_buf); 10912b245cb2SAnirudh Venkataramanan cleaned_count++; 10922b245cb2SAnirudh Venkataramanan 10932b245cb2SAnirudh Venkataramanan /* skip further processing if this is a non-EOP desc */ 10942b245cb2SAnirudh Venkataramanan if (ice_is_non_eop(rx_ring, rx_desc, skb)) 10952b245cb2SAnirudh Venkataramanan continue; 10962b245cb2SAnirudh Venkataramanan 10972b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 10982b245cb2SAnirudh Venkataramanan if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 10992b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 11002b245cb2SAnirudh Venkataramanan continue; 11012b245cb2SAnirudh Venkataramanan } 11022b245cb2SAnirudh Venkataramanan 11032b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 11042b245cb2SAnirudh Venkataramanan if (ice_test_staterr(rx_desc, stat_err_bits)) 11052b245cb2SAnirudh Venkataramanan vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 11062b245cb2SAnirudh Venkataramanan 1107133f4883SKrzysztof Kazimierczak /* pad the skb if needed, to make a valid Ethernet frame */ 1108133f4883SKrzysztof Kazimierczak if (eth_skb_pad(skb)) { 11092b245cb2SAnirudh Venkataramanan skb = NULL; 11102b245cb2SAnirudh Venkataramanan continue; 11112b245cb2SAnirudh Venkataramanan } 11122b245cb2SAnirudh Venkataramanan 11132b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 11142b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 11152b245cb2SAnirudh Venkataramanan 1116d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 11176503b659SJesse Brandeburg rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 11186503b659SJesse Brandeburg ICE_RX_FLEX_DESC_PTYPE_M; 11196503b659SJesse Brandeburg 1120d76a60baSAnirudh Venkataramanan ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1121d76a60baSAnirudh Venkataramanan 11222b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 11232b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 11242b245cb2SAnirudh Venkataramanan 11252b245cb2SAnirudh Venkataramanan /* update budget accounting */ 11262b245cb2SAnirudh Venkataramanan total_rx_pkts++; 11272b245cb2SAnirudh Venkataramanan } 11282b245cb2SAnirudh Venkataramanan 1129cb7db356SBrett Creeley /* return up to cleaned_count buffers to hardware */ 1130cb7db356SBrett Creeley failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1131cb7db356SBrett Creeley 1132efc2214bSMaciej Fijalkowski if (xdp_prog) 1133efc2214bSMaciej Fijalkowski ice_finalize_xdp_rx(rx_ring, xdp_xmit); 1134efc2214bSMaciej Fijalkowski 11352d4238f5SKrzysztof Kazimierczak ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 11362b245cb2SAnirudh Venkataramanan 11372b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 11382b245cb2SAnirudh Venkataramanan return failure ?
budget : (int)total_rx_pkts; 11392b245cb2SAnirudh Venkataramanan } 11402b245cb2SAnirudh Venkataramanan 11412b245cb2SAnirudh Venkataramanan /** 1142711987bbSBrett Creeley * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic 1143711987bbSBrett Creeley * @port_info: port_info structure containing the current link speed 1144711987bbSBrett Creeley * @avg_pkt_size: average size of Tx or Rx packets based on clean routine 11452f2da36eSAnirudh Venkataramanan * @itr: ITR value to update 1146711987bbSBrett Creeley * 1147711987bbSBrett Creeley * Calculate how big of an increment should be applied to the ITR value passed 1148711987bbSBrett Creeley * in based on wmem_default, SKB overhead, Ethernet overhead, and the current 1149711987bbSBrett Creeley * link speed. 1150711987bbSBrett Creeley * 1151711987bbSBrett Creeley * The following is a calculation derived from: 1152711987bbSBrett Creeley * wmem_default / (size + overhead) = desired_pkts_per_int 1153711987bbSBrett Creeley * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate 1154711987bbSBrett Creeley * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1155711987bbSBrett Creeley * 1156711987bbSBrett Creeley * Assuming wmem_default is 212992 and overhead is 640 bytes per 1157711987bbSBrett Creeley * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1158711987bbSBrett Creeley * formula down to: 1159711987bbSBrett Creeley * 1160711987bbSBrett Creeley * ITR = (wmem_default * bits_per_byte * usecs_per_sec / rate) * 1161711987bbSBrett Creeley * ((pkt_size + 24) / (pkt_size + 640)) 1162711987bbSBrett Creeley * e.g. at 10GB the first factor is 212992 * 8 / 10^9 sec ~= 170 usecs (the 170 used in the 10GB case below); with 1500 byte packets the increment is DIV_ROUND_UP(170 * 1524, 2140) = 122 usecs. 1163711987bbSBrett Creeley */ 1164711987bbSBrett Creeley static unsigned int 1165711987bbSBrett Creeley ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, 1166711987bbSBrett Creeley unsigned int avg_pkt_size, 1167711987bbSBrett Creeley unsigned int itr) 116864a59d05SAnirudh Venkataramanan { 1169711987bbSBrett Creeley switch (port_info->phy.link_info.link_speed) { 1170711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_100GB: 1171711987bbSBrett Creeley itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), 1172711987bbSBrett Creeley avg_pkt_size + 640); 1173711987bbSBrett Creeley break; 1174711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_50GB: 1175711987bbSBrett Creeley itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), 1176711987bbSBrett Creeley avg_pkt_size + 640); 1177711987bbSBrett Creeley break; 117864a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_40GB: 1179711987bbSBrett Creeley itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), 1180711987bbSBrett Creeley avg_pkt_size + 640); 1181711987bbSBrett Creeley break; 118264a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_25GB: 1183711987bbSBrett Creeley itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), 1184711987bbSBrett Creeley avg_pkt_size + 640); 1185711987bbSBrett Creeley break; 118664a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_20GB: 1187711987bbSBrett Creeley itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), 1188711987bbSBrett Creeley avg_pkt_size + 640); 1189711987bbSBrett Creeley break; 1190711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_10GB: 119164a59d05SAnirudh Venkataramanan default: 1192711987bbSBrett Creeley itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), 1193711987bbSBrett Creeley avg_pkt_size + 640); 1194711987bbSBrett Creeley break; 119564a59d05SAnirudh Venkataramanan } 1196711987bbSBrett Creeley 1197711987bbSBrett Creeley if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
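		/* strip the usecs portion but keep the latency flag, then
		 * re-add the largest allowed interval so the returned value
		 * never exceeds the adaptive-mode maximum
		 */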
1198711987bbSBrett Creeley itr &= ICE_ITR_ADAPTIVE_LATENCY; 1199711987bbSBrett Creeley itr += ICE_ITR_ADAPTIVE_MAX_USECS; 1200711987bbSBrett Creeley } 1201711987bbSBrett Creeley 1202711987bbSBrett Creeley return itr; 120364a59d05SAnirudh Venkataramanan } 120464a59d05SAnirudh Venkataramanan 120564a59d05SAnirudh Venkataramanan /** 120664a59d05SAnirudh Venkataramanan * ice_update_itr - update the adaptive ITR value based on statistics 120764a59d05SAnirudh Venkataramanan * @q_vector: structure containing interrupt and ring information 120864a59d05SAnirudh Venkataramanan * @rc: structure containing ring performance data 120964a59d05SAnirudh Venkataramanan * 121064a59d05SAnirudh Venkataramanan * Stores a new ITR value based on packets and byte 121164a59d05SAnirudh Venkataramanan * counts during the last interrupt. The advantage of per interrupt 121264a59d05SAnirudh Venkataramanan * computation is faster updates and more accurate ITR for the current 121364a59d05SAnirudh Venkataramanan * traffic pattern. Constants in this function were computed 121464a59d05SAnirudh Venkataramanan * based on theoretical maximum wire speed and thresholds were set based 121564a59d05SAnirudh Venkataramanan * on testing data as well as attempting to minimize response time 121664a59d05SAnirudh Venkataramanan * while increasing bulk throughput. 121764a59d05SAnirudh Venkataramanan */ 121864a59d05SAnirudh Venkataramanan static void 121964a59d05SAnirudh Venkataramanan ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) 122064a59d05SAnirudh Venkataramanan { 122164a59d05SAnirudh Venkataramanan unsigned long next_update = jiffies; 1222711987bbSBrett Creeley unsigned int packets, bytes, itr; 122364a59d05SAnirudh Venkataramanan bool container_is_rx; 122464a59d05SAnirudh Venkataramanan 122564a59d05SAnirudh Venkataramanan if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) 122664a59d05SAnirudh Venkataramanan return; 122764a59d05SAnirudh Venkataramanan 122864a59d05SAnirudh Venkataramanan /* If itr_countdown is set it means we programmed an ITR within 122964a59d05SAnirudh Venkataramanan * the last 4 interrupt cycles. This has a side effect of us 123064a59d05SAnirudh Venkataramanan * potentially firing an early interrupt. In order to work around 123164a59d05SAnirudh Venkataramanan * this we need to throw out any data received for a few 123264a59d05SAnirudh Venkataramanan * interrupts following the update. 123364a59d05SAnirudh Venkataramanan */ 123464a59d05SAnirudh Venkataramanan if (q_vector->itr_countdown) { 123564a59d05SAnirudh Venkataramanan itr = rc->target_itr; 123664a59d05SAnirudh Venkataramanan goto clear_counts; 123764a59d05SAnirudh Venkataramanan } 123864a59d05SAnirudh Venkataramanan 123964a59d05SAnirudh Venkataramanan container_is_rx = (&q_vector->rx == rc); 124064a59d05SAnirudh Venkataramanan /* For Rx we want to push the delay up and default to low latency. 124164a59d05SAnirudh Venkataramanan * for Tx we want to pull the delay down and default to high latency. 124264a59d05SAnirudh Venkataramanan */ 124364a59d05SAnirudh Venkataramanan itr = container_is_rx ? 
124464a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : 124564a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; 124664a59d05SAnirudh Venkataramanan 124764a59d05SAnirudh Venkataramanan /* If we haven't updated within the last 1 - 2 jiffies we can assume 124864a59d05SAnirudh Venkataramanan * that either packets are coming in so slow there hasn't been 124964a59d05SAnirudh Venkataramanan * any work, or that there is so much work that NAPI is dealing 125064a59d05SAnirudh Venkataramanan * with interrupt moderation and we don't need to do anything. 125164a59d05SAnirudh Venkataramanan */ 125264a59d05SAnirudh Venkataramanan if (time_after(next_update, rc->next_update)) 125364a59d05SAnirudh Venkataramanan goto clear_counts; 125464a59d05SAnirudh Venkataramanan 1255d27525ecSJesse Brandeburg prefetch(q_vector->vsi->port_info); 1256d27525ecSJesse Brandeburg 125764a59d05SAnirudh Venkataramanan packets = rc->total_pkts; 125864a59d05SAnirudh Venkataramanan bytes = rc->total_bytes; 125964a59d05SAnirudh Venkataramanan 126064a59d05SAnirudh Venkataramanan if (container_is_rx) { 126164a59d05SAnirudh Venkataramanan /* For Rx, if there are 1 to 4 packets and fewer than 9000 bytes, 126264a59d05SAnirudh Venkataramanan * assume insufficient data to use the bulk rate limiting approach 126364a59d05SAnirudh Venkataramanan * unless Tx is already in bulk rate limiting. We are likely 126464a59d05SAnirudh Venkataramanan * latency driven. 126564a59d05SAnirudh Venkataramanan */ 126664a59d05SAnirudh Venkataramanan if (packets && packets < 4 && bytes < 9000 && 126764a59d05SAnirudh Venkataramanan (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { 126864a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_LATENCY; 1269711987bbSBrett Creeley goto adjust_by_size_and_speed; 127064a59d05SAnirudh Venkataramanan } 127164a59d05SAnirudh Venkataramanan } else if (packets < 4) { 127264a59d05SAnirudh Venkataramanan /* If we have Tx and Rx ITR maxed and Tx ITR is running in 127364a59d05SAnirudh Venkataramanan * bulk mode and we are receiving 4 or fewer packets just 127464a59d05SAnirudh Venkataramanan * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 127564a59d05SAnirudh Venkataramanan * that the Rx can relax. 127664a59d05SAnirudh Venkataramanan */ 127764a59d05SAnirudh Venkataramanan if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && 127864a59d05SAnirudh Venkataramanan (q_vector->rx.target_itr & ICE_ITR_MASK) == 127964a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MAX_USECS) 128064a59d05SAnirudh Venkataramanan goto clear_counts; 128164a59d05SAnirudh Venkataramanan } else if (packets > 32) { 128264a59d05SAnirudh Venkataramanan /* If we have processed over 32 packets in a single interrupt 128364a59d05SAnirudh Venkataramanan * for Tx assume we need to switch over to "bulk" mode. 128464a59d05SAnirudh Venkataramanan */ 128564a59d05SAnirudh Venkataramanan rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; 128664a59d05SAnirudh Venkataramanan } 128764a59d05SAnirudh Venkataramanan 128864a59d05SAnirudh Venkataramanan /* We have no packets to actually measure against. This means 128964a59d05SAnirudh Venkataramanan * either one of the other queues on this vector is active or 129064a59d05SAnirudh Venkataramanan * we are a Tx queue doing TSO with too high of an interrupt rate. 129164a59d05SAnirudh Venkataramanan * 129264a59d05SAnirudh Venkataramanan * Between 4 and 56 we can assume that our current interrupt delay 129364a59d05SAnirudh Venkataramanan * is only slightly too low.
As such we should increase it by a small 129464a59d05SAnirudh Venkataramanan * fixed amount. 129564a59d05SAnirudh Venkataramanan */ 129664a59d05SAnirudh Venkataramanan if (packets < 56) { 129764a59d05SAnirudh Venkataramanan itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; 129864a59d05SAnirudh Venkataramanan if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 129964a59d05SAnirudh Venkataramanan itr &= ICE_ITR_ADAPTIVE_LATENCY; 130064a59d05SAnirudh Venkataramanan itr += ICE_ITR_ADAPTIVE_MAX_USECS; 130164a59d05SAnirudh Venkataramanan } 130264a59d05SAnirudh Venkataramanan goto clear_counts; 130364a59d05SAnirudh Venkataramanan } 130464a59d05SAnirudh Venkataramanan 130564a59d05SAnirudh Venkataramanan if (packets <= 256) { 130664a59d05SAnirudh Venkataramanan itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 130764a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 130864a59d05SAnirudh Venkataramanan 130964a59d05SAnirudh Venkataramanan /* Between 56 and 112 is our "goldilocks" zone where we are 131064a59d05SAnirudh Venkataramanan * working out "just right". Just report that our current 131164a59d05SAnirudh Venkataramanan * ITR is good for us. 131264a59d05SAnirudh Venkataramanan */ 131364a59d05SAnirudh Venkataramanan if (packets <= 112) 131464a59d05SAnirudh Venkataramanan goto clear_counts; 131564a59d05SAnirudh Venkataramanan 131664a59d05SAnirudh Venkataramanan /* If packet count is 128 or greater we are likely looking 131764a59d05SAnirudh Venkataramanan * at a slight overrun of the delay we want. Try halving 131864a59d05SAnirudh Venkataramanan * our delay to see if that will cut the number of packets 131964a59d05SAnirudh Venkataramanan * in half per interrupt. 132064a59d05SAnirudh Venkataramanan */ 132164a59d05SAnirudh Venkataramanan itr >>= 1; 132264a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 132364a59d05SAnirudh Venkataramanan if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) 132464a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_MIN_USECS; 132564a59d05SAnirudh Venkataramanan 132664a59d05SAnirudh Venkataramanan goto clear_counts; 132764a59d05SAnirudh Venkataramanan } 132864a59d05SAnirudh Venkataramanan 132964a59d05SAnirudh Venkataramanan /* The paths below assume we are dealing with a bulk ITR since 133064a59d05SAnirudh Venkataramanan * number of packets is greater than 256. We are just going to have 133164a59d05SAnirudh Venkataramanan * to compute a value and try to bring the count under control, 133264a59d05SAnirudh Venkataramanan * though for smaller packet sizes there isn't much we can do as 133364a59d05SAnirudh Venkataramanan * NAPI polling will likely be kicking in sooner rather than later. 
133464a59d05SAnirudh Venkataramanan */ 133564a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_BULK; 133664a59d05SAnirudh Venkataramanan 1337711987bbSBrett Creeley adjust_by_size_and_speed: 133864a59d05SAnirudh Venkataramanan 1339711987bbSBrett Creeley /* based on checks above packets cannot be 0 so division is safe */ 1340711987bbSBrett Creeley itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, 1341711987bbSBrett Creeley bytes / packets, itr); 134264a59d05SAnirudh Venkataramanan 134364a59d05SAnirudh Venkataramanan clear_counts: 134464a59d05SAnirudh Venkataramanan /* write back value */ 134564a59d05SAnirudh Venkataramanan rc->target_itr = itr; 134664a59d05SAnirudh Venkataramanan 134764a59d05SAnirudh Venkataramanan /* next update should occur within next jiffy */ 134864a59d05SAnirudh Venkataramanan rc->next_update = next_update + 1; 134964a59d05SAnirudh Venkataramanan 135064a59d05SAnirudh Venkataramanan rc->total_bytes = 0; 135164a59d05SAnirudh Venkataramanan rc->total_pkts = 0; 135264a59d05SAnirudh Venkataramanan } 135364a59d05SAnirudh Venkataramanan 13542b245cb2SAnirudh Venkataramanan /** 135563f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 135663f545edSBrett Creeley * @itr_idx: interrupt throttling index 135764a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 135863f545edSBrett Creeley */ 13598244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 136063f545edSBrett Creeley { 13612f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 136264a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 136364a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 136464a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 136564a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 136664a59d05SAnirudh Venkataramanan * prior to the ITR field. 136764a59d05SAnirudh Venkataramanan */ 136864a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 136964a59d05SAnirudh Venkataramanan 137063f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 137163f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 137264a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 137363f545edSBrett Creeley } 137463f545edSBrett Creeley 137564a59d05SAnirudh Venkataramanan /* The act of updating the ITR will cause it to immediately trigger. In order 137664a59d05SAnirudh Venkataramanan * to prevent this from throwing off adaptive update statistics we defer the 137764a59d05SAnirudh Venkataramanan * update so that it can only happen so often. So after either Tx or Rx are 137864a59d05SAnirudh Venkataramanan * updated we make the adaptive scheme wait until either the ITR completely 137964a59d05SAnirudh Venkataramanan * expires via the next_update expiration or we have been through at least 138064a59d05SAnirudh Venkataramanan * 3 interrupts. 
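 * The countdown is armed each time a new ITR value is actually written and
 * is decremented once per interrupt on the no-change path, so roughly three
 * interrupts' worth of statistics are discarded after every update.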
138164a59d05SAnirudh Venkataramanan */ 138264a59d05SAnirudh Venkataramanan #define ITR_COUNTDOWN_START 3 138364a59d05SAnirudh Venkataramanan 138463f545edSBrett Creeley /** 138563f545edSBrett Creeley * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt 138663f545edSBrett Creeley * @q_vector: q_vector for which ITR is being updated and interrupt enabled 138763f545edSBrett Creeley */ 13882fb0821fSJesse Brandeburg static void ice_update_ena_itr(struct ice_q_vector *q_vector) 138963f545edSBrett Creeley { 139064a59d05SAnirudh Venkataramanan struct ice_ring_container *tx = &q_vector->tx; 139164a59d05SAnirudh Venkataramanan struct ice_ring_container *rx = &q_vector->rx; 13922fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 139363f545edSBrett Creeley u32 itr_val; 139363f545edSBrett Creeley 13952ab28bb0SBrett Creeley /* when exiting WB_ON_ITR let's set a low ITR value and trigger 13962ab28bb0SBrett Creeley * interrupts to expire right away in case we have more work ready to go 13972ab28bb0SBrett Creeley * already 13982ab28bb0SBrett Creeley */ 13992ab28bb0SBrett Creeley if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) { 14002ab28bb0SBrett Creeley itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS); 14012ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 14022ab28bb0SBrett Creeley /* set target back to last user set value */ 14032ab28bb0SBrett Creeley rx->target_itr = rx->itr_setting; 14042ab28bb0SBrett Creeley /* set current to what we just wrote and dynamic if needed */ 14052ab28bb0SBrett Creeley rx->current_itr = ICE_WB_ON_ITR_USECS | 14062ab28bb0SBrett Creeley (rx->itr_setting & ICE_ITR_DYNAMIC); 14072ab28bb0SBrett Creeley /* allow normal interrupt flow to start */ 14082ab28bb0SBrett Creeley q_vector->itr_countdown = 0; 14092ab28bb0SBrett Creeley return; 14102ab28bb0SBrett Creeley } 14112ab28bb0SBrett Creeley 141264a59d05SAnirudh Venkataramanan /* This will do nothing if dynamic updates are not enabled */ 141364a59d05SAnirudh Venkataramanan ice_update_itr(q_vector, tx); 141464a59d05SAnirudh Venkataramanan ice_update_itr(q_vector, rx); 141564a59d05SAnirudh Venkataramanan 141663f545edSBrett Creeley /* This block of logic allows us to get away with only updating 141763f545edSBrett Creeley * one ITR value with each interrupt. The idea is to perform a 141863f545edSBrett Creeley * pseudo-lazy update with the following criteria. 141963f545edSBrett Creeley * 142063f545edSBrett Creeley * 1. Rx is given higher priority than Tx if both are in the same state 142163f545edSBrett Creeley * 2. If we must reduce an ITR, that reduction is given highest priority 142263f545edSBrett Creeley * 3. We then give priority to increasing ITR based on amount.
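 *
 * For example, if Rx wants to drop from 50 to 8 usecs while Tx wants to grow
 * from 50 to 84, the Rx reduction is applied on this interrupt and the Tx
 * increase has to wait for a later one.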
142363f545edSBrett Creeley */ 142464a59d05SAnirudh Venkataramanan if (rx->target_itr < rx->current_itr) { 142563f545edSBrett Creeley /* Rx ITR needs to be reduced, this is highest priority */ 142664a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 142764a59d05SAnirudh Venkataramanan rx->current_itr = rx->target_itr; 142864a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 142964a59d05SAnirudh Venkataramanan } else if ((tx->target_itr < tx->current_itr) || 143064a59d05SAnirudh Venkataramanan ((rx->target_itr - rx->current_itr) < 143164a59d05SAnirudh Venkataramanan (tx->target_itr - tx->current_itr))) { 143263f545edSBrett Creeley /* Tx ITR needs to be reduced, this is second priority 143363f545edSBrett Creeley * Tx ITR needs to be increased more than Rx, fourth priority 143463f545edSBrett Creeley */ 143564a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); 143664a59d05SAnirudh Venkataramanan tx->current_itr = tx->target_itr; 143764a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 143864a59d05SAnirudh Venkataramanan } else if (rx->current_itr != rx->target_itr) { 143963f545edSBrett Creeley /* Rx ITR needs to be increased, third priority */ 144064a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 144164a59d05SAnirudh Venkataramanan rx->current_itr = rx->target_itr; 144264a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 144363f545edSBrett Creeley } else { 144463f545edSBrett Creeley /* Still have to re-enable the interrupts */ 144563f545edSBrett Creeley itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 144664a59d05SAnirudh Venkataramanan if (q_vector->itr_countdown) 144764a59d05SAnirudh Venkataramanan q_vector->itr_countdown--; 144863f545edSBrett Creeley } 144963f545edSBrett Creeley 14502fb0821fSJesse Brandeburg if (!test_bit(__ICE_DOWN, q_vector->vsi->state)) 14512fb0821fSJesse Brandeburg wr32(&q_vector->vsi->back->hw, 1452b07833a0SBrett Creeley GLINT_DYN_CTL(q_vector->reg_idx), 145364a59d05SAnirudh Venkataramanan itr_val); 145463f545edSBrett Creeley } 145563f545edSBrett Creeley 145663f545edSBrett Creeley /** 14572ab28bb0SBrett Creeley * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 14582ab28bb0SBrett Creeley * @q_vector: q_vector to set WB_ON_ITR on 14592ab28bb0SBrett Creeley * 14602ab28bb0SBrett Creeley * We need to tell hardware to write-back completed descriptors even when 14612ab28bb0SBrett Creeley * interrupts are disabled. Descriptors will be written back on cache line 14622ab28bb0SBrett Creeley * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 14632ab28bb0SBrett Creeley * descriptors may not be written back if they don't fill a cache line until the 14642ab28bb0SBrett Creeley * next interrupt. 14652ab28bb0SBrett Creeley * 14662ab28bb0SBrett Creeley * This sets the write-back frequency to 2 microseconds as that is the minimum 14672ab28bb0SBrett Creeley * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to 14682ab28bb0SBrett Creeley * make sure hardware knows we aren't meddling with the INTENA_M bit. 
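 * (With INTENA_MSK set, the write leaves the interrupt enable state exactly
 * as it was, so arming WB_ON_ITR never accidentally re-enables the
 * interrupt.)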
14692ab28bb0SBrett Creeley */ 14702fb0821fSJesse Brandeburg static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 14712ab28bb0SBrett Creeley { 14722fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 14732fb0821fSJesse Brandeburg 14742ab28bb0SBrett Creeley /* already in WB_ON_ITR mode no need to change it */ 14752ab28bb0SBrett Creeley if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) 14762ab28bb0SBrett Creeley return; 14772ab28bb0SBrett Creeley 14782ab28bb0SBrett Creeley if (q_vector->num_ring_rx) 14792ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 14802ab28bb0SBrett Creeley ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, 14812ab28bb0SBrett Creeley ICE_RX_ITR)); 14822ab28bb0SBrett Creeley 14832ab28bb0SBrett Creeley if (q_vector->num_ring_tx) 14842ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 14852ab28bb0SBrett Creeley ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, 14862ab28bb0SBrett Creeley ICE_TX_ITR)); 14872ab28bb0SBrett Creeley 14882ab28bb0SBrett Creeley q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; 14892ab28bb0SBrett Creeley } 14902ab28bb0SBrett Creeley 14912ab28bb0SBrett Creeley /** 14922b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 14932b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 14942b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 14952b245cb2SAnirudh Venkataramanan * 14962b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 14972b245cb2SAnirudh Venkataramanan * 14982b245cb2SAnirudh Venkataramanan * Returns the amount of work done 14992b245cb2SAnirudh Venkataramanan */ 15002b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 15012b245cb2SAnirudh Venkataramanan { 15022b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 15032b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 15042b245cb2SAnirudh Venkataramanan bool clean_complete = true; 15052b245cb2SAnirudh Venkataramanan struct ice_ring *ring; 15069118fcd5SBrett Creeley int budget_per_ring; 15072b245cb2SAnirudh Venkataramanan int work_done = 0; 15082b245cb2SAnirudh Venkataramanan 15092b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 15102b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 15112b245cb2SAnirudh Venkataramanan */ 15122d4238f5SKrzysztof Kazimierczak ice_for_each_ring(ring, q_vector->tx) { 15132d4238f5SKrzysztof Kazimierczak bool wd = ring->xsk_umem ? 
15142d4238f5SKrzysztof Kazimierczak ice_clean_tx_irq_zc(ring, budget) : 15152d4238f5SKrzysztof Kazimierczak ice_clean_tx_irq(ring, budget); 15162d4238f5SKrzysztof Kazimierczak 15172d4238f5SKrzysztof Kazimierczak if (!wd) 15182b245cb2SAnirudh Venkataramanan clean_complete = false; 15192d4238f5SKrzysztof Kazimierczak } 15202b245cb2SAnirudh Venkataramanan 15212b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 1522d27525ecSJesse Brandeburg if (unlikely(budget <= 0)) 15232b245cb2SAnirudh Venkataramanan return budget; 15242b245cb2SAnirudh Venkataramanan 15259118fcd5SBrett Creeley /* normally we have 1 Rx ring per q_vector */ 15269118fcd5SBrett Creeley if (unlikely(q_vector->num_ring_rx > 1)) 15279118fcd5SBrett Creeley /* We attempt to distribute budget to each Rx queue fairly, but 15289118fcd5SBrett Creeley * don't allow the budget to go below 1 because that would exit 15299118fcd5SBrett Creeley * polling early. 15302b245cb2SAnirudh Venkataramanan */ 15312b245cb2SAnirudh Venkataramanan budget_per_ring = max(budget / q_vector->num_ring_rx, 1); 15329118fcd5SBrett Creeley else 15339118fcd5SBrett Creeley /* Max of 1 Rx ring in this q_vector so give it the budget */ 15349118fcd5SBrett Creeley budget_per_ring = budget; 15352b245cb2SAnirudh Venkataramanan 15362b245cb2SAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->rx) { 15372b245cb2SAnirudh Venkataramanan int cleaned; 15382b245cb2SAnirudh Venkataramanan 15392d4238f5SKrzysztof Kazimierczak /* A dedicated path for zero-copy allows making a single 15402d4238f5SKrzysztof Kazimierczak * comparison in the irq context instead of many inside the 15412d4238f5SKrzysztof Kazimierczak * ice_clean_rx_irq function and makes the codebase cleaner. 15422d4238f5SKrzysztof Kazimierczak */ 15432d4238f5SKrzysztof Kazimierczak cleaned = ring->xsk_umem ? 
15442d4238f5SKrzysztof Kazimierczak ice_clean_rx_irq_zc(ring, budget_per_ring) : 15452d4238f5SKrzysztof Kazimierczak ice_clean_rx_irq(ring, budget_per_ring); 15462b245cb2SAnirudh Venkataramanan work_done += cleaned; 15472b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 15482b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 15492b245cb2SAnirudh Venkataramanan clean_complete = false; 15502b245cb2SAnirudh Venkataramanan } 15512b245cb2SAnirudh Venkataramanan 15522b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 15532b245cb2SAnirudh Venkataramanan if (!clean_complete) 15542b245cb2SAnirudh Venkataramanan return budget; 15552b245cb2SAnirudh Venkataramanan 15560bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 15570bcd952fSJesse Brandeburg * poll us due to busy-polling 15580bcd952fSJesse Brandeburg */ 15590bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) 15602fb0821fSJesse Brandeburg ice_update_ena_itr(q_vector); 15612ab28bb0SBrett Creeley else 15622fb0821fSJesse Brandeburg ice_set_wb_on_itr(q_vector); 1563e0c9fd9bSDave Ertman 156432a64994SBruce Allan return min_t(int, work_done, budget - 1); 15652b245cb2SAnirudh Venkataramanan } 15662b245cb2SAnirudh Venkataramanan 15672b245cb2SAnirudh Venkataramanan /** 1568d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 15692b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15702b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15712b245cb2SAnirudh Venkataramanan * 15722b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 15732b245cb2SAnirudh Venkataramanan */ 15742b245cb2SAnirudh Venkataramanan static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 15752b245cb2SAnirudh Venkataramanan { 15762b245cb2SAnirudh Venkataramanan netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 15772b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 15782b245cb2SAnirudh Venkataramanan smp_mb(); 15792b245cb2SAnirudh Venkataramanan 15802b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. */ 15812b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 15822b245cb2SAnirudh Venkataramanan return -EBUSY; 15832b245cb2SAnirudh Venkataramanan 15842b245cb2SAnirudh Venkataramanan /* A reprieve! 
- use start_subqueue because it doesn't call schedule */ 15852b245cb2SAnirudh Venkataramanan netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 15862b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 15872b245cb2SAnirudh Venkataramanan return 0; 15882b245cb2SAnirudh Venkataramanan } 15892b245cb2SAnirudh Venkataramanan 15902b245cb2SAnirudh Venkataramanan /** 1591d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 15922b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15932b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15942b245cb2SAnirudh Venkataramanan * 15952b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 15962b245cb2SAnirudh Venkataramanan */ 15972b245cb2SAnirudh Venkataramanan static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 15982b245cb2SAnirudh Venkataramanan { 15992b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 16002b245cb2SAnirudh Venkataramanan return 0; 1601d337f2afSAnirudh Venkataramanan 16022b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 16032b245cb2SAnirudh Venkataramanan } 16042b245cb2SAnirudh Venkataramanan 16052b245cb2SAnirudh Venkataramanan /** 16062b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 16072b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 16082b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1609d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 16102b245cb2SAnirudh Venkataramanan * 16112b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 16122b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 16132b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
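 * Chunks larger than ICE_MAX_DATA_PER_TXD are split across multiple
 * descriptors, with each intermediate chunk sized so that it ends on an
 * ICE_MAX_READ_REQ_SIZE boundary of the DMA address.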
16142b245cb2SAnirudh Venkataramanan */ 1615d76a60baSAnirudh Venkataramanan static void 1616d76a60baSAnirudh Venkataramanan ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, 1617d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 16182b245cb2SAnirudh Venkataramanan { 1619d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 16202b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 16212b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 16222b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 16232b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 16242b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 16254ee656bbSTony Nguyen skb_frag_t *frag; 16262b245cb2SAnirudh Venkataramanan dma_addr_t dma; 16272b245cb2SAnirudh Venkataramanan 1628d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1629d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1630d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 16312b245cb2SAnirudh Venkataramanan skb = first->skb; 16322b245cb2SAnirudh Venkataramanan 16332b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 16342b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 16352b245cb2SAnirudh Venkataramanan 16362b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 16372b245cb2SAnirudh Venkataramanan 1638d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1639d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1640d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1641d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1642d76a60baSAnirudh Venkataramanan } 1643d76a60baSAnirudh Venkataramanan 16442b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 16452b245cb2SAnirudh Venkataramanan 16462b245cb2SAnirudh Venkataramanan tx_buf = first; 16472b245cb2SAnirudh Venkataramanan 16482b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 16492b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16502b245cb2SAnirudh Venkataramanan 16512b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 16522b245cb2SAnirudh Venkataramanan goto dma_error; 16532b245cb2SAnirudh Venkataramanan 16542b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 16552b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 16562b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 16572b245cb2SAnirudh Venkataramanan 16582b245cb2SAnirudh Venkataramanan /* align size to end of page */ 16592b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 16602b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16612b245cb2SAnirudh Venkataramanan 16622b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 16632b245cb2SAnirudh Venkataramanan * can handle 16642b245cb2SAnirudh Venkataramanan */ 16652b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 16662b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 16672b245cb2SAnirudh Venkataramanan build_ctob(td_cmd, td_offset, max_data, td_tag); 16682b245cb2SAnirudh Venkataramanan 16692b245cb2SAnirudh Venkataramanan tx_desc++; 16702b245cb2SAnirudh Venkataramanan i++; 16712b245cb2SAnirudh Venkataramanan 16722b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16732b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 
0); 16742b245cb2SAnirudh Venkataramanan i = 0; 16752b245cb2SAnirudh Venkataramanan } 16762b245cb2SAnirudh Venkataramanan 16772b245cb2SAnirudh Venkataramanan dma += max_data; 16782b245cb2SAnirudh Venkataramanan size -= max_data; 16792b245cb2SAnirudh Venkataramanan 16802b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16812b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16822b245cb2SAnirudh Venkataramanan } 16832b245cb2SAnirudh Venkataramanan 16842b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 16852b245cb2SAnirudh Venkataramanan break; 16862b245cb2SAnirudh Venkataramanan 16872b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 16882b245cb2SAnirudh Venkataramanan size, td_tag); 16892b245cb2SAnirudh Venkataramanan 16902b245cb2SAnirudh Venkataramanan tx_desc++; 16912b245cb2SAnirudh Venkataramanan i++; 16922b245cb2SAnirudh Venkataramanan 16932b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16942b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 16952b245cb2SAnirudh Venkataramanan i = 0; 16962b245cb2SAnirudh Venkataramanan } 16972b245cb2SAnirudh Venkataramanan 16982b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 16992b245cb2SAnirudh Venkataramanan data_len -= size; 17002b245cb2SAnirudh Venkataramanan 17012b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 17022b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 17032b245cb2SAnirudh Venkataramanan 17042b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 17052b245cb2SAnirudh Venkataramanan } 17062b245cb2SAnirudh Venkataramanan 17072b245cb2SAnirudh Venkataramanan /* record bytecount for BQL */ 17082b245cb2SAnirudh Venkataramanan netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 17092b245cb2SAnirudh Venkataramanan 17102b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 17112b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 17122b245cb2SAnirudh Venkataramanan 17132b245cb2SAnirudh Venkataramanan i++; 17142b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 17152b245cb2SAnirudh Venkataramanan i = 0; 17162b245cb2SAnirudh Venkataramanan 17172b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 1718efc2214bSMaciej Fijalkowski td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 1719efc2214bSMaciej Fijalkowski tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, 1720efc2214bSMaciej Fijalkowski td_tag); 17212b245cb2SAnirudh Venkataramanan 17222b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 17232b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 17242b245cb2SAnirudh Venkataramanan * 17252b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 17262b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
17272b245cb2SAnirudh Venkataramanan */ 17282b245cb2SAnirudh Venkataramanan wmb(); 17292b245cb2SAnirudh Venkataramanan 17302b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 17312b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 17322b245cb2SAnirudh Venkataramanan 17332b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 17342b245cb2SAnirudh Venkataramanan 17352b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 17362b245cb2SAnirudh Venkataramanan 17372b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 17384ee656bbSTony Nguyen if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 17392b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 17402b245cb2SAnirudh Venkataramanan 17412b245cb2SAnirudh Venkataramanan return; 17422b245cb2SAnirudh Venkataramanan 17432b245cb2SAnirudh Venkataramanan dma_error: 17442f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 17452b245cb2SAnirudh Venkataramanan for (;;) { 17462b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 17472b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 17482b245cb2SAnirudh Venkataramanan if (tx_buf == first) 17492b245cb2SAnirudh Venkataramanan break; 17502b245cb2SAnirudh Venkataramanan if (i == 0) 17512b245cb2SAnirudh Venkataramanan i = tx_ring->count; 17522b245cb2SAnirudh Venkataramanan i--; 17532b245cb2SAnirudh Venkataramanan } 17542b245cb2SAnirudh Venkataramanan 17552b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 17562b245cb2SAnirudh Venkataramanan } 17572b245cb2SAnirudh Venkataramanan 17582b245cb2SAnirudh Venkataramanan /** 1759d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1760d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1761d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1762d76a60baSAnirudh Venkataramanan * 1763d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
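 * A return of 0 means the frame needs no offload (or a software checksum
 * was applied), 1 means the offload fields in @off were filled in, and a
 * negative value means the frame should be dropped.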
1764d76a60baSAnirudh Venkataramanan */ 1765d76a60baSAnirudh Venkataramanan static 1766d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1767d76a60baSAnirudh Venkataramanan { 1768d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1769d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1770d76a60baSAnirudh Venkataramanan union { 1771d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1772d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1773d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1774d76a60baSAnirudh Venkataramanan } ip; 1775d76a60baSAnirudh Venkataramanan union { 1776d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1777d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1778d76a60baSAnirudh Venkataramanan } l4; 1779d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1780d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1781d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1782d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1783d76a60baSAnirudh Venkataramanan 1784d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1785d76a60baSAnirudh Venkataramanan return 0; 1786d76a60baSAnirudh Venkataramanan 1787d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1788d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1789d76a60baSAnirudh Venkataramanan 1790d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1791d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1792d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1793d76a60baSAnirudh Venkataramanan 1794d76a60baSAnirudh Venkataramanan if (skb->encapsulation) 1795d76a60baSAnirudh Venkataramanan return -1; 1796d76a60baSAnirudh Venkataramanan 1797d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1798d76a60baSAnirudh Venkataramanan protocol = vlan_get_protocol(skb); 1799d76a60baSAnirudh Venkataramanan if (protocol == htons(ETH_P_IP)) { 1800d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1801d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already, the only time we 1802d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 
1803d76a60baSAnirudh Venkataramanan */ 1804d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1805d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1806d76a60baSAnirudh Venkataramanan else 1807d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1808d76a60baSAnirudh Venkataramanan 1809d76a60baSAnirudh Venkataramanan } else if (protocol == htons(ETH_P_IPV6)) { 1810d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1811d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1812d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1813d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1814d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1815d76a60baSAnirudh Venkataramanan &frag_off); 1816d76a60baSAnirudh Venkataramanan } else { 1817d76a60baSAnirudh Venkataramanan return -1; 1818d76a60baSAnirudh Venkataramanan } 1819d76a60baSAnirudh Venkataramanan 1820d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1821d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1822d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1823d76a60baSAnirudh Venkataramanan 1824d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1825d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1826d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 1827d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 1828d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1829d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 1830d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1831d76a60baSAnirudh Venkataramanan break; 1832d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 1833d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 1834d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1835d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 1836d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1837d76a60baSAnirudh Venkataramanan break; 1838d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 1839cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 1840cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1841cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 1842cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1843cf909e19SAnirudh Venkataramanan break; 1844cf909e19SAnirudh Venkataramanan 1845d76a60baSAnirudh Venkataramanan default: 1846d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1847d76a60baSAnirudh Venkataramanan return -1; 1848d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 1849d76a60baSAnirudh Venkataramanan return 0; 1850d76a60baSAnirudh Venkataramanan } 1851d76a60baSAnirudh Venkataramanan 1852d76a60baSAnirudh Venkataramanan off->td_cmd |= cmd; 1853d76a60baSAnirudh Venkataramanan off->td_offset |= offset; 1854d76a60baSAnirudh Venkataramanan return 1; 1855d76a60baSAnirudh Venkataramanan } 1856d76a60baSAnirudh Venkataramanan 1857d76a60baSAnirudh Venkataramanan /** 1858f9867df6SAnirudh Venkataramanan * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 1859d76a60baSAnirudh Venkataramanan * @tx_ring: ring to send buffer on 1860d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1861d76a60baSAnirudh Venkataramanan * 1862d76a60baSAnirudh Venkataramanan * Checks the skb and set 
up the generic transmit flags 1863d76a60baSAnirudh Venkataramanan * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 1864d76a60baSAnirudh Venkataramanan * 1865d76a60baSAnirudh Venkataramanan * Returns an error code (negative) to indicate the frame should be dropped; 1866d76a60baSAnirudh Venkataramanan * otherwise returns 0 to indicate the flags have been set properly. 1867d76a60baSAnirudh Venkataramanan */ 1868d76a60baSAnirudh Venkataramanan static int 1869d76a60baSAnirudh Venkataramanan ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) 1870d76a60baSAnirudh Venkataramanan { 1871d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1872d76a60baSAnirudh Venkataramanan __be16 protocol = skb->protocol; 1873d76a60baSAnirudh Venkataramanan 1874d76a60baSAnirudh Venkataramanan if (protocol == htons(ETH_P_8021Q) && 1875d76a60baSAnirudh Venkataramanan !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 1876d76a60baSAnirudh Venkataramanan /* when HW VLAN acceleration is turned off by the user the 1877d76a60baSAnirudh Venkataramanan * stack sets the protocol to 8021q so that the driver 1878d76a60baSAnirudh Venkataramanan * can take any steps required to support the SW only 1879d76a60baSAnirudh Venkataramanan * VLAN handling. In our case the driver doesn't need 1880d76a60baSAnirudh Venkataramanan * to take any further steps so just set the protocol 1881d76a60baSAnirudh Venkataramanan * to the encapsulated ethertype. 1882d76a60baSAnirudh Venkataramanan */ 1883d76a60baSAnirudh Venkataramanan skb->protocol = vlan_get_protocol(skb); 18845f6aa50eSAnirudh Venkataramanan return 0; 1885d76a60baSAnirudh Venkataramanan } 1886d76a60baSAnirudh Venkataramanan 1887d76a60baSAnirudh Venkataramanan /* if we have a HW VLAN tag being added, default to the HW one */ 1888d76a60baSAnirudh Venkataramanan if (skb_vlan_tag_present(skb)) { 1889d76a60baSAnirudh Venkataramanan first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 1890d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 1891d76a60baSAnirudh Venkataramanan } else if (protocol == htons(ETH_P_8021Q)) { 1892d76a60baSAnirudh Venkataramanan struct vlan_hdr *vhdr, _vhdr; 1893d76a60baSAnirudh Venkataramanan 1894d76a60baSAnirudh Venkataramanan /* for SW VLAN, check the next protocol and store the tag */ 1895d76a60baSAnirudh Venkataramanan vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN, 1896d76a60baSAnirudh Venkataramanan sizeof(_vhdr), 1897d76a60baSAnirudh Venkataramanan &_vhdr); 1898d76a60baSAnirudh Venkataramanan if (!vhdr) 1899d76a60baSAnirudh Venkataramanan return -EINVAL; 1900d76a60baSAnirudh Venkataramanan 1901d76a60baSAnirudh Venkataramanan first->tx_flags |= ntohs(vhdr->h_vlan_TCI) << 1902d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1903d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_SW_VLAN; 1904d76a60baSAnirudh Venkataramanan } 1905d76a60baSAnirudh Venkataramanan 19065f6aa50eSAnirudh Venkataramanan return ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 1907d76a60baSAnirudh Venkataramanan } 1908d76a60baSAnirudh Venkataramanan 1909d76a60baSAnirudh Venkataramanan /** 1910d76a60baSAnirudh Venkataramanan * ice_tso - computes mss and TSO length to prepare for TSO 1911d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1912d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1913d76a60baSAnirudh Venkataramanan * 1914d76a60baSAnirudh Venkataramanan * Returns 0 or error
(negative) if TSO can't happen, 1 otherwise.
1915d76a60baSAnirudh Venkataramanan  */
1916d76a60baSAnirudh Venkataramanan static
1917d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1918d76a60baSAnirudh Venkataramanan {
1919d76a60baSAnirudh Venkataramanan 	struct sk_buff *skb = first->skb;
1920d76a60baSAnirudh Venkataramanan 	union {
1921d76a60baSAnirudh Venkataramanan 		struct iphdr *v4;
1922d76a60baSAnirudh Venkataramanan 		struct ipv6hdr *v6;
1923d76a60baSAnirudh Venkataramanan 		unsigned char *hdr;
1924d76a60baSAnirudh Venkataramanan 	} ip;
1925d76a60baSAnirudh Venkataramanan 	union {
1926d76a60baSAnirudh Venkataramanan 		struct tcphdr *tcp;
1927a54e3b8cSBrett Creeley 		struct udphdr *udp;
1928d76a60baSAnirudh Venkataramanan 		unsigned char *hdr;
1929d76a60baSAnirudh Venkataramanan 	} l4;
1930d76a60baSAnirudh Venkataramanan 	u64 cd_mss, cd_tso_len;
1931d76a60baSAnirudh Venkataramanan 	u32 paylen, l4_start;
1932d76a60baSAnirudh Venkataramanan 	int err;
1933d76a60baSAnirudh Venkataramanan 
1934d76a60baSAnirudh Venkataramanan 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1935d76a60baSAnirudh Venkataramanan 		return 0;
1936d76a60baSAnirudh Venkataramanan 
1937d76a60baSAnirudh Venkataramanan 	if (!skb_is_gso(skb))
1938d76a60baSAnirudh Venkataramanan 		return 0;
1939d76a60baSAnirudh Venkataramanan 
1940d76a60baSAnirudh Venkataramanan 	err = skb_cow_head(skb, 0);
1941d76a60baSAnirudh Venkataramanan 	if (err < 0)
1942d76a60baSAnirudh Venkataramanan 		return err;
1943d76a60baSAnirudh Venkataramanan 
1944c3a6825eSBruce Allan 	/* cppcheck-suppress unreadVariable */
1945d76a60baSAnirudh Venkataramanan 	ip.hdr = skb_network_header(skb);
1946d76a60baSAnirudh Venkataramanan 	l4.hdr = skb_transport_header(skb);
1947d76a60baSAnirudh Venkataramanan 
1948d76a60baSAnirudh Venkataramanan 	/* initialize outer IP header fields */
1949d76a60baSAnirudh Venkataramanan 	if (ip.v4->version == 4) {
1950d76a60baSAnirudh Venkataramanan 		ip.v4->tot_len = 0;
1951d76a60baSAnirudh Venkataramanan 		ip.v4->check = 0;
1952d76a60baSAnirudh Venkataramanan 	} else {
1953d76a60baSAnirudh Venkataramanan 		ip.v6->payload_len = 0;
1954d76a60baSAnirudh Venkataramanan 	}
1955d76a60baSAnirudh Venkataramanan 
1956d76a60baSAnirudh Venkataramanan 	/* determine offset of transport header */
1957d76a60baSAnirudh Venkataramanan 	l4_start = l4.hdr - skb->data;
1958d76a60baSAnirudh Venkataramanan 
1959d76a60baSAnirudh Venkataramanan 	/* remove payload length from checksum */
1960d76a60baSAnirudh Venkataramanan 	paylen = skb->len - l4_start;
1961d76a60baSAnirudh Venkataramanan 
1962a54e3b8cSBrett Creeley 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1963a54e3b8cSBrett Creeley 		csum_replace_by_diff(&l4.udp->check,
1964a54e3b8cSBrett Creeley 				     (__force __wsum)htonl(paylen));
1965a54e3b8cSBrett Creeley 		/* compute length of UDP segmentation header; use the header
		 * struct size, not sizeof(l4.udp), which is a pointer size
		 */
1966a54e3b8cSBrett Creeley 		off->header_len = sizeof(struct udphdr) + l4_start;
1967a54e3b8cSBrett Creeley 	} else {
1968a54e3b8cSBrett Creeley 		csum_replace_by_diff(&l4.tcp->check,
1969a54e3b8cSBrett Creeley 				     (__force __wsum)htonl(paylen));
1970a54e3b8cSBrett Creeley 		/* compute length of TCP segmentation header */
1971d76a60baSAnirudh Venkataramanan 		off->header_len = (l4.tcp->doff * 4) + l4_start;
1972a54e3b8cSBrett Creeley 	}
1973d76a60baSAnirudh Venkataramanan 
1974d76a60baSAnirudh Venkataramanan 	/* update gso_segs and bytecount */
1975d76a60baSAnirudh Venkataramanan 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1976d944b469SBrett Creeley 	first->bytecount += (first->gso_segs - 1) * off->header_len;
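	/* Illustrative example (editor-added, hypothetical numbers): with a
	 * 54-byte header (14 ETH + 20 IP + 20 TCP), gso_size 1448 and 7240
	 * payload bytes, gso_segs = 5 and bytecount grows by 4 * 54, since
	 * every segment after the first repeats the header on the wire.
	 */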
1977d76a60baSAnirudh Venkataramanan 1978d76a60baSAnirudh Venkataramanan cd_tso_len = skb->len - off->header_len; 1979d76a60baSAnirudh Venkataramanan cd_mss = skb_shinfo(skb)->gso_size; 1980d76a60baSAnirudh Venkataramanan 1981d76a60baSAnirudh Venkataramanan /* record cdesc_qw1 with TSO parameters */ 1982e65e9e15SBruce Allan off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 1983d76a60baSAnirudh Venkataramanan (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | 1984d76a60baSAnirudh Venkataramanan (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 1985e65e9e15SBruce Allan (cd_mss << ICE_TXD_CTX_QW1_MSS_S)); 1986d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_TSO; 1987d76a60baSAnirudh Venkataramanan return 1; 1988d76a60baSAnirudh Venkataramanan } 1989d76a60baSAnirudh Venkataramanan 1990d76a60baSAnirudh Venkataramanan /** 19912b245cb2SAnirudh Venkataramanan * ice_txd_use_count - estimate the number of descriptors needed for Tx 19922b245cb2SAnirudh Venkataramanan * @size: transmit request size in bytes 19932b245cb2SAnirudh Venkataramanan * 19942b245cb2SAnirudh Venkataramanan * Due to hardware alignment restrictions (4K alignment), we need to 19952b245cb2SAnirudh Venkataramanan * assume that we can have no more than 12K of data per descriptor, even 19962b245cb2SAnirudh Venkataramanan * though each descriptor can take up to 16K - 1 bytes of aligned memory. 19972b245cb2SAnirudh Venkataramanan * Thus, we need to divide by 12K. But division is slow! Instead, 19982b245cb2SAnirudh Venkataramanan * we decompose the operation into shifts and one relatively cheap 19992b245cb2SAnirudh Venkataramanan * multiply operation. 20002b245cb2SAnirudh Venkataramanan * 20012b245cb2SAnirudh Venkataramanan * To divide by 12K, we first divide by 4K, then divide by 3: 20022b245cb2SAnirudh Venkataramanan * To divide by 4K, shift right by 12 bits 20032b245cb2SAnirudh Venkataramanan * To divide by 3, multiply by 85, then divide by 256 20042b245cb2SAnirudh Venkataramanan * (Divide by 256 is done by shifting right by 8 bits) 20052b245cb2SAnirudh Venkataramanan * Finally, we add one to round up. Because 256 isn't an exact multiple of 20062b245cb2SAnirudh Venkataramanan * 3, we'll underestimate near each multiple of 12K. This is actually more 20072b245cb2SAnirudh Venkataramanan * accurate as we have 4K - 1 of wiggle room that we can fit into the last 20082b245cb2SAnirudh Venkataramanan * segment. For our purposes this is accurate out to 1M which is orders of 20092b245cb2SAnirudh Venkataramanan * magnitude greater than our largest possible GSO size. 
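 *
 * As a worked example (editor-added; assumes ICE_DESCS_FOR_SKB_DATA_PTR is
 * the "+ 1" round-up mentioned above): size = 16384 gives
 * ((16384 * 85) >> 20) + 1 = 2 descriptors, while size = 4096 gives
 * ((4096 * 85) >> 20) + 1 = 1 descriptor.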
20102b245cb2SAnirudh Venkataramanan * 20112b245cb2SAnirudh Venkataramanan * This would then be implemented as: 2012c585ea42SBrett Creeley * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 20132b245cb2SAnirudh Venkataramanan * 20142b245cb2SAnirudh Venkataramanan * Since multiplication and division are commutative, we can reorder 20152b245cb2SAnirudh Venkataramanan * operations into: 2016c585ea42SBrett Creeley * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 20172b245cb2SAnirudh Venkataramanan */ 20182b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size) 20192b245cb2SAnirudh Venkataramanan { 2020c585ea42SBrett Creeley return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 20212b245cb2SAnirudh Venkataramanan } 20222b245cb2SAnirudh Venkataramanan 20232b245cb2SAnirudh Venkataramanan /** 2024d337f2afSAnirudh Venkataramanan * ice_xmit_desc_count - calculate number of Tx descriptors needed 20252b245cb2SAnirudh Venkataramanan * @skb: send buffer 20262b245cb2SAnirudh Venkataramanan * 20272b245cb2SAnirudh Venkataramanan * Returns number of data descriptors needed for this skb. 20282b245cb2SAnirudh Venkataramanan */ 20292b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 20302b245cb2SAnirudh Venkataramanan { 2031d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 20322b245cb2SAnirudh Venkataramanan unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 20332b245cb2SAnirudh Venkataramanan unsigned int count = 0, size = skb_headlen(skb); 20342b245cb2SAnirudh Venkataramanan 20352b245cb2SAnirudh Venkataramanan for (;;) { 20362b245cb2SAnirudh Venkataramanan count += ice_txd_use_count(size); 20372b245cb2SAnirudh Venkataramanan 20382b245cb2SAnirudh Venkataramanan if (!nr_frags--) 20392b245cb2SAnirudh Venkataramanan break; 20402b245cb2SAnirudh Venkataramanan 20412b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag++); 20422b245cb2SAnirudh Venkataramanan } 20432b245cb2SAnirudh Venkataramanan 20442b245cb2SAnirudh Venkataramanan return count; 20452b245cb2SAnirudh Venkataramanan } 20462b245cb2SAnirudh Venkataramanan 20472b245cb2SAnirudh Venkataramanan /** 20482b245cb2SAnirudh Venkataramanan * __ice_chk_linearize - Check if there are more than 8 buffers per packet 20492b245cb2SAnirudh Venkataramanan * @skb: send buffer 20502b245cb2SAnirudh Venkataramanan * 20512b245cb2SAnirudh Venkataramanan * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 20522b245cb2SAnirudh Venkataramanan * and so we need to figure out the cases where we need to linearize the skb. 20532b245cb2SAnirudh Venkataramanan * 20542b245cb2SAnirudh Venkataramanan * For TSO we need to count the TSO header and segment payload separately. 20552b245cb2SAnirudh Venkataramanan * As such we need to check cases where we have 7 fragments or more as we 20562b245cb2SAnirudh Venkataramanan * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 20572b245cb2SAnirudh Venkataramanan * the segment payload in the first descriptor, and another 7 for the 20582b245cb2SAnirudh Venkataramanan * fragments. 
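 * That worst case is 1 + 1 + 7 = 9 DMA buffers, one more than the 8 the
 * hardware can chain for a single packet.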
20592b245cb2SAnirudh Venkataramanan */ 20602b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb) 20612b245cb2SAnirudh Venkataramanan { 2062d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag, *stale; 20632b245cb2SAnirudh Venkataramanan int nr_frags, sum; 20642b245cb2SAnirudh Venkataramanan 20652b245cb2SAnirudh Venkataramanan /* no need to check if number of frags is less than 7 */ 20662b245cb2SAnirudh Venkataramanan nr_frags = skb_shinfo(skb)->nr_frags; 20672b245cb2SAnirudh Venkataramanan if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 20682b245cb2SAnirudh Venkataramanan return false; 20692b245cb2SAnirudh Venkataramanan 20702b245cb2SAnirudh Venkataramanan /* We need to walk through the list and validate that each group 20712b245cb2SAnirudh Venkataramanan * of 6 fragments totals at least gso_size. 20722b245cb2SAnirudh Venkataramanan */ 20732b245cb2SAnirudh Venkataramanan nr_frags -= ICE_MAX_BUF_TXD - 2; 20742b245cb2SAnirudh Venkataramanan frag = &skb_shinfo(skb)->frags[0]; 20752b245cb2SAnirudh Venkataramanan 20762b245cb2SAnirudh Venkataramanan /* Initialize size to the negative value of gso_size minus 1. We 20774ee656bbSTony Nguyen * use this as the worst case scenario in which the frag ahead 20782b245cb2SAnirudh Venkataramanan * of us only provides one byte which is why we are limited to 6 20792b245cb2SAnirudh Venkataramanan * descriptors for a single transmit as the header and previous 20802b245cb2SAnirudh Venkataramanan * fragment are already consuming 2 descriptors. 20812b245cb2SAnirudh Venkataramanan */ 20822b245cb2SAnirudh Venkataramanan sum = 1 - skb_shinfo(skb)->gso_size; 20832b245cb2SAnirudh Venkataramanan 20842b245cb2SAnirudh Venkataramanan /* Add size of frags 0 through 4 to create our initial sum */ 20852b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20862b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20872b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20882b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20892b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20902b245cb2SAnirudh Venkataramanan 20912b245cb2SAnirudh Venkataramanan /* Walk through fragments adding latest fragment, testing it, and 20922b245cb2SAnirudh Venkataramanan * then removing stale fragments from the sum. 
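 * Illustrative numbers (editor-added): with gso_size = 4096 and six leading
 * 600-byte frags, sum = 1 - 4096 + 5 * 600 = -1095 before the loop; adding
 * the sixth 600-byte frag leaves sum = -495 < 0, so we report that the skb
 * must be linearized.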
20932b245cb2SAnirudh Venkataramanan */ 20942b245cb2SAnirudh Venkataramanan stale = &skb_shinfo(skb)->frags[0]; 20952b245cb2SAnirudh Venkataramanan for (;;) { 20962b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20972b245cb2SAnirudh Venkataramanan 20982b245cb2SAnirudh Venkataramanan /* if sum is negative we failed to make sufficient progress */ 20992b245cb2SAnirudh Venkataramanan if (sum < 0) 21002b245cb2SAnirudh Venkataramanan return true; 21012b245cb2SAnirudh Venkataramanan 21022b245cb2SAnirudh Venkataramanan if (!nr_frags--) 21032b245cb2SAnirudh Venkataramanan break; 21042b245cb2SAnirudh Venkataramanan 21052b245cb2SAnirudh Venkataramanan sum -= skb_frag_size(stale++); 21062b245cb2SAnirudh Venkataramanan } 21072b245cb2SAnirudh Venkataramanan 21082b245cb2SAnirudh Venkataramanan return false; 21092b245cb2SAnirudh Venkataramanan } 21102b245cb2SAnirudh Venkataramanan 21112b245cb2SAnirudh Venkataramanan /** 21122b245cb2SAnirudh Venkataramanan * ice_chk_linearize - Check if there are more than 8 fragments per packet 21132b245cb2SAnirudh Venkataramanan * @skb: send buffer 21142b245cb2SAnirudh Venkataramanan * @count: number of buffers used 21152b245cb2SAnirudh Venkataramanan * 21162b245cb2SAnirudh Venkataramanan * Note: Our HW can't scatter-gather more than 8 fragments to build 21172b245cb2SAnirudh Venkataramanan * a packet on the wire and so we need to figure out the cases where we 21182b245cb2SAnirudh Venkataramanan * need to linearize the skb. 21192b245cb2SAnirudh Venkataramanan */ 21202b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 21212b245cb2SAnirudh Venkataramanan { 21222b245cb2SAnirudh Venkataramanan /* Both TSO and single send will work if count is less than 8 */ 21232b245cb2SAnirudh Venkataramanan if (likely(count < ICE_MAX_BUF_TXD)) 21242b245cb2SAnirudh Venkataramanan return false; 21252b245cb2SAnirudh Venkataramanan 21262b245cb2SAnirudh Venkataramanan if (skb_is_gso(skb)) 21272b245cb2SAnirudh Venkataramanan return __ice_chk_linearize(skb); 21282b245cb2SAnirudh Venkataramanan 21292b245cb2SAnirudh Venkataramanan /* we can support up to 8 data buffers for a single send */ 21302b245cb2SAnirudh Venkataramanan return count != ICE_MAX_BUF_TXD; 21312b245cb2SAnirudh Venkataramanan } 21322b245cb2SAnirudh Venkataramanan 21332b245cb2SAnirudh Venkataramanan /** 21342b245cb2SAnirudh Venkataramanan * ice_xmit_frame_ring - Sends buffer on Tx ring 21352b245cb2SAnirudh Venkataramanan * @skb: send buffer 21362b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 21372b245cb2SAnirudh Venkataramanan * 21382b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 21392b245cb2SAnirudh Venkataramanan */ 21402b245cb2SAnirudh Venkataramanan static netdev_tx_t 21412b245cb2SAnirudh Venkataramanan ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) 21422b245cb2SAnirudh Venkataramanan { 2143d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params offload = { 0 }; 21440c3a6101SDave Ertman struct ice_vsi *vsi = tx_ring->vsi; 21452b245cb2SAnirudh Venkataramanan struct ice_tx_buf *first; 21462b245cb2SAnirudh Venkataramanan unsigned int count; 2147d76a60baSAnirudh Venkataramanan int tso, csum; 21482b245cb2SAnirudh Venkataramanan 21492b245cb2SAnirudh Venkataramanan count = ice_xmit_desc_count(skb); 21502b245cb2SAnirudh Venkataramanan if (ice_chk_linearize(skb, count)) { 21512b245cb2SAnirudh Venkataramanan if (__skb_linearize(skb)) 21522b245cb2SAnirudh Venkataramanan goto out_drop; 
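		/* after linearizing, all data sits in the skb head, so the
		 * descriptor count is driven by skb->len alone
		 */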
21532b245cb2SAnirudh Venkataramanan count = ice_txd_use_count(skb->len); 21542b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_linearize++; 21552b245cb2SAnirudh Venkataramanan } 21562b245cb2SAnirudh Venkataramanan 21572b245cb2SAnirudh Venkataramanan /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 21582b245cb2SAnirudh Venkataramanan * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 21592b245cb2SAnirudh Venkataramanan * + 4 desc gap to avoid the cache line where head is, 21602b245cb2SAnirudh Venkataramanan * + 1 desc for context descriptor, 21612b245cb2SAnirudh Venkataramanan * otherwise try next time 21622b245cb2SAnirudh Venkataramanan */ 2163c585ea42SBrett Creeley if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2164c585ea42SBrett Creeley ICE_DESCS_FOR_CTX_DESC)) { 21652b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_busy++; 21662b245cb2SAnirudh Venkataramanan return NETDEV_TX_BUSY; 21672b245cb2SAnirudh Venkataramanan } 21682b245cb2SAnirudh Venkataramanan 2169d76a60baSAnirudh Venkataramanan offload.tx_ring = tx_ring; 2170d76a60baSAnirudh Venkataramanan 21712b245cb2SAnirudh Venkataramanan /* record the location of the first descriptor for this packet */ 21722b245cb2SAnirudh Venkataramanan first = &tx_ring->tx_buf[tx_ring->next_to_use]; 21732b245cb2SAnirudh Venkataramanan first->skb = skb; 21742b245cb2SAnirudh Venkataramanan first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 21752b245cb2SAnirudh Venkataramanan first->gso_segs = 1; 2176d76a60baSAnirudh Venkataramanan first->tx_flags = 0; 21772b245cb2SAnirudh Venkataramanan 2178d76a60baSAnirudh Venkataramanan /* prepare the VLAN tagging flags for Tx */ 2179d76a60baSAnirudh Venkataramanan if (ice_tx_prepare_vlan_flags(tx_ring, first)) 2180d76a60baSAnirudh Venkataramanan goto out_drop; 2181d76a60baSAnirudh Venkataramanan 2182d76a60baSAnirudh Venkataramanan /* set up TSO offload */ 2183d76a60baSAnirudh Venkataramanan tso = ice_tso(first, &offload); 2184d76a60baSAnirudh Venkataramanan if (tso < 0) 2185d76a60baSAnirudh Venkataramanan goto out_drop; 2186d76a60baSAnirudh Venkataramanan 2187d76a60baSAnirudh Venkataramanan /* always set up Tx checksum offload */ 2188d76a60baSAnirudh Venkataramanan csum = ice_tx_csum(first, &offload); 2189d76a60baSAnirudh Venkataramanan if (csum < 0) 2190d76a60baSAnirudh Venkataramanan goto out_drop; 2191d76a60baSAnirudh Venkataramanan 21920c3a6101SDave Ertman /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 21930c3a6101SDave Ertman if (unlikely(skb->priority == TC_PRIO_CONTROL && 21940c3a6101SDave Ertman vsi->type == ICE_VSI_PF && 21950c3a6101SDave Ertman vsi->port_info->is_sw_lldp)) 21960c3a6101SDave Ertman offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 21970c3a6101SDave Ertman ICE_TX_CTX_DESC_SWTCH_UPLINK << 21980c3a6101SDave Ertman ICE_TXD_CTX_QW1_CMD_S); 21990c3a6101SDave Ertman 22000c3a6101SDave Ertman if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2201d76a60baSAnirudh Venkataramanan struct ice_tx_ctx_desc *cdesc; 2202d76a60baSAnirudh Venkataramanan int i = tx_ring->next_to_use; 2203d76a60baSAnirudh Venkataramanan 2204d76a60baSAnirudh Venkataramanan /* grab the next descriptor */ 2205d76a60baSAnirudh Venkataramanan cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2206d76a60baSAnirudh Venkataramanan i++; 2207d76a60baSAnirudh Venkataramanan tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2208d76a60baSAnirudh Venkataramanan 2209d76a60baSAnirudh Venkataramanan /* setup context descriptor */ 2210d76a60baSAnirudh Venkataramanan cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2211d76a60baSAnirudh Venkataramanan cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2212d76a60baSAnirudh Venkataramanan cdesc->rsvd = cpu_to_le16(0); 2213d76a60baSAnirudh Venkataramanan cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2214d76a60baSAnirudh Venkataramanan } 2215d76a60baSAnirudh Venkataramanan 2216d76a60baSAnirudh Venkataramanan ice_tx_map(tx_ring, first, &offload); 22172b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 22182b245cb2SAnirudh Venkataramanan 22192b245cb2SAnirudh Venkataramanan out_drop: 22202b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 22212b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 22222b245cb2SAnirudh Venkataramanan } 22232b245cb2SAnirudh Venkataramanan 22242b245cb2SAnirudh Venkataramanan /** 22252b245cb2SAnirudh Venkataramanan * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 22262b245cb2SAnirudh Venkataramanan * @skb: send buffer 22272b245cb2SAnirudh Venkataramanan * @netdev: network interface device structure 22282b245cb2SAnirudh Venkataramanan * 22292b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 22302b245cb2SAnirudh Venkataramanan */ 22312b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 22322b245cb2SAnirudh Venkataramanan { 22332b245cb2SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 22342b245cb2SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 22352b245cb2SAnirudh Venkataramanan struct ice_ring *tx_ring; 22362b245cb2SAnirudh Venkataramanan 22372b245cb2SAnirudh Venkataramanan tx_ring = vsi->tx_rings[skb->queue_mapping]; 22382b245cb2SAnirudh Venkataramanan 22392b245cb2SAnirudh Venkataramanan /* hardware can't handle really short frames, hardware padding works 22402b245cb2SAnirudh Venkataramanan * beyond this point 22412b245cb2SAnirudh Venkataramanan */ 22422b245cb2SAnirudh Venkataramanan if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 22432b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 22442b245cb2SAnirudh Venkataramanan 22452b245cb2SAnirudh Venkataramanan return ice_xmit_frame_ring(skb, tx_ring); 22462b245cb2SAnirudh Venkataramanan } 2247