// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE		256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
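	/* From here on 'i' is kept as a negative offset from the end of the
	 * ring: the subtraction above lets the wrap-around checks in the
	 * loop below test against zero instead of tx_ring->count, and 'i' is
	 * converted back to a regular ring index before it is stored in
	 * next_to_clean.
	 */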

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_umem) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
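	/* Headroom in front of the packet data depends on the Rx path:
	 * build_skb() rings reserve ICE_SKB_PAD, XDP-enabled rings reserve
	 * XDP_PACKET_HEADROOM so a program may grow the frame, and the
	 * legacy copy path needs no headroom at all.
	 */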
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
					  unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ice_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	int err, result = ICE_XDP_PASS;
	struct ice_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	return result;
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * free'ed via XDP return API.
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

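	/* The XDP Tx ring is selected by the raw CPU id, so each CPU that
	 * passed the bounds check above transmits on its own dedicated ring
	 * and does not contend with other CPUs here.
	 */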
	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
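	/* Take a large batch of page references up front and track how many
	 * of them the driver still owns in pagecnt_bias; this avoids atomic
	 * refcount updates on every received buffer. The unused bias is
	 * handed back via __page_frag_cache_drain() when the page is freed.
	 */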
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* we must record the Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

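/* When a frame spans multiple descriptors, the partially built skb is parked
 * on the rx_buf that next_to_clean now points to (see ice_is_non_eop() below);
 * ice_get_rx_buf() hands it back on the next loop iteration so the following
 * fragment can be appended with ice_add_rx_frag().
 */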
/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	xdp.rxq = &rx_ring->xdp_rxq;
	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
		xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
		xdp.data_meta = xdp.data;
		xdp.data_end = xdp.data + size;
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depend on len size */
		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		rcu_read_lock();
		xdp_prog = READ_ONCE(rx_ring->xdp_prog);
		if (!xdp_prog) {
			rcu_read_unlock();
			goto construct_skb;
		}

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
		rcu_read_unlock();
		if (!xdp_res)
			goto construct_skb;
10402b245cb2SAnirudh Venkataramanan */ 10412b245cb2SAnirudh Venkataramanan dma_rmb(); 10422b245cb2SAnirudh Venkataramanan 10436c869cb7SMaciej Fijalkowski size = le16_to_cpu(rx_desc->wb.pkt_len) & 10446c869cb7SMaciej Fijalkowski ICE_RX_FLX_DESC_PKT_LEN_M; 10452b245cb2SAnirudh Venkataramanan 1046ac6f733aSMitch Williams /* retrieve a buffer from the ring */ 1047712edbbbSMaciej Fijalkowski rx_buf = ice_get_rx_buf(rx_ring, &skb, size); 1048ac6f733aSMitch Williams 1049efc2214bSMaciej Fijalkowski if (!size) { 1050efc2214bSMaciej Fijalkowski xdp.data = NULL; 1051efc2214bSMaciej Fijalkowski xdp.data_end = NULL; 1052aaf27254SMaciej Fijalkowski xdp.data_hard_start = NULL; 1053aaf27254SMaciej Fijalkowski xdp.data_meta = NULL; 1054efc2214bSMaciej Fijalkowski goto construct_skb; 1055efc2214bSMaciej Fijalkowski } 1056efc2214bSMaciej Fijalkowski 1057efc2214bSMaciej Fijalkowski xdp.data = page_address(rx_buf->page) + rx_buf->page_offset; 1058efc2214bSMaciej Fijalkowski xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring); 1059aaf27254SMaciej Fijalkowski xdp.data_meta = xdp.data; 1060efc2214bSMaciej Fijalkowski xdp.data_end = xdp.data + size; 1061d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE > 4096) 1062d4ecdbf7SJesper Dangaard Brouer /* At larger PAGE_SIZE, frame_sz depend on len size */ 1063d4ecdbf7SJesper Dangaard Brouer xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 1064d4ecdbf7SJesper Dangaard Brouer #endif 1065efc2214bSMaciej Fijalkowski 1066efc2214bSMaciej Fijalkowski rcu_read_lock(); 1067efc2214bSMaciej Fijalkowski xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1068efc2214bSMaciej Fijalkowski if (!xdp_prog) { 1069efc2214bSMaciej Fijalkowski rcu_read_unlock(); 1070efc2214bSMaciej Fijalkowski goto construct_skb; 1071efc2214bSMaciej Fijalkowski } 1072efc2214bSMaciej Fijalkowski 1073efc2214bSMaciej Fijalkowski xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); 1074efc2214bSMaciej Fijalkowski rcu_read_unlock(); 107559bb0808SMaciej Fijalkowski if (!xdp_res) 107659bb0808SMaciej Fijalkowski goto construct_skb; 1077efc2214bSMaciej Fijalkowski if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1078efc2214bSMaciej Fijalkowski xdp_xmit |= xdp_res; 1079d4ecdbf7SJesper Dangaard Brouer ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 1080efc2214bSMaciej Fijalkowski } else { 1081efc2214bSMaciej Fijalkowski rx_buf->pagecnt_bias++; 1082efc2214bSMaciej Fijalkowski } 1083efc2214bSMaciej Fijalkowski total_rx_bytes += size; 1084efc2214bSMaciej Fijalkowski total_rx_pkts++; 1085efc2214bSMaciej Fijalkowski 1086efc2214bSMaciej Fijalkowski cleaned_count++; 1087efc2214bSMaciej Fijalkowski ice_put_rx_buf(rx_ring, rx_buf); 1088efc2214bSMaciej Fijalkowski continue; 1089efc2214bSMaciej Fijalkowski construct_skb: 10901f45ebe0SMitch Williams if (skb) { 10917237f5b0SMaciej Fijalkowski ice_add_rx_frag(rx_ring, rx_buf, skb, size); 10921f45ebe0SMitch Williams } else if (likely(xdp.data)) { 10931f45ebe0SMitch Williams if (ice_ring_uses_build_skb(rx_ring)) 1094aaf27254SMaciej Fijalkowski skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1095712edbbbSMaciej Fijalkowski else 1096efc2214bSMaciej Fijalkowski skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 10971f45ebe0SMitch Williams } 1098712edbbbSMaciej Fijalkowski /* exit if we failed to retrieve a buffer */ 1099712edbbbSMaciej Fijalkowski if (!skb) { 1100712edbbbSMaciej Fijalkowski rx_ring->rx_stats.alloc_buf_failed++; 1101ac6f733aSMitch Williams if (rx_buf) 1102712edbbbSMaciej Fijalkowski rx_buf->pagecnt_bias++; 11032b245cb2SAnirudh Venkataramanan break; 1104712edbbbSMaciej Fijalkowski } 
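		/* (Editorial note) From here the buffer is handed back to the
		 * ring (reused or unmapped) and cleaned_count is bumped; the
		 * descriptor's EOP and RXE bits then decide whether the skb
		 * keeps accumulating, is dropped, or is populated and sent up
		 * the stack.
		 */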
11052b245cb2SAnirudh Venkataramanan 11061d032bc7SMaciej Fijalkowski ice_put_rx_buf(rx_ring, rx_buf); 11072b245cb2SAnirudh Venkataramanan cleaned_count++; 11082b245cb2SAnirudh Venkataramanan 11092b245cb2SAnirudh Venkataramanan /* skip if it is NOP desc */ 11102b245cb2SAnirudh Venkataramanan if (ice_is_non_eop(rx_ring, rx_desc, skb)) 11112b245cb2SAnirudh Venkataramanan continue; 11122b245cb2SAnirudh Venkataramanan 11132b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 11142b245cb2SAnirudh Venkataramanan if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 11152b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 11162b245cb2SAnirudh Venkataramanan continue; 11172b245cb2SAnirudh Venkataramanan } 11182b245cb2SAnirudh Venkataramanan 11192b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 11202b245cb2SAnirudh Venkataramanan if (ice_test_staterr(rx_desc, stat_err_bits)) 11212b245cb2SAnirudh Venkataramanan vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 11222b245cb2SAnirudh Venkataramanan 1123133f4883SKrzysztof Kazimierczak /* pad the skb if needed, to make a valid ethernet frame */ 1124133f4883SKrzysztof Kazimierczak if (eth_skb_pad(skb)) { 11252b245cb2SAnirudh Venkataramanan skb = NULL; 11262b245cb2SAnirudh Venkataramanan continue; 11272b245cb2SAnirudh Venkataramanan } 11282b245cb2SAnirudh Venkataramanan 11292b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 11302b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 11312b245cb2SAnirudh Venkataramanan 1132d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 11336503b659SJesse Brandeburg rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 11346503b659SJesse Brandeburg ICE_RX_FLEX_DESC_PTYPE_M; 11356503b659SJesse Brandeburg 1136d76a60baSAnirudh Venkataramanan ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1137d76a60baSAnirudh Venkataramanan 11382b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 11392b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 11402b245cb2SAnirudh Venkataramanan 11412b245cb2SAnirudh Venkataramanan /* update budget accounting */ 11422b245cb2SAnirudh Venkataramanan total_rx_pkts++; 11432b245cb2SAnirudh Venkataramanan } 11442b245cb2SAnirudh Venkataramanan 1145cb7db356SBrett Creeley /* return up to cleaned_count buffers to hardware */ 1146cb7db356SBrett Creeley failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1147cb7db356SBrett Creeley 1148efc2214bSMaciej Fijalkowski if (xdp_prog) 1149efc2214bSMaciej Fijalkowski ice_finalize_xdp_rx(rx_ring, xdp_xmit); 1150efc2214bSMaciej Fijalkowski 11512d4238f5SKrzysztof Kazimierczak ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 11522b245cb2SAnirudh Venkataramanan 11532b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 11542b245cb2SAnirudh Venkataramanan return failure ? 
budget : (int)total_rx_pkts; 11552b245cb2SAnirudh Venkataramanan } 11562b245cb2SAnirudh Venkataramanan 11572b245cb2SAnirudh Venkataramanan /** 1158711987bbSBrett Creeley * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic 1159711987bbSBrett Creeley * @port_info: port_info structure containing the current link speed 1160711987bbSBrett Creeley * @avg_pkt_size: average size of Tx or Rx packets based on clean routine 11612f2da36eSAnirudh Venkataramanan * @itr: ITR value to update 1162711987bbSBrett Creeley * 1163711987bbSBrett Creeley * Calculate how big of an increment should be applied to the ITR value passed 1164711987bbSBrett Creeley * in based on wmem_default, SKB overhead, Ethernet overhead, and the current 1165711987bbSBrett Creeley * link speed. 1166711987bbSBrett Creeley * 1167711987bbSBrett Creeley * The following is a calculation derived from: 1168711987bbSBrett Creeley * wmem_default / (size + overhead) = desired_pkts_per_int 1169711987bbSBrett Creeley * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate 1170711987bbSBrett Creeley * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1171711987bbSBrett Creeley * 1172711987bbSBrett Creeley * Assuming wmem_default is 212992 and overhead is 640 bytes per 1173711987bbSBrett Creeley * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1174711987bbSBrett Creeley * formula down to: 1175711987bbSBrett Creeley * 1176711987bbSBrett Creeley * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 1177711987bbSBrett Creeley * ITR = -------------------------------------------- * -------------- 1178711987bbSBrett Creeley * rate pkt_size + 640 1179711987bbSBrett Creeley */ 1180711987bbSBrett Creeley static unsigned int 1181711987bbSBrett Creeley ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, 1182711987bbSBrett Creeley unsigned int avg_pkt_size, 1183711987bbSBrett Creeley unsigned int itr) 118464a59d05SAnirudh Venkataramanan { 1185711987bbSBrett Creeley switch (port_info->phy.link_info.link_speed) { 1186711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_100GB: 1187711987bbSBrett Creeley itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), 1188711987bbSBrett Creeley avg_pkt_size + 640); 1189711987bbSBrett Creeley break; 1190711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_50GB: 1191711987bbSBrett Creeley itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), 1192711987bbSBrett Creeley avg_pkt_size + 640); 1193711987bbSBrett Creeley break; 119464a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_40GB: 1195711987bbSBrett Creeley itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), 1196711987bbSBrett Creeley avg_pkt_size + 640); 1197711987bbSBrett Creeley break; 119864a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_25GB: 1199711987bbSBrett Creeley itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), 1200711987bbSBrett Creeley avg_pkt_size + 640); 1201711987bbSBrett Creeley break; 120264a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_20GB: 1203711987bbSBrett Creeley itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), 1204711987bbSBrett Creeley avg_pkt_size + 640); 1205711987bbSBrett Creeley break; 1206711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_10GB: 120764a59d05SAnirudh Venkataramanan default: 1208711987bbSBrett Creeley itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), 1209711987bbSBrett Creeley avg_pkt_size + 640); 1210711987bbSBrett Creeley break; 121164a59d05SAnirudh Venkataramanan } 1212711987bbSBrett Creeley 1213711987bbSBrett Creeley if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 
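		/* (Editorial note, illustrative only; the numbers follow from
		 * the formula in the comment above, not from hardware:
		 * 212992 * 8 bits / 100 Gbps is roughly 17 usecs, which appears
		 * to be where the per-speed constants (17, 34, 43, 68, 85, 170)
		 * come from, and with avg_pkt_size = 1500 the 100 Gbps case
		 * adds DIV_ROUND_UP(17 * 1524, 2140) = 13 usecs while the
		 * 10 Gbps default adds DIV_ROUND_UP(170 * 1524, 2140) = 122
		 * usecs.)
		 *
		 * If the accumulated value exceeds the adaptive maximum, keep
		 * only the latency flag and pin the interval at
		 * ICE_ITR_ADAPTIVE_MAX_USECS:
		 */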
1214711987bbSBrett Creeley itr &= ICE_ITR_ADAPTIVE_LATENCY; 1215711987bbSBrett Creeley itr += ICE_ITR_ADAPTIVE_MAX_USECS; 1216711987bbSBrett Creeley } 1217711987bbSBrett Creeley 1218711987bbSBrett Creeley return itr; 121964a59d05SAnirudh Venkataramanan } 122064a59d05SAnirudh Venkataramanan 122164a59d05SAnirudh Venkataramanan /** 122264a59d05SAnirudh Venkataramanan * ice_update_itr - update the adaptive ITR value based on statistics 122364a59d05SAnirudh Venkataramanan * @q_vector: structure containing interrupt and ring information 122464a59d05SAnirudh Venkataramanan * @rc: structure containing ring performance data 122564a59d05SAnirudh Venkataramanan * 122664a59d05SAnirudh Venkataramanan * Stores a new ITR value based on packets and byte 122764a59d05SAnirudh Venkataramanan * counts during the last interrupt. The advantage of per interrupt 122864a59d05SAnirudh Venkataramanan * computation is faster updates and more accurate ITR for the current 122964a59d05SAnirudh Venkataramanan * traffic pattern. Constants in this function were computed 123064a59d05SAnirudh Venkataramanan * based on theoretical maximum wire speed and thresholds were set based 123164a59d05SAnirudh Venkataramanan * on testing data as well as attempting to minimize response time 123264a59d05SAnirudh Venkataramanan * while increasing bulk throughput. 123364a59d05SAnirudh Venkataramanan */ 123464a59d05SAnirudh Venkataramanan static void 123564a59d05SAnirudh Venkataramanan ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) 123664a59d05SAnirudh Venkataramanan { 123764a59d05SAnirudh Venkataramanan unsigned long next_update = jiffies; 1238711987bbSBrett Creeley unsigned int packets, bytes, itr; 123964a59d05SAnirudh Venkataramanan bool container_is_rx; 124064a59d05SAnirudh Venkataramanan 124164a59d05SAnirudh Venkataramanan if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) 124264a59d05SAnirudh Venkataramanan return; 124364a59d05SAnirudh Venkataramanan 124464a59d05SAnirudh Venkataramanan /* If itr_countdown is set it means we programmed an ITR within 124564a59d05SAnirudh Venkataramanan * the last 4 interrupt cycles. This has a side effect of us 124664a59d05SAnirudh Venkataramanan * potentially firing an early interrupt. In order to work around 124764a59d05SAnirudh Venkataramanan * this we need to throw out any data received for a few 124864a59d05SAnirudh Venkataramanan * interrupts following the update. 124964a59d05SAnirudh Venkataramanan */ 125064a59d05SAnirudh Venkataramanan if (q_vector->itr_countdown) { 125164a59d05SAnirudh Venkataramanan itr = rc->target_itr; 125264a59d05SAnirudh Venkataramanan goto clear_counts; 125364a59d05SAnirudh Venkataramanan } 125464a59d05SAnirudh Venkataramanan 125564a59d05SAnirudh Venkataramanan container_is_rx = (&q_vector->rx == rc); 125664a59d05SAnirudh Venkataramanan /* For Rx we want to push the delay up and default to low latency. 125764a59d05SAnirudh Venkataramanan * for Tx we want to pull the delay down and default to high latency. 125864a59d05SAnirudh Venkataramanan */ 125964a59d05SAnirudh Venkataramanan itr = container_is_rx ? 
126064a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : 126164a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; 126264a59d05SAnirudh Venkataramanan 126364a59d05SAnirudh Venkataramanan /* If we didn't update within up to 1 - 2 jiffies we can assume 126464a59d05SAnirudh Venkataramanan * that either packets are coming in so slow there hasn't been 126564a59d05SAnirudh Venkataramanan * any work, or that there is so much work that NAPI is dealing 126664a59d05SAnirudh Venkataramanan * with interrupt moderation and we don't need to do anything. 126764a59d05SAnirudh Venkataramanan */ 126864a59d05SAnirudh Venkataramanan if (time_after(next_update, rc->next_update)) 126964a59d05SAnirudh Venkataramanan goto clear_counts; 127064a59d05SAnirudh Venkataramanan 1271d27525ecSJesse Brandeburg prefetch(q_vector->vsi->port_info); 1272d27525ecSJesse Brandeburg 127364a59d05SAnirudh Venkataramanan packets = rc->total_pkts; 127464a59d05SAnirudh Venkataramanan bytes = rc->total_bytes; 127564a59d05SAnirudh Venkataramanan 127664a59d05SAnirudh Venkataramanan if (container_is_rx) { 127764a59d05SAnirudh Venkataramanan /* If Rx there are 1 to 4 packets and bytes are less than 127864a59d05SAnirudh Venkataramanan * 9000 assume insufficient data to use bulk rate limiting 127964a59d05SAnirudh Venkataramanan * approach unless Tx is already in bulk rate limiting. We 128064a59d05SAnirudh Venkataramanan * are likely latency driven. 128164a59d05SAnirudh Venkataramanan */ 128264a59d05SAnirudh Venkataramanan if (packets && packets < 4 && bytes < 9000 && 128364a59d05SAnirudh Venkataramanan (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { 128464a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_LATENCY; 1285711987bbSBrett Creeley goto adjust_by_size_and_speed; 128664a59d05SAnirudh Venkataramanan } 128764a59d05SAnirudh Venkataramanan } else if (packets < 4) { 128864a59d05SAnirudh Venkataramanan /* If we have Tx and Rx ITR maxed and Tx ITR is running in 128964a59d05SAnirudh Venkataramanan * bulk mode and we are receiving 4 or fewer packets just 129064a59d05SAnirudh Venkataramanan * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 129164a59d05SAnirudh Venkataramanan * that the Rx can relax. 129264a59d05SAnirudh Venkataramanan */ 129364a59d05SAnirudh Venkataramanan if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && 129464a59d05SAnirudh Venkataramanan (q_vector->rx.target_itr & ICE_ITR_MASK) == 129564a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MAX_USECS) 129664a59d05SAnirudh Venkataramanan goto clear_counts; 129764a59d05SAnirudh Venkataramanan } else if (packets > 32) { 129864a59d05SAnirudh Venkataramanan /* If we have processed over 32 packets in a single interrupt 129964a59d05SAnirudh Venkataramanan * for Tx assume we need to switch over to "bulk" mode. 130064a59d05SAnirudh Venkataramanan */ 130164a59d05SAnirudh Venkataramanan rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; 130264a59d05SAnirudh Venkataramanan } 130364a59d05SAnirudh Venkataramanan 130464a59d05SAnirudh Venkataramanan /* We have no packets to actually measure against. This means 130564a59d05SAnirudh Venkataramanan * either one of the other queues on this vector is active or 130664a59d05SAnirudh Venkataramanan * we are a Tx queue doing TSO with too high of an interrupt rate. 130764a59d05SAnirudh Venkataramanan * 130864a59d05SAnirudh Venkataramanan * Between 4 and 56 we can assume that our current interrupt delay 130964a59d05SAnirudh Venkataramanan * is only slightly too low. 
As such we should increase it by a small 131064a59d05SAnirudh Venkataramanan * fixed amount. 131164a59d05SAnirudh Venkataramanan */ 131264a59d05SAnirudh Venkataramanan if (packets < 56) { 131364a59d05SAnirudh Venkataramanan itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; 131464a59d05SAnirudh Venkataramanan if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 131564a59d05SAnirudh Venkataramanan itr &= ICE_ITR_ADAPTIVE_LATENCY; 131664a59d05SAnirudh Venkataramanan itr += ICE_ITR_ADAPTIVE_MAX_USECS; 131764a59d05SAnirudh Venkataramanan } 131864a59d05SAnirudh Venkataramanan goto clear_counts; 131964a59d05SAnirudh Venkataramanan } 132064a59d05SAnirudh Venkataramanan 132164a59d05SAnirudh Venkataramanan if (packets <= 256) { 132264a59d05SAnirudh Venkataramanan itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 132364a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 132464a59d05SAnirudh Venkataramanan 132564a59d05SAnirudh Venkataramanan /* Between 56 and 112 is our "goldilocks" zone where we are 132664a59d05SAnirudh Venkataramanan * working out "just right". Just report that our current 132764a59d05SAnirudh Venkataramanan * ITR is good for us. 132864a59d05SAnirudh Venkataramanan */ 132964a59d05SAnirudh Venkataramanan if (packets <= 112) 133064a59d05SAnirudh Venkataramanan goto clear_counts; 133164a59d05SAnirudh Venkataramanan 133264a59d05SAnirudh Venkataramanan /* If packet count is 128 or greater we are likely looking 133364a59d05SAnirudh Venkataramanan * at a slight overrun of the delay we want. Try halving 133464a59d05SAnirudh Venkataramanan * our delay to see if that will cut the number of packets 133564a59d05SAnirudh Venkataramanan * in half per interrupt. 133664a59d05SAnirudh Venkataramanan */ 133764a59d05SAnirudh Venkataramanan itr >>= 1; 133864a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 133964a59d05SAnirudh Venkataramanan if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) 134064a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_MIN_USECS; 134164a59d05SAnirudh Venkataramanan 134264a59d05SAnirudh Venkataramanan goto clear_counts; 134364a59d05SAnirudh Venkataramanan } 134464a59d05SAnirudh Venkataramanan 134564a59d05SAnirudh Venkataramanan /* The paths below assume we are dealing with a bulk ITR since 134664a59d05SAnirudh Venkataramanan * number of packets is greater than 256. We are just going to have 134764a59d05SAnirudh Venkataramanan * to compute a value and try to bring the count under control, 134864a59d05SAnirudh Venkataramanan * though for smaller packet sizes there isn't much we can do as 134964a59d05SAnirudh Venkataramanan * NAPI polling will likely be kicking in sooner rather than later. 
135064a59d05SAnirudh Venkataramanan */ 135164a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_BULK; 135264a59d05SAnirudh Venkataramanan 1353711987bbSBrett Creeley adjust_by_size_and_speed: 135464a59d05SAnirudh Venkataramanan 1355711987bbSBrett Creeley /* based on checks above packets cannot be 0 so division is safe */ 1356711987bbSBrett Creeley itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, 1357711987bbSBrett Creeley bytes / packets, itr); 135864a59d05SAnirudh Venkataramanan 135964a59d05SAnirudh Venkataramanan clear_counts: 136064a59d05SAnirudh Venkataramanan /* write back value */ 136164a59d05SAnirudh Venkataramanan rc->target_itr = itr; 136264a59d05SAnirudh Venkataramanan 136364a59d05SAnirudh Venkataramanan /* next update should occur within next jiffy */ 136464a59d05SAnirudh Venkataramanan rc->next_update = next_update + 1; 136564a59d05SAnirudh Venkataramanan 136664a59d05SAnirudh Venkataramanan rc->total_bytes = 0; 136764a59d05SAnirudh Venkataramanan rc->total_pkts = 0; 136864a59d05SAnirudh Venkataramanan } 136964a59d05SAnirudh Venkataramanan 13702b245cb2SAnirudh Venkataramanan /** 137163f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 137263f545edSBrett Creeley * @itr_idx: interrupt throttling index 137364a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 137463f545edSBrett Creeley */ 13758244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 137663f545edSBrett Creeley { 13772f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 137864a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 137964a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 138064a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 138164a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 138264a59d05SAnirudh Venkataramanan * prior to the ITR field. 138364a59d05SAnirudh Venkataramanan */ 138464a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 138564a59d05SAnirudh Venkataramanan 138663f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 138763f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 138864a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 138963f545edSBrett Creeley } 139063f545edSBrett Creeley 139164a59d05SAnirudh Venkataramanan /* The act of updating the ITR will cause it to immediately trigger. In order 139264a59d05SAnirudh Venkataramanan * to prevent this from throwing off adaptive update statistics we defer the 139364a59d05SAnirudh Venkataramanan * update so that it can only happen so often. So after either Tx or Rx are 139464a59d05SAnirudh Venkataramanan * updated we make the adaptive scheme wait until either the ITR completely 139564a59d05SAnirudh Venkataramanan * expires via the next_update expiration or we have been through at least 139664a59d05SAnirudh Venkataramanan * 3 interrupts. 
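 *
 * (Editorial note: ITR_COUNTDOWN_START below is that interrupt count;
 * ice_update_itr() throws away its sample while q_vector->itr_countdown is
 * non-zero, and ice_update_ena_itr() reloads the countdown whenever it writes
 * a new ITR value.)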
139764a59d05SAnirudh Venkataramanan */ 139864a59d05SAnirudh Venkataramanan #define ITR_COUNTDOWN_START 3 139964a59d05SAnirudh Venkataramanan 140063f545edSBrett Creeley /** 140163f545edSBrett Creeley * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt 140263f545edSBrett Creeley * @q_vector: q_vector for which ITR is being updated and interrupt enabled 140363f545edSBrett Creeley */ 14042fb0821fSJesse Brandeburg static void ice_update_ena_itr(struct ice_q_vector *q_vector) 140563f545edSBrett Creeley { 140664a59d05SAnirudh Venkataramanan struct ice_ring_container *tx = &q_vector->tx; 140764a59d05SAnirudh Venkataramanan struct ice_ring_container *rx = &q_vector->rx; 14082fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 140963f545edSBrett Creeley u32 itr_val; 141063f545edSBrett Creeley 14112ab28bb0SBrett Creeley /* when exiting WB_ON_ITR lets set a low ITR value and trigger 14122ab28bb0SBrett Creeley * interrupts to expire right away in case we have more work ready to go 14132ab28bb0SBrett Creeley * already 14142ab28bb0SBrett Creeley */ 14152ab28bb0SBrett Creeley if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) { 14162ab28bb0SBrett Creeley itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS); 14172ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 14182ab28bb0SBrett Creeley /* set target back to last user set value */ 14192ab28bb0SBrett Creeley rx->target_itr = rx->itr_setting; 14202ab28bb0SBrett Creeley /* set current to what we just wrote and dynamic if needed */ 14212ab28bb0SBrett Creeley rx->current_itr = ICE_WB_ON_ITR_USECS | 14222ab28bb0SBrett Creeley (rx->itr_setting & ICE_ITR_DYNAMIC); 14232ab28bb0SBrett Creeley /* allow normal interrupt flow to start */ 14242ab28bb0SBrett Creeley q_vector->itr_countdown = 0; 14252ab28bb0SBrett Creeley return; 14262ab28bb0SBrett Creeley } 14272ab28bb0SBrett Creeley 142864a59d05SAnirudh Venkataramanan /* This will do nothing if dynamic updates are not enabled */ 142964a59d05SAnirudh Venkataramanan ice_update_itr(q_vector, tx); 143064a59d05SAnirudh Venkataramanan ice_update_itr(q_vector, rx); 143164a59d05SAnirudh Venkataramanan 143263f545edSBrett Creeley /* This block of logic allows us to get away with only updating 143363f545edSBrett Creeley * one ITR value with each interrupt. The idea is to perform a 143463f545edSBrett Creeley * pseudo-lazy update with the following criteria. 143563f545edSBrett Creeley * 143663f545edSBrett Creeley * 1. Rx is given higher priority than Tx if both are in same state 143763f545edSBrett Creeley * 2. If we must reduce an ITR that is given highest priority. 143863f545edSBrett Creeley * 3. We then give priority to increasing ITR based on amount. 
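 *
 * For example (editorial illustration), if Rx needs to come down while Tx
 * only wants to go up, the Rx reduction is written on this interrupt and the
 * Tx increase has to wait for a later pass through this function.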
143963f545edSBrett Creeley */ 144064a59d05SAnirudh Venkataramanan if (rx->target_itr < rx->current_itr) { 144163f545edSBrett Creeley /* Rx ITR needs to be reduced, this is highest priority */ 144264a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 144364a59d05SAnirudh Venkataramanan rx->current_itr = rx->target_itr; 144464a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 144564a59d05SAnirudh Venkataramanan } else if ((tx->target_itr < tx->current_itr) || 144664a59d05SAnirudh Venkataramanan ((rx->target_itr - rx->current_itr) < 144764a59d05SAnirudh Venkataramanan (tx->target_itr - tx->current_itr))) { 144863f545edSBrett Creeley /* Tx ITR needs to be reduced, this is second priority 144963f545edSBrett Creeley * Tx ITR needs to be increased more than Rx, fourth priority 145063f545edSBrett Creeley */ 145164a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); 145264a59d05SAnirudh Venkataramanan tx->current_itr = tx->target_itr; 145364a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 145464a59d05SAnirudh Venkataramanan } else if (rx->current_itr != rx->target_itr) { 145563f545edSBrett Creeley /* Rx ITR needs to be increased, third priority */ 145664a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 145764a59d05SAnirudh Venkataramanan rx->current_itr = rx->target_itr; 145864a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 145963f545edSBrett Creeley } else { 146063f545edSBrett Creeley /* Still have to re-enable the interrupts */ 146163f545edSBrett Creeley itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 146264a59d05SAnirudh Venkataramanan if (q_vector->itr_countdown) 146364a59d05SAnirudh Venkataramanan q_vector->itr_countdown--; 146463f545edSBrett Creeley } 146563f545edSBrett Creeley 14662fb0821fSJesse Brandeburg if (!test_bit(__ICE_DOWN, q_vector->vsi->state)) 14672fb0821fSJesse Brandeburg wr32(&q_vector->vsi->back->hw, 1468b07833a0SBrett Creeley GLINT_DYN_CTL(q_vector->reg_idx), 146964a59d05SAnirudh Venkataramanan itr_val); 147063f545edSBrett Creeley } 147163f545edSBrett Creeley 147263f545edSBrett Creeley /** 14732ab28bb0SBrett Creeley * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 14742ab28bb0SBrett Creeley * @q_vector: q_vector to set WB_ON_ITR on 14752ab28bb0SBrett Creeley * 14762ab28bb0SBrett Creeley * We need to tell hardware to write-back completed descriptors even when 14772ab28bb0SBrett Creeley * interrupts are disabled. Descriptors will be written back on cache line 14782ab28bb0SBrett Creeley * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 14792ab28bb0SBrett Creeley * descriptors may not be written back if they don't fill a cache line until the 14802ab28bb0SBrett Creeley * next interrupt. 14812ab28bb0SBrett Creeley * 14822ab28bb0SBrett Creeley * This sets the write-back frequency to 2 microseconds as that is the minimum 14832ab28bb0SBrett Creeley * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to 14842ab28bb0SBrett Creeley * make sure hardware knows we aren't meddling with the INTENA_M bit. 
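 *
 * (Editorial note: ICE_WB_ON_ITR_USECS presumably carries that 2 usec value;
 * the next time ice_update_ena_itr() runs it sees ICE_IN_WB_ON_ITR_MODE,
 * programs a short ITR once to flush any pending work and restores the
 * user's ITR setting, as in the exit path above.)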
14852ab28bb0SBrett Creeley */ 14862fb0821fSJesse Brandeburg static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 14872ab28bb0SBrett Creeley { 14882fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 14892fb0821fSJesse Brandeburg 14902ab28bb0SBrett Creeley /* already in WB_ON_ITR mode no need to change it */ 14912ab28bb0SBrett Creeley if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) 14922ab28bb0SBrett Creeley return; 14932ab28bb0SBrett Creeley 14942ab28bb0SBrett Creeley if (q_vector->num_ring_rx) 14952ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 14962ab28bb0SBrett Creeley ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, 14972ab28bb0SBrett Creeley ICE_RX_ITR)); 14982ab28bb0SBrett Creeley 14992ab28bb0SBrett Creeley if (q_vector->num_ring_tx) 15002ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 15012ab28bb0SBrett Creeley ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, 15022ab28bb0SBrett Creeley ICE_TX_ITR)); 15032ab28bb0SBrett Creeley 15042ab28bb0SBrett Creeley q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; 15052ab28bb0SBrett Creeley } 15062ab28bb0SBrett Creeley 15072ab28bb0SBrett Creeley /** 15082b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 15092b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 15102b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 15112b245cb2SAnirudh Venkataramanan * 15122b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 15132b245cb2SAnirudh Venkataramanan * 15142b245cb2SAnirudh Venkataramanan * Returns the amount of work done 15152b245cb2SAnirudh Venkataramanan */ 15162b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 15172b245cb2SAnirudh Venkataramanan { 15182b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 15192b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 15202b245cb2SAnirudh Venkataramanan bool clean_complete = true; 15212b245cb2SAnirudh Venkataramanan struct ice_ring *ring; 15229118fcd5SBrett Creeley int budget_per_ring; 15232b245cb2SAnirudh Venkataramanan int work_done = 0; 15242b245cb2SAnirudh Venkataramanan 15252b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 15262b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 15272b245cb2SAnirudh Venkataramanan */ 15282d4238f5SKrzysztof Kazimierczak ice_for_each_ring(ring, q_vector->tx) { 15292d4238f5SKrzysztof Kazimierczak bool wd = ring->xsk_umem ? 
15302d4238f5SKrzysztof Kazimierczak ice_clean_tx_irq_zc(ring, budget) : 15312d4238f5SKrzysztof Kazimierczak ice_clean_tx_irq(ring, budget); 15322d4238f5SKrzysztof Kazimierczak 15332d4238f5SKrzysztof Kazimierczak if (!wd) 15342b245cb2SAnirudh Venkataramanan clean_complete = false; 15352d4238f5SKrzysztof Kazimierczak } 15362b245cb2SAnirudh Venkataramanan 15372b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 1538d27525ecSJesse Brandeburg if (unlikely(budget <= 0)) 15392b245cb2SAnirudh Venkataramanan return budget; 15402b245cb2SAnirudh Venkataramanan 15419118fcd5SBrett Creeley /* normally we have 1 Rx ring per q_vector */ 15429118fcd5SBrett Creeley if (unlikely(q_vector->num_ring_rx > 1)) 15439118fcd5SBrett Creeley /* We attempt to distribute budget to each Rx queue fairly, but 15449118fcd5SBrett Creeley * don't allow the budget to go below 1 because that would exit 15459118fcd5SBrett Creeley * polling early. 15462b245cb2SAnirudh Venkataramanan */ 154788865fc4SKarol Kolacinski budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 15489118fcd5SBrett Creeley else 15499118fcd5SBrett Creeley /* Max of 1 Rx ring in this q_vector so give it the budget */ 15509118fcd5SBrett Creeley budget_per_ring = budget; 15512b245cb2SAnirudh Venkataramanan 15522b245cb2SAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->rx) { 15532b245cb2SAnirudh Venkataramanan int cleaned; 15542b245cb2SAnirudh Venkataramanan 15552d4238f5SKrzysztof Kazimierczak /* A dedicated path for zero-copy allows making a single 15562d4238f5SKrzysztof Kazimierczak * comparison in the irq context instead of many inside the 15572d4238f5SKrzysztof Kazimierczak * ice_clean_rx_irq function and makes the codebase cleaner. 15582d4238f5SKrzysztof Kazimierczak */ 15592d4238f5SKrzysztof Kazimierczak cleaned = ring->xsk_umem ? 
15602d4238f5SKrzysztof Kazimierczak ice_clean_rx_irq_zc(ring, budget_per_ring) : 15612d4238f5SKrzysztof Kazimierczak ice_clean_rx_irq(ring, budget_per_ring); 15622b245cb2SAnirudh Venkataramanan work_done += cleaned; 15632b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 15642b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 15652b245cb2SAnirudh Venkataramanan clean_complete = false; 15662b245cb2SAnirudh Venkataramanan } 15672b245cb2SAnirudh Venkataramanan 15682b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 15692b245cb2SAnirudh Venkataramanan if (!clean_complete) 15702b245cb2SAnirudh Venkataramanan return budget; 15712b245cb2SAnirudh Venkataramanan 15720bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 15730bcd952fSJesse Brandeburg * poll us due to busy-polling 15740bcd952fSJesse Brandeburg */ 15750bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) 15762fb0821fSJesse Brandeburg ice_update_ena_itr(q_vector); 15772ab28bb0SBrett Creeley else 15782fb0821fSJesse Brandeburg ice_set_wb_on_itr(q_vector); 1579e0c9fd9bSDave Ertman 158032a64994SBruce Allan return min_t(int, work_done, budget - 1); 15812b245cb2SAnirudh Venkataramanan } 15822b245cb2SAnirudh Venkataramanan 15832b245cb2SAnirudh Venkataramanan /** 1584d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 15852b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15862b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15872b245cb2SAnirudh Venkataramanan * 15882b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 15892b245cb2SAnirudh Venkataramanan */ 15902b245cb2SAnirudh Venkataramanan static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 15912b245cb2SAnirudh Venkataramanan { 15922b245cb2SAnirudh Venkataramanan netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 15932b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 15942b245cb2SAnirudh Venkataramanan smp_mb(); 15952b245cb2SAnirudh Venkataramanan 15962b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. */ 15972b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 15982b245cb2SAnirudh Venkataramanan return -EBUSY; 15992b245cb2SAnirudh Venkataramanan 16002b245cb2SAnirudh Venkataramanan /* A reprieve! 
- use start_subqueue because it doesn't call schedule */ 16012b245cb2SAnirudh Venkataramanan netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 16022b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 16032b245cb2SAnirudh Venkataramanan return 0; 16042b245cb2SAnirudh Venkataramanan } 16052b245cb2SAnirudh Venkataramanan 16062b245cb2SAnirudh Venkataramanan /** 1607d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 16082b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 16092b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 16102b245cb2SAnirudh Venkataramanan * 16112b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 16122b245cb2SAnirudh Venkataramanan */ 16132b245cb2SAnirudh Venkataramanan static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 16142b245cb2SAnirudh Venkataramanan { 16152b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 16162b245cb2SAnirudh Venkataramanan return 0; 1617d337f2afSAnirudh Venkataramanan 16182b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 16192b245cb2SAnirudh Venkataramanan } 16202b245cb2SAnirudh Venkataramanan 16212b245cb2SAnirudh Venkataramanan /** 16222b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 16232b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 16242b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1625d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 16262b245cb2SAnirudh Venkataramanan * 16272b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 16282b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 16292b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
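 *
 * (Editorial note: any chunk larger than ICE_MAX_DATA_PER_TXD is split across
 * multiple descriptors in the loop below, the last descriptor is stamped with
 * ICE_TXD_LAST_DESC_CMD (RS and EOP), and first->next_to_watch is pointed at
 * it so the clean routine can tell when the whole packet has been sent.)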
16302b245cb2SAnirudh Venkataramanan */ 1631d76a60baSAnirudh Venkataramanan static void 1632d76a60baSAnirudh Venkataramanan ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, 1633d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 16342b245cb2SAnirudh Venkataramanan { 1635d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 16362b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 16372b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 16382b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 16392b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 16402b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 16414ee656bbSTony Nguyen skb_frag_t *frag; 16422b245cb2SAnirudh Venkataramanan dma_addr_t dma; 16432b245cb2SAnirudh Venkataramanan 1644d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1645d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1646d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 16472b245cb2SAnirudh Venkataramanan skb = first->skb; 16482b245cb2SAnirudh Venkataramanan 16492b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 16502b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 16512b245cb2SAnirudh Venkataramanan 16522b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 16532b245cb2SAnirudh Venkataramanan 1654d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1655d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1656d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1657d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1658d76a60baSAnirudh Venkataramanan } 1659d76a60baSAnirudh Venkataramanan 16602b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 16612b245cb2SAnirudh Venkataramanan 16622b245cb2SAnirudh Venkataramanan tx_buf = first; 16632b245cb2SAnirudh Venkataramanan 16642b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 16652b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16662b245cb2SAnirudh Venkataramanan 16672b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 16682b245cb2SAnirudh Venkataramanan goto dma_error; 16692b245cb2SAnirudh Venkataramanan 16702b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 16712b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 16722b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 16732b245cb2SAnirudh Venkataramanan 16742b245cb2SAnirudh Venkataramanan /* align size to end of page */ 16752b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 16762b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16772b245cb2SAnirudh Venkataramanan 16782b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 16792b245cb2SAnirudh Venkataramanan * can handle 16802b245cb2SAnirudh Venkataramanan */ 16812b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 16822b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 16835757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, max_data, 16845757cc7cSTony Nguyen td_tag); 16852b245cb2SAnirudh Venkataramanan 16862b245cb2SAnirudh Venkataramanan tx_desc++; 16872b245cb2SAnirudh Venkataramanan i++; 16882b245cb2SAnirudh Venkataramanan 16892b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16902b245cb2SAnirudh Venkataramanan tx_desc = 
ICE_TX_DESC(tx_ring, 0); 16912b245cb2SAnirudh Venkataramanan i = 0; 16922b245cb2SAnirudh Venkataramanan } 16932b245cb2SAnirudh Venkataramanan 16942b245cb2SAnirudh Venkataramanan dma += max_data; 16952b245cb2SAnirudh Venkataramanan size -= max_data; 16962b245cb2SAnirudh Venkataramanan 16972b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16982b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16992b245cb2SAnirudh Venkataramanan } 17002b245cb2SAnirudh Venkataramanan 17012b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 17022b245cb2SAnirudh Venkataramanan break; 17032b245cb2SAnirudh Venkataramanan 17045757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 17052b245cb2SAnirudh Venkataramanan size, td_tag); 17062b245cb2SAnirudh Venkataramanan 17072b245cb2SAnirudh Venkataramanan tx_desc++; 17082b245cb2SAnirudh Venkataramanan i++; 17092b245cb2SAnirudh Venkataramanan 17102b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 17112b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 17122b245cb2SAnirudh Venkataramanan i = 0; 17132b245cb2SAnirudh Venkataramanan } 17142b245cb2SAnirudh Venkataramanan 17152b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 17162b245cb2SAnirudh Venkataramanan data_len -= size; 17172b245cb2SAnirudh Venkataramanan 17182b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 17192b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 17202b245cb2SAnirudh Venkataramanan 17212b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 17222b245cb2SAnirudh Venkataramanan } 17232b245cb2SAnirudh Venkataramanan 17242b245cb2SAnirudh Venkataramanan /* record bytecount for BQL */ 17252b245cb2SAnirudh Venkataramanan netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 17262b245cb2SAnirudh Venkataramanan 17272b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 17282b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 17292b245cb2SAnirudh Venkataramanan 17302b245cb2SAnirudh Venkataramanan i++; 17312b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 17322b245cb2SAnirudh Venkataramanan i = 0; 17332b245cb2SAnirudh Venkataramanan 17342b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 1735efc2214bSMaciej Fijalkowski td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 17365757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = 17375757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, size, td_tag); 17382b245cb2SAnirudh Venkataramanan 17392b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 17402b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 17412b245cb2SAnirudh Venkataramanan * 17422b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 17432b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
17442b245cb2SAnirudh Venkataramanan */ 17452b245cb2SAnirudh Venkataramanan wmb(); 17462b245cb2SAnirudh Venkataramanan 17472b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 17482b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 17492b245cb2SAnirudh Venkataramanan 17502b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 17512b245cb2SAnirudh Venkataramanan 17522b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 17532b245cb2SAnirudh Venkataramanan 17542b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 17554ee656bbSTony Nguyen if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 17562b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 17572b245cb2SAnirudh Venkataramanan 17582b245cb2SAnirudh Venkataramanan return; 17592b245cb2SAnirudh Venkataramanan 17602b245cb2SAnirudh Venkataramanan dma_error: 17612f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 17622b245cb2SAnirudh Venkataramanan for (;;) { 17632b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 17642b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 17652b245cb2SAnirudh Venkataramanan if (tx_buf == first) 17662b245cb2SAnirudh Venkataramanan break; 17672b245cb2SAnirudh Venkataramanan if (i == 0) 17682b245cb2SAnirudh Venkataramanan i = tx_ring->count; 17692b245cb2SAnirudh Venkataramanan i--; 17702b245cb2SAnirudh Venkataramanan } 17712b245cb2SAnirudh Venkataramanan 17722b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 17732b245cb2SAnirudh Venkataramanan } 17742b245cb2SAnirudh Venkataramanan 17752b245cb2SAnirudh Venkataramanan /** 1776d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1777d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1778d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1779d76a60baSAnirudh Venkataramanan * 1780d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
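 *
 * (Editorial note: when offload is used, the checksum commands are
 * accumulated into off->td_cmd and the header lengths are packed into
 * off->td_offset (MACLEN in 2-byte words, IPLEN and L4LEN in 4-byte words),
 * which ice_tx_map() later folds into each data descriptor via
 * ice_build_ctob().)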
1781d76a60baSAnirudh Venkataramanan */ 1782d76a60baSAnirudh Venkataramanan static 1783d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1784d76a60baSAnirudh Venkataramanan { 1785d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1786d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1787d76a60baSAnirudh Venkataramanan union { 1788d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1789d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1790d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1791d76a60baSAnirudh Venkataramanan } ip; 1792d76a60baSAnirudh Venkataramanan union { 1793d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1794d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1795d76a60baSAnirudh Venkataramanan } l4; 1796d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1797d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1798d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1799d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1800d76a60baSAnirudh Venkataramanan 1801d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1802d76a60baSAnirudh Venkataramanan return 0; 1803d76a60baSAnirudh Venkataramanan 1804d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1805d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1806d76a60baSAnirudh Venkataramanan 1807d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1808d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1809d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1810d76a60baSAnirudh Venkataramanan 1811a4e82a81STony Nguyen protocol = vlan_get_protocol(skb); 1812a4e82a81STony Nguyen 1813a4e82a81STony Nguyen if (protocol == htons(ETH_P_IP)) 1814a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1815a4e82a81STony Nguyen else if (protocol == htons(ETH_P_IPV6)) 1816a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1817a4e82a81STony Nguyen 1818a4e82a81STony Nguyen if (skb->encapsulation) { 1819a4e82a81STony Nguyen bool gso_ena = false; 1820a4e82a81STony Nguyen u32 tunnel = 0; 1821a4e82a81STony Nguyen 1822a4e82a81STony Nguyen /* define outer network header type */ 1823a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1824a4e82a81STony Nguyen tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
1825a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4 : 1826a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 1827a4e82a81STony Nguyen l4_proto = ip.v4->protocol; 1828a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1829a4e82a81STony Nguyen tunnel |= ICE_TX_CTX_EIPT_IPV6; 1830a4e82a81STony Nguyen exthdr = ip.hdr + sizeof(*ip.v6); 1831a4e82a81STony Nguyen l4_proto = ip.v6->nexthdr; 1832a4e82a81STony Nguyen if (l4.hdr != exthdr) 1833a4e82a81STony Nguyen ipv6_skip_exthdr(skb, exthdr - skb->data, 1834a4e82a81STony Nguyen &l4_proto, &frag_off); 1835a4e82a81STony Nguyen } 1836a4e82a81STony Nguyen 1837a4e82a81STony Nguyen /* define outer transport */ 1838a4e82a81STony Nguyen switch (l4_proto) { 1839a4e82a81STony Nguyen case IPPROTO_UDP: 1840a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_UDP_TUNNELING; 1841a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1842a4e82a81STony Nguyen break; 1843a4e82a81STony Nguyen case IPPROTO_GRE: 1844a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_GRE_TUNNELING; 1845a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1846a4e82a81STony Nguyen break; 1847a4e82a81STony Nguyen case IPPROTO_IPIP: 1848a4e82a81STony Nguyen case IPPROTO_IPV6: 1849a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1850a4e82a81STony Nguyen l4.hdr = skb_inner_network_header(skb); 1851a4e82a81STony Nguyen break; 1852a4e82a81STony Nguyen default: 1853a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_TSO) 1854d76a60baSAnirudh Venkataramanan return -1; 1855d76a60baSAnirudh Venkataramanan 1856a4e82a81STony Nguyen skb_checksum_help(skb); 1857a4e82a81STony Nguyen return 0; 1858a4e82a81STony Nguyen } 1859a4e82a81STony Nguyen 1860a4e82a81STony Nguyen /* compute outer L3 header size */ 1861a4e82a81STony Nguyen tunnel |= ((l4.hdr - ip.hdr) / 4) << 1862a4e82a81STony Nguyen ICE_TXD_CTX_QW0_EIPLEN_S; 1863a4e82a81STony Nguyen 1864a4e82a81STony Nguyen /* switch IP header pointer from outer to inner header */ 1865a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1866a4e82a81STony Nguyen 1867a4e82a81STony Nguyen /* compute tunnel header size */ 1868a4e82a81STony Nguyen tunnel |= ((ip.hdr - l4.hdr) / 2) << 1869a4e82a81STony Nguyen ICE_TXD_CTX_QW0_NATLEN_S; 1870a4e82a81STony Nguyen 1871a4e82a81STony Nguyen gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; 1872a4e82a81STony Nguyen /* indicate if we need to offload outer UDP header */ 1873a4e82a81STony Nguyen if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && 1874a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 1875a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M; 1876a4e82a81STony Nguyen 1877a4e82a81STony Nguyen /* record tunnel offload values */ 1878a4e82a81STony Nguyen off->cd_tunnel_params |= tunnel; 1879a4e82a81STony Nguyen 1880a4e82a81STony Nguyen /* set DTYP=1 to indicate that it's an Tx context descriptor 1881a4e82a81STony Nguyen * in IPsec tunnel mode with Tx offloads in Quad word 1 1882a4e82a81STony Nguyen */ 1883a4e82a81STony Nguyen off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; 1884a4e82a81STony Nguyen 1885a4e82a81STony Nguyen /* switch L4 header pointer from outer to inner */ 1886a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1887a4e82a81STony Nguyen l4_proto = 0; 1888a4e82a81STony Nguyen 1889a4e82a81STony Nguyen /* reset type as we transition from outer to inner headers */ 1890a4e82a81STony Nguyen first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6); 1891a4e82a81STony Nguyen if (ip.v4->version == 4) 1892a4e82a81STony Nguyen 
first->tx_flags |= ICE_TX_FLAGS_IPV4; 1893a4e82a81STony Nguyen if (ip.v6->version == 6) 1894a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1895a4e82a81STony Nguyen } 1896a4e82a81STony Nguyen 1897d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1898a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1899d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1900d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already, the only time we 1901d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 1902d76a60baSAnirudh Venkataramanan */ 1903d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1904d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1905d76a60baSAnirudh Venkataramanan else 1906d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1907d76a60baSAnirudh Venkataramanan 1908a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1909d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1910d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1911d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1912d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1913d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1914d76a60baSAnirudh Venkataramanan &frag_off); 1915d76a60baSAnirudh Venkataramanan } else { 1916d76a60baSAnirudh Venkataramanan return -1; 1917d76a60baSAnirudh Venkataramanan } 1918d76a60baSAnirudh Venkataramanan 1919d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1920d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1921d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1922d76a60baSAnirudh Venkataramanan 1923d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1924d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1925d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 1926d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 1927d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1928d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 1929d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1930d76a60baSAnirudh Venkataramanan break; 1931d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 1932d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 1933d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1934d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 1935d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1936d76a60baSAnirudh Venkataramanan break; 1937d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 1938cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 1939cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1940cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 1941cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1942cf909e19SAnirudh Venkataramanan break; 1943cf909e19SAnirudh Venkataramanan 1944d76a60baSAnirudh Venkataramanan default: 1945d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1946d76a60baSAnirudh Venkataramanan return -1; 1947d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 1948d76a60baSAnirudh Venkataramanan return 0; 1949d76a60baSAnirudh Venkataramanan } 1950d76a60baSAnirudh Venkataramanan 1951d76a60baSAnirudh Venkataramanan 
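	/* Illustrative example (editor's addition, not part of the driver):
	 * for an untagged IPv4/TCP frame with no IP or TCP options the code
	 * above yields
	 *   offset = (14 / 2) << ICE_TX_DESC_LEN_MACLEN_S |
	 *            (20 / 4) << ICE_TX_DESC_LEN_IPLEN_S |
	 *            5 << ICE_TX_DESC_LEN_L4_LEN_S,
	 * i.e. MACLEN = 7, IPLEN = 5 and L4LEN = 5 (tcp->doff for a bare
	 * header).
	 */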
off->td_cmd |= cmd;
1952d76a60baSAnirudh Venkataramanan off->td_offset |= offset;
1953d76a60baSAnirudh Venkataramanan return 1;
1954d76a60baSAnirudh Venkataramanan }
1955d76a60baSAnirudh Venkataramanan 
1956d76a60baSAnirudh Venkataramanan /**
1957f9867df6SAnirudh Venkataramanan * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1958d76a60baSAnirudh Venkataramanan * @tx_ring: ring to send buffer on
1959d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf
1960d76a60baSAnirudh Venkataramanan *
1961d76a60baSAnirudh Venkataramanan * Checks the skb and sets up the corresponding generic transmit flags
1962d76a60baSAnirudh Venkataramanan * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1963d76a60baSAnirudh Venkataramanan *
1964d76a60baSAnirudh Venkataramanan * Returns an error code indicating the frame should be dropped on error,
1965d76a60baSAnirudh Venkataramanan * otherwise returns 0 to indicate the flags have been set properly.
1966d76a60baSAnirudh Venkataramanan */
1967d76a60baSAnirudh Venkataramanan static int
1968d76a60baSAnirudh Venkataramanan ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1969d76a60baSAnirudh Venkataramanan {
1970d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb;
1971d76a60baSAnirudh Venkataramanan __be16 protocol = skb->protocol;
1972d76a60baSAnirudh Venkataramanan 
1973d76a60baSAnirudh Venkataramanan if (protocol == htons(ETH_P_8021Q) &&
1974d76a60baSAnirudh Venkataramanan !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1975d76a60baSAnirudh Venkataramanan /* when HW VLAN acceleration is turned off by the user the
1976d76a60baSAnirudh Venkataramanan * stack sets the protocol to 8021q so that the driver
1977d76a60baSAnirudh Venkataramanan * can take any steps required to support the SW only
1978d76a60baSAnirudh Venkataramanan * VLAN handling. In our case the driver doesn't need
1979d76a60baSAnirudh Venkataramanan * to take any further steps so just set the protocol
1980d76a60baSAnirudh Venkataramanan * to the encapsulated ethertype.
1981d76a60baSAnirudh Venkataramanan */ 1982d76a60baSAnirudh Venkataramanan skb->protocol = vlan_get_protocol(skb); 19835f6aa50eSAnirudh Venkataramanan return 0; 1984d76a60baSAnirudh Venkataramanan } 1985d76a60baSAnirudh Venkataramanan 1986d76a60baSAnirudh Venkataramanan /* if we have a HW VLAN tag being added, default to the HW one */ 1987d76a60baSAnirudh Venkataramanan if (skb_vlan_tag_present(skb)) { 1988d76a60baSAnirudh Venkataramanan first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 1989d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 1990d76a60baSAnirudh Venkataramanan } else if (protocol == htons(ETH_P_8021Q)) { 1991d76a60baSAnirudh Venkataramanan struct vlan_hdr *vhdr, _vhdr; 1992d76a60baSAnirudh Venkataramanan 1993d76a60baSAnirudh Venkataramanan /* for SW VLAN, check the next protocol and store the tag */ 1994d76a60baSAnirudh Venkataramanan vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN, 1995d76a60baSAnirudh Venkataramanan sizeof(_vhdr), 1996d76a60baSAnirudh Venkataramanan &_vhdr); 1997d76a60baSAnirudh Venkataramanan if (!vhdr) 1998d76a60baSAnirudh Venkataramanan return -EINVAL; 1999d76a60baSAnirudh Venkataramanan 2000d76a60baSAnirudh Venkataramanan first->tx_flags |= ntohs(vhdr->h_vlan_TCI) << 2001d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 2002d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_SW_VLAN; 2003d76a60baSAnirudh Venkataramanan } 2004d76a60baSAnirudh Venkataramanan 20055f6aa50eSAnirudh Venkataramanan return ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 2006d76a60baSAnirudh Venkataramanan } 2007d76a60baSAnirudh Venkataramanan 2008d76a60baSAnirudh Venkataramanan /** 2009d76a60baSAnirudh Venkataramanan * ice_tso - computes mss and TSO length to prepare for TSO 2010d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 2011d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 2012d76a60baSAnirudh Venkataramanan * 2013d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if TSO can't happen, 1 otherwise. 
2014d76a60baSAnirudh Venkataramanan */ 2015d76a60baSAnirudh Venkataramanan static 2016d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2017d76a60baSAnirudh Venkataramanan { 2018d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 2019d76a60baSAnirudh Venkataramanan union { 2020d76a60baSAnirudh Venkataramanan struct iphdr *v4; 2021d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 2022d76a60baSAnirudh Venkataramanan unsigned char *hdr; 2023d76a60baSAnirudh Venkataramanan } ip; 2024d76a60baSAnirudh Venkataramanan union { 2025d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 2026a54e3b8cSBrett Creeley struct udphdr *udp; 2027d76a60baSAnirudh Venkataramanan unsigned char *hdr; 2028d76a60baSAnirudh Venkataramanan } l4; 2029d76a60baSAnirudh Venkataramanan u64 cd_mss, cd_tso_len; 203088865fc4SKarol Kolacinski u32 paylen; 203188865fc4SKarol Kolacinski u8 l4_start; 2032d76a60baSAnirudh Venkataramanan int err; 2033d76a60baSAnirudh Venkataramanan 2034d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 2035d76a60baSAnirudh Venkataramanan return 0; 2036d76a60baSAnirudh Venkataramanan 2037d76a60baSAnirudh Venkataramanan if (!skb_is_gso(skb)) 2038d76a60baSAnirudh Venkataramanan return 0; 2039d76a60baSAnirudh Venkataramanan 2040d76a60baSAnirudh Venkataramanan err = skb_cow_head(skb, 0); 2041d76a60baSAnirudh Venkataramanan if (err < 0) 2042d76a60baSAnirudh Venkataramanan return err; 2043d76a60baSAnirudh Venkataramanan 2044c3a6825eSBruce Allan /* cppcheck-suppress unreadVariable */ 2045d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 2046d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 2047d76a60baSAnirudh Venkataramanan 2048d76a60baSAnirudh Venkataramanan /* initialize outer IP header fields */ 2049d76a60baSAnirudh Venkataramanan if (ip.v4->version == 4) { 2050d76a60baSAnirudh Venkataramanan ip.v4->tot_len = 0; 2051d76a60baSAnirudh Venkataramanan ip.v4->check = 0; 2052d76a60baSAnirudh Venkataramanan } else { 2053d76a60baSAnirudh Venkataramanan ip.v6->payload_len = 0; 2054d76a60baSAnirudh Venkataramanan } 2055d76a60baSAnirudh Venkataramanan 2056a4e82a81STony Nguyen if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 2057a4e82a81STony Nguyen SKB_GSO_GRE_CSUM | 2058a4e82a81STony Nguyen SKB_GSO_IPXIP4 | 2059a4e82a81STony Nguyen SKB_GSO_IPXIP6 | 2060a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL | 2061a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL_CSUM)) { 2062a4e82a81STony Nguyen if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 2063a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 2064a4e82a81STony Nguyen l4.udp->len = 0; 2065a4e82a81STony Nguyen 2066a4e82a81STony Nguyen /* determine offset of outer transport header */ 206788865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data); 2068a4e82a81STony Nguyen 2069a4e82a81STony Nguyen /* remove payload length from outer checksum */ 2070a4e82a81STony Nguyen paylen = skb->len - l4_start; 2071a4e82a81STony Nguyen csum_replace_by_diff(&l4.udp->check, 2072a4e82a81STony Nguyen (__force __wsum)htonl(paylen)); 2073a4e82a81STony Nguyen } 2074a4e82a81STony Nguyen 2075a4e82a81STony Nguyen /* reset pointers to inner headers */ 2076a4e82a81STony Nguyen 2077a4e82a81STony Nguyen /* cppcheck-suppress unreadVariable */ 2078a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 2079a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 2080a4e82a81STony Nguyen 2081a4e82a81STony Nguyen /* initialize inner IP header fields */ 
2082a4e82a81STony Nguyen if (ip.v4->version == 4) {
2083a4e82a81STony Nguyen ip.v4->tot_len = 0;
2084a4e82a81STony Nguyen ip.v4->check = 0;
2085a4e82a81STony Nguyen } else {
2086a4e82a81STony Nguyen ip.v6->payload_len = 0;
2087a4e82a81STony Nguyen }
2088a4e82a81STony Nguyen }
2089a4e82a81STony Nguyen 
2090d76a60baSAnirudh Venkataramanan /* determine offset of transport header */
209188865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data);
2092d76a60baSAnirudh Venkataramanan 
2093d76a60baSAnirudh Venkataramanan /* remove payload length from checksum */
2094d76a60baSAnirudh Venkataramanan paylen = skb->len - l4_start;
2095d76a60baSAnirudh Venkataramanan 
2096a54e3b8cSBrett Creeley if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2097a54e3b8cSBrett Creeley csum_replace_by_diff(&l4.udp->check,
2098a54e3b8cSBrett Creeley (__force __wsum)htonl(paylen));
2099a54e3b8cSBrett Creeley /* compute length of UDP segmentation header */
210088865fc4SKarol Kolacinski off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2101a54e3b8cSBrett Creeley } else {
2102a54e3b8cSBrett Creeley csum_replace_by_diff(&l4.tcp->check,
2103a54e3b8cSBrett Creeley (__force __wsum)htonl(paylen));
2104a54e3b8cSBrett Creeley /* compute length of TCP segmentation header */
210588865fc4SKarol Kolacinski off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2106a54e3b8cSBrett Creeley }
2107d76a60baSAnirudh Venkataramanan 
2108d76a60baSAnirudh Venkataramanan /* update gso_segs and bytecount */
2109d76a60baSAnirudh Venkataramanan first->gso_segs = skb_shinfo(skb)->gso_segs;
2110d944b469SBrett Creeley first->bytecount += (first->gso_segs - 1) * off->header_len;
2111d76a60baSAnirudh Venkataramanan 
2112d76a60baSAnirudh Venkataramanan cd_tso_len = skb->len - off->header_len;
2113d76a60baSAnirudh Venkataramanan cd_mss = skb_shinfo(skb)->gso_size;
2114d76a60baSAnirudh Venkataramanan 
2115d76a60baSAnirudh Venkataramanan /* record cdesc_qw1 with TSO parameters */
2116e65e9e15SBruce Allan off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2117d76a60baSAnirudh Venkataramanan (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2118d76a60baSAnirudh Venkataramanan (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2119e65e9e15SBruce Allan (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2120d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_TSO;
2121d76a60baSAnirudh Venkataramanan return 1;
2122d76a60baSAnirudh Venkataramanan }
2123d76a60baSAnirudh Venkataramanan 
2124d76a60baSAnirudh Venkataramanan /**
21252b245cb2SAnirudh Venkataramanan * ice_txd_use_count - estimate the number of descriptors needed for Tx
21262b245cb2SAnirudh Venkataramanan * @size: transmit request size in bytes
21272b245cb2SAnirudh Venkataramanan *
21282b245cb2SAnirudh Venkataramanan * Due to hardware alignment restrictions (4K alignment), we need to
21292b245cb2SAnirudh Venkataramanan * assume that we can have no more than 12K of data per descriptor, even
21302b245cb2SAnirudh Venkataramanan * though each descriptor can take up to 16K - 1 bytes of aligned memory.
21312b245cb2SAnirudh Venkataramanan * Thus, we need to divide by 12K. But division is slow! Instead,
21322b245cb2SAnirudh Venkataramanan * we decompose the operation into shifts and one relatively cheap
21332b245cb2SAnirudh Venkataramanan * multiply operation.
21342b245cb2SAnirudh Venkataramanan * 21352b245cb2SAnirudh Venkataramanan * To divide by 12K, we first divide by 4K, then divide by 3: 21362b245cb2SAnirudh Venkataramanan * To divide by 4K, shift right by 12 bits 21372b245cb2SAnirudh Venkataramanan * To divide by 3, multiply by 85, then divide by 256 21382b245cb2SAnirudh Venkataramanan * (Divide by 256 is done by shifting right by 8 bits) 21392b245cb2SAnirudh Venkataramanan * Finally, we add one to round up. Because 256 isn't an exact multiple of 21402b245cb2SAnirudh Venkataramanan * 3, we'll underestimate near each multiple of 12K. This is actually more 21412b245cb2SAnirudh Venkataramanan * accurate as we have 4K - 1 of wiggle room that we can fit into the last 21422b245cb2SAnirudh Venkataramanan * segment. For our purposes this is accurate out to 1M which is orders of 21432b245cb2SAnirudh Venkataramanan * magnitude greater than our largest possible GSO size. 21442b245cb2SAnirudh Venkataramanan * 21452b245cb2SAnirudh Venkataramanan * This would then be implemented as: 2146c585ea42SBrett Creeley * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 21472b245cb2SAnirudh Venkataramanan * 21482b245cb2SAnirudh Venkataramanan * Since multiplication and division are commutative, we can reorder 21492b245cb2SAnirudh Venkataramanan * operations into: 2150c585ea42SBrett Creeley * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 21512b245cb2SAnirudh Venkataramanan */ 21522b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size) 21532b245cb2SAnirudh Venkataramanan { 2154c585ea42SBrett Creeley return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 21552b245cb2SAnirudh Venkataramanan } 21562b245cb2SAnirudh Venkataramanan 21572b245cb2SAnirudh Venkataramanan /** 2158d337f2afSAnirudh Venkataramanan * ice_xmit_desc_count - calculate number of Tx descriptors needed 21592b245cb2SAnirudh Venkataramanan * @skb: send buffer 21602b245cb2SAnirudh Venkataramanan * 21612b245cb2SAnirudh Venkataramanan * Returns number of data descriptors needed for this skb. 
21622b245cb2SAnirudh Venkataramanan */ 21632b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 21642b245cb2SAnirudh Venkataramanan { 2165d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 21662b245cb2SAnirudh Venkataramanan unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 21672b245cb2SAnirudh Venkataramanan unsigned int count = 0, size = skb_headlen(skb); 21682b245cb2SAnirudh Venkataramanan 21692b245cb2SAnirudh Venkataramanan for (;;) { 21702b245cb2SAnirudh Venkataramanan count += ice_txd_use_count(size); 21712b245cb2SAnirudh Venkataramanan 21722b245cb2SAnirudh Venkataramanan if (!nr_frags--) 21732b245cb2SAnirudh Venkataramanan break; 21742b245cb2SAnirudh Venkataramanan 21752b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag++); 21762b245cb2SAnirudh Venkataramanan } 21772b245cb2SAnirudh Venkataramanan 21782b245cb2SAnirudh Venkataramanan return count; 21792b245cb2SAnirudh Venkataramanan } 21802b245cb2SAnirudh Venkataramanan 21812b245cb2SAnirudh Venkataramanan /** 21822b245cb2SAnirudh Venkataramanan * __ice_chk_linearize - Check if there are more than 8 buffers per packet 21832b245cb2SAnirudh Venkataramanan * @skb: send buffer 21842b245cb2SAnirudh Venkataramanan * 21852b245cb2SAnirudh Venkataramanan * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 21862b245cb2SAnirudh Venkataramanan * and so we need to figure out the cases where we need to linearize the skb. 21872b245cb2SAnirudh Venkataramanan * 21882b245cb2SAnirudh Venkataramanan * For TSO we need to count the TSO header and segment payload separately. 21892b245cb2SAnirudh Venkataramanan * As such we need to check cases where we have 7 fragments or more as we 21902b245cb2SAnirudh Venkataramanan * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 21912b245cb2SAnirudh Venkataramanan * the segment payload in the first descriptor, and another 7 for the 21922b245cb2SAnirudh Venkataramanan * fragments. 21932b245cb2SAnirudh Venkataramanan */ 21942b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb) 21952b245cb2SAnirudh Venkataramanan { 2196d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag, *stale; 21972b245cb2SAnirudh Venkataramanan int nr_frags, sum; 21982b245cb2SAnirudh Venkataramanan 21992b245cb2SAnirudh Venkataramanan /* no need to check if number of frags is less than 7 */ 22002b245cb2SAnirudh Venkataramanan nr_frags = skb_shinfo(skb)->nr_frags; 22012b245cb2SAnirudh Venkataramanan if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 22022b245cb2SAnirudh Venkataramanan return false; 22032b245cb2SAnirudh Venkataramanan 22042b245cb2SAnirudh Venkataramanan /* We need to walk through the list and validate that each group 22052b245cb2SAnirudh Venkataramanan * of 6 fragments totals at least gso_size. 22062b245cb2SAnirudh Venkataramanan */ 22072b245cb2SAnirudh Venkataramanan nr_frags -= ICE_MAX_BUF_TXD - 2; 22082b245cb2SAnirudh Venkataramanan frag = &skb_shinfo(skb)->frags[0]; 22092b245cb2SAnirudh Venkataramanan 22102b245cb2SAnirudh Venkataramanan /* Initialize size to the negative value of gso_size minus 1. We 22114ee656bbSTony Nguyen * use this as the worst case scenario in which the frag ahead 22122b245cb2SAnirudh Venkataramanan * of us only provides one byte which is why we are limited to 6 22132b245cb2SAnirudh Venkataramanan * descriptors for a single transmit as the header and previous 22142b245cb2SAnirudh Venkataramanan * fragment are already consuming 2 descriptors. 
22152b245cb2SAnirudh Venkataramanan */ 22162b245cb2SAnirudh Venkataramanan sum = 1 - skb_shinfo(skb)->gso_size; 22172b245cb2SAnirudh Venkataramanan 22182b245cb2SAnirudh Venkataramanan /* Add size of frags 0 through 4 to create our initial sum */ 22192b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22202b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22212b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22222b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22232b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22242b245cb2SAnirudh Venkataramanan 22252b245cb2SAnirudh Venkataramanan /* Walk through fragments adding latest fragment, testing it, and 22262b245cb2SAnirudh Venkataramanan * then removing stale fragments from the sum. 22272b245cb2SAnirudh Venkataramanan */ 22282b245cb2SAnirudh Venkataramanan stale = &skb_shinfo(skb)->frags[0]; 22292b245cb2SAnirudh Venkataramanan for (;;) { 22302b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22312b245cb2SAnirudh Venkataramanan 22322b245cb2SAnirudh Venkataramanan /* if sum is negative we failed to make sufficient progress */ 22332b245cb2SAnirudh Venkataramanan if (sum < 0) 22342b245cb2SAnirudh Venkataramanan return true; 22352b245cb2SAnirudh Venkataramanan 22362b245cb2SAnirudh Venkataramanan if (!nr_frags--) 22372b245cb2SAnirudh Venkataramanan break; 22382b245cb2SAnirudh Venkataramanan 22392b245cb2SAnirudh Venkataramanan sum -= skb_frag_size(stale++); 22402b245cb2SAnirudh Venkataramanan } 22412b245cb2SAnirudh Venkataramanan 22422b245cb2SAnirudh Venkataramanan return false; 22432b245cb2SAnirudh Venkataramanan } 22442b245cb2SAnirudh Venkataramanan 22452b245cb2SAnirudh Venkataramanan /** 22462b245cb2SAnirudh Venkataramanan * ice_chk_linearize - Check if there are more than 8 fragments per packet 22472b245cb2SAnirudh Venkataramanan * @skb: send buffer 22482b245cb2SAnirudh Venkataramanan * @count: number of buffers used 22492b245cb2SAnirudh Venkataramanan * 22502b245cb2SAnirudh Venkataramanan * Note: Our HW can't scatter-gather more than 8 fragments to build 22512b245cb2SAnirudh Venkataramanan * a packet on the wire and so we need to figure out the cases where we 22522b245cb2SAnirudh Venkataramanan * need to linearize the skb. 
22532b245cb2SAnirudh Venkataramanan */ 22542b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 22552b245cb2SAnirudh Venkataramanan { 22562b245cb2SAnirudh Venkataramanan /* Both TSO and single send will work if count is less than 8 */ 22572b245cb2SAnirudh Venkataramanan if (likely(count < ICE_MAX_BUF_TXD)) 22582b245cb2SAnirudh Venkataramanan return false; 22592b245cb2SAnirudh Venkataramanan 22602b245cb2SAnirudh Venkataramanan if (skb_is_gso(skb)) 22612b245cb2SAnirudh Venkataramanan return __ice_chk_linearize(skb); 22622b245cb2SAnirudh Venkataramanan 22632b245cb2SAnirudh Venkataramanan /* we can support up to 8 data buffers for a single send */ 22642b245cb2SAnirudh Venkataramanan return count != ICE_MAX_BUF_TXD; 22652b245cb2SAnirudh Venkataramanan } 22662b245cb2SAnirudh Venkataramanan 22672b245cb2SAnirudh Venkataramanan /** 22682b245cb2SAnirudh Venkataramanan * ice_xmit_frame_ring - Sends buffer on Tx ring 22692b245cb2SAnirudh Venkataramanan * @skb: send buffer 22702b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 22712b245cb2SAnirudh Venkataramanan * 22722b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 22732b245cb2SAnirudh Venkataramanan */ 22742b245cb2SAnirudh Venkataramanan static netdev_tx_t 22752b245cb2SAnirudh Venkataramanan ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) 22762b245cb2SAnirudh Venkataramanan { 2277d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params offload = { 0 }; 22780c3a6101SDave Ertman struct ice_vsi *vsi = tx_ring->vsi; 22792b245cb2SAnirudh Venkataramanan struct ice_tx_buf *first; 22802b245cb2SAnirudh Venkataramanan unsigned int count; 2281d76a60baSAnirudh Venkataramanan int tso, csum; 22822b245cb2SAnirudh Venkataramanan 22832b245cb2SAnirudh Venkataramanan count = ice_xmit_desc_count(skb); 22842b245cb2SAnirudh Venkataramanan if (ice_chk_linearize(skb, count)) { 22852b245cb2SAnirudh Venkataramanan if (__skb_linearize(skb)) 22862b245cb2SAnirudh Venkataramanan goto out_drop; 22872b245cb2SAnirudh Venkataramanan count = ice_txd_use_count(skb->len); 22882b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_linearize++; 22892b245cb2SAnirudh Venkataramanan } 22902b245cb2SAnirudh Venkataramanan 22912b245cb2SAnirudh Venkataramanan /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 22922b245cb2SAnirudh Venkataramanan * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 22932b245cb2SAnirudh Venkataramanan * + 4 desc gap to avoid the cache line where head is, 22942b245cb2SAnirudh Venkataramanan * + 1 desc for context descriptor, 22952b245cb2SAnirudh Venkataramanan * otherwise try next time 22962b245cb2SAnirudh Venkataramanan */ 2297c585ea42SBrett Creeley if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2298c585ea42SBrett Creeley ICE_DESCS_FOR_CTX_DESC)) { 22992b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_busy++; 23002b245cb2SAnirudh Venkataramanan return NETDEV_TX_BUSY; 23012b245cb2SAnirudh Venkataramanan } 23022b245cb2SAnirudh Venkataramanan 2303d76a60baSAnirudh Venkataramanan offload.tx_ring = tx_ring; 2304d76a60baSAnirudh Venkataramanan 23052b245cb2SAnirudh Venkataramanan /* record the location of the first descriptor for this packet */ 23062b245cb2SAnirudh Venkataramanan first = &tx_ring->tx_buf[tx_ring->next_to_use]; 23072b245cb2SAnirudh Venkataramanan first->skb = skb; 23082b245cb2SAnirudh Venkataramanan first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 23092b245cb2SAnirudh Venkataramanan 
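/* gso_segs and bytecount are set up here for the non-GSO case; if the frame
 * is segmented, ice_tso() below replaces gso_segs and grows bytecount by one
 * header length per additional segment.
 */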
first->gso_segs = 1; 2310d76a60baSAnirudh Venkataramanan first->tx_flags = 0; 23112b245cb2SAnirudh Venkataramanan 2312d76a60baSAnirudh Venkataramanan /* prepare the VLAN tagging flags for Tx */ 2313d76a60baSAnirudh Venkataramanan if (ice_tx_prepare_vlan_flags(tx_ring, first)) 2314d76a60baSAnirudh Venkataramanan goto out_drop; 2315d76a60baSAnirudh Venkataramanan 2316d76a60baSAnirudh Venkataramanan /* set up TSO offload */ 2317d76a60baSAnirudh Venkataramanan tso = ice_tso(first, &offload); 2318d76a60baSAnirudh Venkataramanan if (tso < 0) 2319d76a60baSAnirudh Venkataramanan goto out_drop; 2320d76a60baSAnirudh Venkataramanan 2321d76a60baSAnirudh Venkataramanan /* always set up Tx checksum offload */ 2322d76a60baSAnirudh Venkataramanan csum = ice_tx_csum(first, &offload); 2323d76a60baSAnirudh Venkataramanan if (csum < 0) 2324d76a60baSAnirudh Venkataramanan goto out_drop; 2325d76a60baSAnirudh Venkataramanan 23260c3a6101SDave Ertman /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 23270c3a6101SDave Ertman if (unlikely(skb->priority == TC_PRIO_CONTROL && 23280c3a6101SDave Ertman vsi->type == ICE_VSI_PF && 23290c3a6101SDave Ertman vsi->port_info->is_sw_lldp)) 23300c3a6101SDave Ertman offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 23310c3a6101SDave Ertman ICE_TX_CTX_DESC_SWTCH_UPLINK << 23320c3a6101SDave Ertman ICE_TXD_CTX_QW1_CMD_S); 23330c3a6101SDave Ertman 23340c3a6101SDave Ertman if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2335d76a60baSAnirudh Venkataramanan struct ice_tx_ctx_desc *cdesc; 233688865fc4SKarol Kolacinski u16 i = tx_ring->next_to_use; 2337d76a60baSAnirudh Venkataramanan 2338d76a60baSAnirudh Venkataramanan /* grab the next descriptor */ 2339d76a60baSAnirudh Venkataramanan cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2340d76a60baSAnirudh Venkataramanan i++; 2341d76a60baSAnirudh Venkataramanan tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2342d76a60baSAnirudh Venkataramanan 2343d76a60baSAnirudh Venkataramanan /* setup context descriptor */ 2344d76a60baSAnirudh Venkataramanan cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2345d76a60baSAnirudh Venkataramanan cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2346d76a60baSAnirudh Venkataramanan cdesc->rsvd = cpu_to_le16(0); 2347d76a60baSAnirudh Venkataramanan cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2348d76a60baSAnirudh Venkataramanan } 2349d76a60baSAnirudh Venkataramanan 2350d76a60baSAnirudh Venkataramanan ice_tx_map(tx_ring, first, &offload); 23512b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23522b245cb2SAnirudh Venkataramanan 23532b245cb2SAnirudh Venkataramanan out_drop: 23542b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 23552b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23562b245cb2SAnirudh Venkataramanan } 23572b245cb2SAnirudh Venkataramanan 23582b245cb2SAnirudh Venkataramanan /** 23592b245cb2SAnirudh Venkataramanan * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 23602b245cb2SAnirudh Venkataramanan * @skb: send buffer 23612b245cb2SAnirudh Venkataramanan * @netdev: network interface device structure 23622b245cb2SAnirudh Venkataramanan * 23632b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 23642b245cb2SAnirudh Venkataramanan */ 23652b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 23662b245cb2SAnirudh Venkataramanan { 23672b245cb2SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 23682b245cb2SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 23692b245cb2SAnirudh Venkataramanan struct ice_ring *tx_ring; 23702b245cb2SAnirudh Venkataramanan 23712b245cb2SAnirudh Venkataramanan tx_ring = vsi->tx_rings[skb->queue_mapping]; 23722b245cb2SAnirudh Venkataramanan 23732b245cb2SAnirudh Venkataramanan /* hardware can't handle really short frames, hardware padding works 23742b245cb2SAnirudh Venkataramanan * beyond this point 23752b245cb2SAnirudh Venkataramanan */ 23762b245cb2SAnirudh Venkataramanan if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 23772b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23782b245cb2SAnirudh Venkataramanan 23792b245cb2SAnirudh Venkataramanan return ice_xmit_frame_ring(skb, tx_ring); 23802b245cb2SAnirudh Venkataramanan } 2381
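/* A quick sanity check of the descriptor estimate in ice_txd_use_count()
 * (illustrative numbers only, not driver logic): for a 65536 byte linear
 * TSO payload,
 *   exact count:   DIV_ROUND_UP(65536, 12288)   = 6 descriptors
 *   approximation: ((65536 * 85) >> 20) + 1     = 5 + 1 = 6 descriptors
 * where the trailing +1 is ICE_DESCS_FOR_SKB_DATA_PTR. The two agree out to
 * the 1M bound called out in the comment above ice_txd_use_count().
 */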