// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE 256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
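	/* the second of the two descriptors is the data descriptor that
	 * carries the dummy packet used to program the filter
	 */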
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

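/**
 * txring_txq - Find the netdev Tx queue based on the ice Tx ring
 * @ring: Tx ring to find the netdev queue for
 */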
static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
	tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	/* get the bql data ready */
	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* follow the guidelines of other drivers */
		prefetchw(&tx_buf->skb->users);

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_tx_wake_queue(txring_txq(tx_ring));
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	if (rx_ring->xsk_pool)
		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
	else
		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	if (rx_ring->xsk_pool) {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
	} else {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

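	/* PF rings are registered with the kernel's XDP Rx queue info so
	 * redirected frames can be tied back to this queue; register only
	 * once per ring
	 */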
	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (err == ICE_XDP_CONSUMED)
			goto out_failure;
		return err;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
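		/* frame was queued for redirect; the caller is expected to
		 * flush the redirect maps once the Rx loop finishes
		 */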
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		/* Generally, should not happen */
		if (unlikely(queue_index >= vsi->num_xdp_txq))
			return -ENXIO;
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

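	/* bumping the ring tail is the doorbell to hardware; only do it
	 * when the caller asked for a flush
	 */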
	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* we must record the Rx queue, otherwise OS features such as
	 * symmetric queues won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       ICE_RX_HDR_SIZE + metasize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
	       ALIGN(headlen + metasize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

10411d032bc7SMaciej Fijalkowski * ice_put_rx_buf - Clean up used buffer and either recycle or free 10421d032bc7SMaciej Fijalkowski * @rx_ring: Rx descriptor ring to transact packets on 10431d032bc7SMaciej Fijalkowski * @rx_buf: Rx buffer to pull data from 10441beb7830SBjörn Töpel * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect() 10452b245cb2SAnirudh Venkataramanan * 1046efc2214bSMaciej Fijalkowski * This function will update next_to_clean and then clean up the contents 1047efc2214bSMaciej Fijalkowski * of the rx_buf. It will either recycle the buffer or unmap it and free 1048efc2214bSMaciej Fijalkowski * the associated resources. 10492b245cb2SAnirudh Venkataramanan */ 10501beb7830SBjörn Töpel static void 1051e72bba21SMaciej Fijalkowski ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 10521beb7830SBjörn Töpel int rx_buf_pgcnt) 10532b245cb2SAnirudh Venkataramanan { 105488865fc4SKarol Kolacinski u16 ntc = rx_ring->next_to_clean + 1; 1055efc2214bSMaciej Fijalkowski 1056efc2214bSMaciej Fijalkowski /* fetch, update, and store next to clean */ 1057efc2214bSMaciej Fijalkowski ntc = (ntc < rx_ring->count) ? ntc : 0; 1058efc2214bSMaciej Fijalkowski rx_ring->next_to_clean = ntc; 1059efc2214bSMaciej Fijalkowski 1060ac6f733aSMitch Williams if (!rx_buf) 1061ac6f733aSMitch Williams return; 1062ac6f733aSMitch Williams 10631beb7830SBjörn Töpel if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { 1064ac6f733aSMitch Williams /* hand second half of page back to the ring */ 10652b245cb2SAnirudh Venkataramanan ice_reuse_rx_page(rx_ring, rx_buf); 10662b245cb2SAnirudh Venkataramanan } else { 10672b245cb2SAnirudh Venkataramanan /* we are not reusing the buffer so unmap it */ 10687237f5b0SMaciej Fijalkowski dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, 10697237f5b0SMaciej Fijalkowski ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 10707237f5b0SMaciej Fijalkowski ICE_RX_DMA_ATTR); 107103c66a13SMaciej Fijalkowski __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 10722b245cb2SAnirudh Venkataramanan } 10732b245cb2SAnirudh Venkataramanan 10742b245cb2SAnirudh Venkataramanan /* clear contents of buffer_info */ 10752b245cb2SAnirudh Venkataramanan rx_buf->page = NULL; 10762b245cb2SAnirudh Venkataramanan } 10772b245cb2SAnirudh Venkataramanan 10782b245cb2SAnirudh Venkataramanan /** 10792b245cb2SAnirudh Venkataramanan * ice_is_non_eop - process handling of non-EOP buffers 10802b245cb2SAnirudh Venkataramanan * @rx_ring: Rx ring being processed 10812b245cb2SAnirudh Venkataramanan * @rx_desc: Rx descriptor for current buffer 10822b245cb2SAnirudh Venkataramanan * 1083efc2214bSMaciej Fijalkowski * If the buffer is an EOP buffer, this function exits returning false, 1084efc2214bSMaciej Fijalkowski * otherwise return true indicating that this is in fact a non-EOP buffer. 
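 *
 * For example (illustrative), a 9000 byte jumbo frame received into
 * 3072 byte buffers spans three descriptors; only the last one has
 * the EOF status bit set, so the first two return true here and the
 * frame keeps accumulating in rx_ring->skb.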
10852b245cb2SAnirudh Venkataramanan */ 1086c8b7abddSBruce Allan static bool 1087e72bba21SMaciej Fijalkowski ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) 10882b245cb2SAnirudh Venkataramanan { 10892b245cb2SAnirudh Venkataramanan /* if we are the last buffer then there is nothing else to do */ 10902b245cb2SAnirudh Venkataramanan #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 10910d54d8f7SBrett Creeley if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF))) 10922b245cb2SAnirudh Venkataramanan return false; 10932b245cb2SAnirudh Venkataramanan 10942b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.non_eop_descs++; 10952b245cb2SAnirudh Venkataramanan 10962b245cb2SAnirudh Venkataramanan return true; 10972b245cb2SAnirudh Venkataramanan } 10982b245cb2SAnirudh Venkataramanan 10992b245cb2SAnirudh Venkataramanan /** 11002b245cb2SAnirudh Venkataramanan * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1101d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 11022b245cb2SAnirudh Venkataramanan * @budget: Total limit on number of packets to process 11032b245cb2SAnirudh Venkataramanan * 11042b245cb2SAnirudh Venkataramanan * This function provides a "bounce buffer" approach to Rx interrupt 11052b245cb2SAnirudh Venkataramanan * processing. The advantage to this is that on systems that have 11062b245cb2SAnirudh Venkataramanan * expensive overhead for IOMMU access this provides a means of avoiding 11072b245cb2SAnirudh Venkataramanan * it by maintaining the mapping of the page to the system. 11082b245cb2SAnirudh Venkataramanan * 11092b245cb2SAnirudh Venkataramanan * Returns amount of work completed 11102b245cb2SAnirudh Venkataramanan */ 1111e72bba21SMaciej Fijalkowski int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) 11122b245cb2SAnirudh Venkataramanan { 111343b5169dSLorenzo Bianconi unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; 11142b245cb2SAnirudh Venkataramanan u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 1115f1b1f409SMaciej Fijalkowski unsigned int offset = rx_ring->rx_offset; 1116eb087cd8SMaciej Fijalkowski struct ice_tx_ring *xdp_ring = NULL; 1117efc2214bSMaciej Fijalkowski unsigned int xdp_res, xdp_xmit = 0; 111829b82f2aSMaciej Fijalkowski struct sk_buff *skb = rx_ring->skb; 1119efc2214bSMaciej Fijalkowski struct bpf_prog *xdp_prog = NULL; 1120efc2214bSMaciej Fijalkowski struct xdp_buff xdp; 1121cb7db356SBrett Creeley bool failure; 11222b245cb2SAnirudh Venkataramanan 1123d4ecdbf7SJesper Dangaard Brouer /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 1124d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE < 8192) 112543b5169dSLorenzo Bianconi frame_sz = ice_rx_frame_truesize(rx_ring, 0); 1126d4ecdbf7SJesper Dangaard Brouer #endif 112743b5169dSLorenzo Bianconi xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 1128efc2214bSMaciej Fijalkowski 1129eb087cd8SMaciej Fijalkowski xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1130eb087cd8SMaciej Fijalkowski if (xdp_prog) 1131eb087cd8SMaciej Fijalkowski xdp_ring = rx_ring->xdp_ring; 1132eb087cd8SMaciej Fijalkowski 1133f9867df6SAnirudh Venkataramanan /* start the loop to process Rx packets bounded by 'budget' */ 11342b245cb2SAnirudh Venkataramanan while (likely(total_rx_pkts < (unsigned int)budget)) { 11352b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 11366c869cb7SMaciej Fijalkowski struct ice_rx_buf *rx_buf; 1137be9df4afSLorenzo Bianconi unsigned char *hard_start; 11386c869cb7SMaciej Fijalkowski 
unsigned int size;
11392b245cb2SAnirudh Venkataramanan		u16 stat_err_bits;
11401beb7830SBjörn Töpel		int rx_buf_pgcnt;
11412b245cb2SAnirudh Venkataramanan		u16 vlan_tag = 0;
1142dda90cb9SJesse Brandeburg		u16 rx_ptype;
11432b245cb2SAnirudh Venkataramanan
1144f9867df6SAnirudh Venkataramanan		/* get the Rx desc from Rx ring based on 'next_to_clean' */
11452b245cb2SAnirudh Venkataramanan		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
11462b245cb2SAnirudh Venkataramanan
11472b245cb2SAnirudh Venkataramanan		/* status_error_len will always be zero for unused descriptors
11482b245cb2SAnirudh Venkataramanan		 * because it's cleared in cleanup and overlaps with hdr_addr,
11492b245cb2SAnirudh Venkataramanan		 * which is always zero because packet split isn't used. If the
11502b245cb2SAnirudh Venkataramanan		 * hardware wrote DD then it will be non-zero.
11512b245cb2SAnirudh Venkataramanan		 */
11522b245cb2SAnirudh Venkataramanan		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
11530d54d8f7SBrett Creeley		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
11542b245cb2SAnirudh Venkataramanan			break;
11552b245cb2SAnirudh Venkataramanan
11562b245cb2SAnirudh Venkataramanan		/* This memory barrier is needed to keep us from reading
11572b245cb2SAnirudh Venkataramanan		 * any other fields out of the rx_desc until we know the
11582b245cb2SAnirudh Venkataramanan		 * DD bit is set.
11592b245cb2SAnirudh Venkataramanan		 */
11602b245cb2SAnirudh Venkataramanan		dma_rmb();
11612b245cb2SAnirudh Venkataramanan
11623089cf6dSJesse Brandeburg		ice_trace(clean_rx_irq, rx_ring, rx_desc);
1163148beb61SHenry Tieman		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1164d6218317SQi Zhang			struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1165d6218317SQi Zhang
1166d6218317SQi Zhang			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1167b03d519dSJacob Keller			    ctrl_vsi->vf)
1168d6218317SQi Zhang				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
11691beb7830SBjörn Töpel			ice_put_rx_buf(rx_ring, NULL, 0);
1170148beb61SHenry Tieman			cleaned_count++;
1171148beb61SHenry Tieman			continue;
1172148beb61SHenry Tieman		}
1173148beb61SHenry Tieman
11746c869cb7SMaciej Fijalkowski		size = le16_to_cpu(rx_desc->wb.pkt_len) &
11756c869cb7SMaciej Fijalkowski			ICE_RX_FLX_DESC_PKT_LEN_M;
11762b245cb2SAnirudh Venkataramanan
1177ac6f733aSMitch Williams		/* retrieve a buffer from the ring */
117829b82f2aSMaciej Fijalkowski		rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1179ac6f733aSMitch Williams
1180efc2214bSMaciej Fijalkowski		if (!size) {
1181efc2214bSMaciej Fijalkowski			xdp.data = NULL;
1182efc2214bSMaciej Fijalkowski			xdp.data_end = NULL;
1183aaf27254SMaciej Fijalkowski			xdp.data_hard_start = NULL;
1184aaf27254SMaciej Fijalkowski			xdp.data_meta = NULL;
1185efc2214bSMaciej Fijalkowski			goto construct_skb;
1186efc2214bSMaciej Fijalkowski		}
1187efc2214bSMaciej Fijalkowski
1188be9df4afSLorenzo Bianconi		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1189be9df4afSLorenzo Bianconi			     offset;
1190be9df4afSLorenzo Bianconi		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1191d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE > 4096)
1192d4ecdbf7SJesper Dangaard Brouer		/* At larger PAGE_SIZE, frame_sz depends on the frame size */
1193d4ecdbf7SJesper Dangaard Brouer		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1194d4ecdbf7SJesper Dangaard Brouer #endif
1195efc2214bSMaciej Fijalkowski
119649589b23SToke Høiland-Jørgensen		if (!xdp_prog)
1197efc2214bSMaciej Fijalkowski			goto construct_skb;
1198efc2214bSMaciej Fijalkowski
1199eb087cd8SMaciej Fijalkowski
xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring); 120059bb0808SMaciej Fijalkowski if (!xdp_res) 120159bb0808SMaciej Fijalkowski goto construct_skb; 1202efc2214bSMaciej Fijalkowski if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1203efc2214bSMaciej Fijalkowski xdp_xmit |= xdp_res; 1204d4ecdbf7SJesper Dangaard Brouer ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 1205efc2214bSMaciej Fijalkowski } else { 1206efc2214bSMaciej Fijalkowski rx_buf->pagecnt_bias++; 1207efc2214bSMaciej Fijalkowski } 1208efc2214bSMaciej Fijalkowski total_rx_bytes += size; 1209efc2214bSMaciej Fijalkowski total_rx_pkts++; 1210efc2214bSMaciej Fijalkowski 1211efc2214bSMaciej Fijalkowski cleaned_count++; 12121beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1213efc2214bSMaciej Fijalkowski continue; 1214efc2214bSMaciej Fijalkowski construct_skb: 12151f45ebe0SMitch Williams if (skb) { 12167237f5b0SMaciej Fijalkowski ice_add_rx_frag(rx_ring, rx_buf, skb, size); 12171f45ebe0SMitch Williams } else if (likely(xdp.data)) { 12181f45ebe0SMitch Williams if (ice_ring_uses_build_skb(rx_ring)) 1219aaf27254SMaciej Fijalkowski skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1220712edbbbSMaciej Fijalkowski else 1221efc2214bSMaciej Fijalkowski skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 12221f45ebe0SMitch Williams } 1223712edbbbSMaciej Fijalkowski /* exit if we failed to retrieve a buffer */ 1224712edbbbSMaciej Fijalkowski if (!skb) { 1225712edbbbSMaciej Fijalkowski rx_ring->rx_stats.alloc_buf_failed++; 1226ac6f733aSMitch Williams if (rx_buf) 1227712edbbbSMaciej Fijalkowski rx_buf->pagecnt_bias++; 12282b245cb2SAnirudh Venkataramanan break; 1229712edbbbSMaciej Fijalkowski } 12302b245cb2SAnirudh Venkataramanan 12311beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 12322b245cb2SAnirudh Venkataramanan cleaned_count++; 12332b245cb2SAnirudh Venkataramanan 12342b245cb2SAnirudh Venkataramanan /* skip if it is NOP desc */ 123529b82f2aSMaciej Fijalkowski if (ice_is_non_eop(rx_ring, rx_desc)) 12362b245cb2SAnirudh Venkataramanan continue; 12372b245cb2SAnirudh Venkataramanan 12382b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 12390d54d8f7SBrett Creeley if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, 12400d54d8f7SBrett Creeley stat_err_bits))) { 12412b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 12422b245cb2SAnirudh Venkataramanan continue; 12432b245cb2SAnirudh Venkataramanan } 12442b245cb2SAnirudh Venkataramanan 12450d54d8f7SBrett Creeley vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); 12462b245cb2SAnirudh Venkataramanan 1247133f4883SKrzysztof Kazimierczak /* pad the skb if needed, to make a valid ethernet frame */ 1248133f4883SKrzysztof Kazimierczak if (eth_skb_pad(skb)) { 12492b245cb2SAnirudh Venkataramanan skb = NULL; 12502b245cb2SAnirudh Venkataramanan continue; 12512b245cb2SAnirudh Venkataramanan } 12522b245cb2SAnirudh Venkataramanan 12532b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 12542b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 12552b245cb2SAnirudh Venkataramanan 1256d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 12576503b659SJesse Brandeburg rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 12586503b659SJesse Brandeburg ICE_RX_FLEX_DESC_PTYPE_M; 12596503b659SJesse Brandeburg 1260d76a60baSAnirudh Venkataramanan ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1261d76a60baSAnirudh Venkataramanan 12623089cf6dSJesse Brandeburg 
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); 12632b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 12642b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 126529b82f2aSMaciej Fijalkowski skb = NULL; 12662b245cb2SAnirudh Venkataramanan 12672b245cb2SAnirudh Venkataramanan /* update budget accounting */ 12682b245cb2SAnirudh Venkataramanan total_rx_pkts++; 12692b245cb2SAnirudh Venkataramanan } 12702b245cb2SAnirudh Venkataramanan 1271cb7db356SBrett Creeley /* return up to cleaned_count buffers to hardware */ 1272cb7db356SBrett Creeley failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1273cb7db356SBrett Creeley 1274efc2214bSMaciej Fijalkowski if (xdp_prog) 1275eb087cd8SMaciej Fijalkowski ice_finalize_xdp_rx(xdp_ring, xdp_xmit); 127629b82f2aSMaciej Fijalkowski rx_ring->skb = skb; 1277efc2214bSMaciej Fijalkowski 12782d4238f5SKrzysztof Kazimierczak ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 12792b245cb2SAnirudh Venkataramanan 12802b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 12812b245cb2SAnirudh Venkataramanan return failure ? budget : (int)total_rx_pkts; 12822b245cb2SAnirudh Venkataramanan } 12832b245cb2SAnirudh Venkataramanan 1284d8eb7ad5SJesse Brandeburg static void __ice_update_sample(struct ice_q_vector *q_vector, 1285d8eb7ad5SJesse Brandeburg struct ice_ring_container *rc, 1286d8eb7ad5SJesse Brandeburg struct dim_sample *sample, 1287d8eb7ad5SJesse Brandeburg bool is_tx) 1288d8eb7ad5SJesse Brandeburg { 1289d8eb7ad5SJesse Brandeburg u64 packets = 0, bytes = 0; 1290d8eb7ad5SJesse Brandeburg 1291d8eb7ad5SJesse Brandeburg if (is_tx) { 1292d8eb7ad5SJesse Brandeburg struct ice_tx_ring *tx_ring; 1293d8eb7ad5SJesse Brandeburg 1294d8eb7ad5SJesse Brandeburg ice_for_each_tx_ring(tx_ring, *rc) { 1295d8eb7ad5SJesse Brandeburg packets += tx_ring->stats.pkts; 1296d8eb7ad5SJesse Brandeburg bytes += tx_ring->stats.bytes; 1297d8eb7ad5SJesse Brandeburg } 1298d8eb7ad5SJesse Brandeburg } else { 1299d8eb7ad5SJesse Brandeburg struct ice_rx_ring *rx_ring; 1300d8eb7ad5SJesse Brandeburg 1301d8eb7ad5SJesse Brandeburg ice_for_each_rx_ring(rx_ring, *rc) { 1302d8eb7ad5SJesse Brandeburg packets += rx_ring->stats.pkts; 1303d8eb7ad5SJesse Brandeburg bytes += rx_ring->stats.bytes; 1304d8eb7ad5SJesse Brandeburg } 1305d8eb7ad5SJesse Brandeburg } 1306d8eb7ad5SJesse Brandeburg 1307d8eb7ad5SJesse Brandeburg dim_update_sample(q_vector->total_events, packets, bytes, sample); 1308d8eb7ad5SJesse Brandeburg sample->comp_ctr = 0; 1309d8eb7ad5SJesse Brandeburg 1310d8eb7ad5SJesse Brandeburg /* if dim settings get stale, like when not updated for 1 1311d8eb7ad5SJesse Brandeburg * second or longer, force it to start again. This addresses the 1312d8eb7ad5SJesse Brandeburg * frequent case of an idle queue being switched to by the 1313d8eb7ad5SJesse Brandeburg * scheduler. The 1,000 here means 1,000 milliseconds. 
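	 * As an illustration, if this sample is taken at t = 2500 ms while
	 * start_sample was recorded at t = 1000 ms, ktime_ms_delta() returns
	 * 1500 and the state is forced back to DIM_START_MEASURE.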
1314d8eb7ad5SJesse Brandeburg */ 1315d8eb7ad5SJesse Brandeburg if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) 1316d8eb7ad5SJesse Brandeburg rc->dim.state = DIM_START_MEASURE; 1317d8eb7ad5SJesse Brandeburg } 1318d8eb7ad5SJesse Brandeburg 13192b245cb2SAnirudh Venkataramanan /** 1320cdf1f1f1SJacob Keller * ice_net_dim - Update net DIM algorithm 1321cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt 1322711987bbSBrett Creeley * 1323cdf1f1f1SJacob Keller * Create a DIM sample and notify net_dim() so that it can possibly decide 1324cdf1f1f1SJacob Keller * a new ITR value based on incoming packets, bytes, and interrupts. 1325711987bbSBrett Creeley * 1326cdf1f1f1SJacob Keller * This function is a no-op if the ring is not configured to dynamic ITR. 1327711987bbSBrett Creeley */ 1328cdf1f1f1SJacob Keller static void ice_net_dim(struct ice_q_vector *q_vector) 132964a59d05SAnirudh Venkataramanan { 1330cdf1f1f1SJacob Keller struct ice_ring_container *tx = &q_vector->tx; 1331cdf1f1f1SJacob Keller struct ice_ring_container *rx = &q_vector->rx; 1332cdf1f1f1SJacob Keller 1333d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(tx)) { 1334d8eb7ad5SJesse Brandeburg struct dim_sample dim_sample; 1335cdf1f1f1SJacob Keller 1336d8eb7ad5SJesse Brandeburg __ice_update_sample(q_vector, tx, &dim_sample, true); 1337cdf1f1f1SJacob Keller net_dim(&tx->dim, dim_sample); 1338711987bbSBrett Creeley } 1339711987bbSBrett Creeley 1340d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(rx)) { 1341d8eb7ad5SJesse Brandeburg struct dim_sample dim_sample; 1342cdf1f1f1SJacob Keller 1343d8eb7ad5SJesse Brandeburg __ice_update_sample(q_vector, rx, &dim_sample, false); 1344cdf1f1f1SJacob Keller net_dim(&rx->dim, dim_sample); 134564a59d05SAnirudh Venkataramanan } 134664a59d05SAnirudh Venkataramanan } 134764a59d05SAnirudh Venkataramanan 13482b245cb2SAnirudh Venkataramanan /** 134963f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 135063f545edSBrett Creeley * @itr_idx: interrupt throttling index 135164a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 135263f545edSBrett Creeley */ 13538244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 135463f545edSBrett Creeley { 13552f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 135664a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 135764a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 135864a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 135964a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 136064a59d05SAnirudh Venkataramanan * prior to the ITR field. 
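	 *
	 * Worked example (illustrative, assuming the 2 usec granularity
	 * described above): an itr of 50 usecs is masked to stay even and
	 * lands in the interval field as 25 register units, since
	 *	itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)
	 * is equivalent to (itr >> ICE_ITR_GRAN_S) << GLINT_DYN_CTL_INTERVAL_S
	 * once the low bit is cleared.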
136164a59d05SAnirudh Venkataramanan */ 136264a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 136364a59d05SAnirudh Venkataramanan 136463f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 136563f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 136664a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 136763f545edSBrett Creeley } 136863f545edSBrett Creeley 136963f545edSBrett Creeley /** 1370d8eb7ad5SJesse Brandeburg * ice_enable_interrupt - re-enable MSI-X interrupt 1371cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt to enable 1372cdf1f1f1SJacob Keller * 1373d8eb7ad5SJesse Brandeburg * If the VSI is down, the interrupt will not be re-enabled. Also, 1374d8eb7ad5SJesse Brandeburg * when enabling the interrupt always reset the wb_on_itr to false 1375d8eb7ad5SJesse Brandeburg * and trigger a software interrupt to clean out internal state. 137663f545edSBrett Creeley */ 1377d8eb7ad5SJesse Brandeburg static void ice_enable_interrupt(struct ice_q_vector *q_vector) 137863f545edSBrett Creeley { 13792fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 1380b7306b42SJesse Brandeburg bool wb_en = q_vector->wb_on_itr; 138163f545edSBrett Creeley u32 itr_val; 138263f545edSBrett Creeley 1383cdf1f1f1SJacob Keller if (test_bit(ICE_DOWN, vsi->state)) 1384cdf1f1f1SJacob Keller return; 13852ab28bb0SBrett Creeley 138623be7075SJesse Brandeburg /* trigger an ITR delayed software interrupt when exiting busy poll, to 138723be7075SJesse Brandeburg * make sure to catch any pending cleanups that might have been missed 138823be7075SJesse Brandeburg * due to interrupt state transition. If busy poll or poll isn't 138923be7075SJesse Brandeburg * enabled, then don't update ITR, and just enable the interrupt. 1390cdf1f1f1SJacob Keller */ 139123be7075SJesse Brandeburg if (!wb_en) { 139223be7075SJesse Brandeburg itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 139323be7075SJesse Brandeburg } else { 1394cdf1f1f1SJacob Keller q_vector->wb_on_itr = false; 139564a59d05SAnirudh Venkataramanan 139623be7075SJesse Brandeburg /* do two things here with a single write. Set up the third ITR 139723be7075SJesse Brandeburg * index to be used for software interrupt moderation, and then 139823be7075SJesse Brandeburg * trigger a software interrupt with a rate limit of 20K on 139923be7075SJesse Brandeburg * software interrupts, this will help avoid high interrupt 140023be7075SJesse Brandeburg * loads due to frequently polling and exiting polling. 1401b7306b42SJesse Brandeburg */ 140223be7075SJesse Brandeburg itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K); 1403b7306b42SJesse Brandeburg itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | 140423be7075SJesse Brandeburg ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S | 1405b7306b42SJesse Brandeburg GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; 1406b7306b42SJesse Brandeburg } 14071d9f7ca3SJesse Brandeburg wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 140863f545edSBrett Creeley } 140963f545edSBrett Creeley 141063f545edSBrett Creeley /** 14112ab28bb0SBrett Creeley * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 14122ab28bb0SBrett Creeley * @q_vector: q_vector to set WB_ON_ITR on 14132ab28bb0SBrett Creeley * 14142ab28bb0SBrett Creeley * We need to tell hardware to write-back completed descriptors even when 14152ab28bb0SBrett Creeley * interrupts are disabled. 
Descriptors will be written back on cache line 14162ab28bb0SBrett Creeley * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 14171d9f7ca3SJesse Brandeburg * descriptors may not be written back if they don't fill a cache line until 14181d9f7ca3SJesse Brandeburg * the next interrupt. 14192ab28bb0SBrett Creeley * 14201d9f7ca3SJesse Brandeburg * This sets the write-back frequency to whatever was set previously for the 14211d9f7ca3SJesse Brandeburg * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we 14221d9f7ca3SJesse Brandeburg * aren't meddling with the INTENA_M bit. 14232ab28bb0SBrett Creeley */ 14242fb0821fSJesse Brandeburg static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 14252ab28bb0SBrett Creeley { 14262fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 14272fb0821fSJesse Brandeburg 14281d9f7ca3SJesse Brandeburg /* already in wb_on_itr mode no need to change it */ 1429cdf1f1f1SJacob Keller if (q_vector->wb_on_itr) 14302ab28bb0SBrett Creeley return; 14312ab28bb0SBrett Creeley 14321d9f7ca3SJesse Brandeburg /* use previously set ITR values for all of the ITR indices by 14331d9f7ca3SJesse Brandeburg * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and 14341d9f7ca3SJesse Brandeburg * be static in non-adaptive mode (user configured) 14351d9f7ca3SJesse Brandeburg */ 14362ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 14371d9f7ca3SJesse Brandeburg ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & 14381d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | 14391d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_WB_ON_ITR_M); 14402ab28bb0SBrett Creeley 1441cdf1f1f1SJacob Keller q_vector->wb_on_itr = true; 14422ab28bb0SBrett Creeley } 14432ab28bb0SBrett Creeley 14442ab28bb0SBrett Creeley /** 14452b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 14462b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 14472b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 14482b245cb2SAnirudh Venkataramanan * 14492b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 14502b245cb2SAnirudh Venkataramanan * 14512b245cb2SAnirudh Venkataramanan * Returns the amount of work done 14522b245cb2SAnirudh Venkataramanan */ 14532b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 14542b245cb2SAnirudh Venkataramanan { 14552b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 14562b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 1457e72bba21SMaciej Fijalkowski struct ice_tx_ring *tx_ring; 1458e72bba21SMaciej Fijalkowski struct ice_rx_ring *rx_ring; 14592b245cb2SAnirudh Venkataramanan bool clean_complete = true; 14609118fcd5SBrett Creeley int budget_per_ring; 14612b245cb2SAnirudh Venkataramanan int work_done = 0; 14622b245cb2SAnirudh Venkataramanan 14632b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 14642b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 
14652b245cb2SAnirudh Venkataramanan */ 1466e72bba21SMaciej Fijalkowski ice_for_each_tx_ring(tx_ring, q_vector->tx) { 14679610bd98SMaciej Fijalkowski bool wd; 14689610bd98SMaciej Fijalkowski 14699610bd98SMaciej Fijalkowski if (tx_ring->xsk_pool) 1470126cdfe1SMaciej Fijalkowski wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget); 14719610bd98SMaciej Fijalkowski else if (ice_ring_is_xdp(tx_ring)) 14729610bd98SMaciej Fijalkowski wd = true; 14739610bd98SMaciej Fijalkowski else 14749610bd98SMaciej Fijalkowski wd = ice_clean_tx_irq(tx_ring, budget); 14752d4238f5SKrzysztof Kazimierczak 14762d4238f5SKrzysztof Kazimierczak if (!wd) 14772b245cb2SAnirudh Venkataramanan clean_complete = false; 14782d4238f5SKrzysztof Kazimierczak } 14792b245cb2SAnirudh Venkataramanan 14802b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 1481d27525ecSJesse Brandeburg if (unlikely(budget <= 0)) 14822b245cb2SAnirudh Venkataramanan return budget; 14832b245cb2SAnirudh Venkataramanan 14849118fcd5SBrett Creeley /* normally we have 1 Rx ring per q_vector */ 14859118fcd5SBrett Creeley if (unlikely(q_vector->num_ring_rx > 1)) 14869118fcd5SBrett Creeley /* We attempt to distribute budget to each Rx queue fairly, but 14879118fcd5SBrett Creeley * don't allow the budget to go below 1 because that would exit 14889118fcd5SBrett Creeley * polling early. 14892b245cb2SAnirudh Venkataramanan */ 149088865fc4SKarol Kolacinski budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 14919118fcd5SBrett Creeley else 14929118fcd5SBrett Creeley /* Max of 1 Rx ring in this q_vector so give it the budget */ 14939118fcd5SBrett Creeley budget_per_ring = budget; 14942b245cb2SAnirudh Venkataramanan 1495e72bba21SMaciej Fijalkowski ice_for_each_rx_ring(rx_ring, q_vector->rx) { 14962b245cb2SAnirudh Venkataramanan int cleaned; 14972b245cb2SAnirudh Venkataramanan 14982d4238f5SKrzysztof Kazimierczak /* A dedicated path for zero-copy allows making a single 14992d4238f5SKrzysztof Kazimierczak * comparison in the irq context instead of many inside the 15002d4238f5SKrzysztof Kazimierczak * ice_clean_rx_irq function and makes the codebase cleaner. 15012d4238f5SKrzysztof Kazimierczak */ 1502e72bba21SMaciej Fijalkowski cleaned = rx_ring->xsk_pool ? 1503e72bba21SMaciej Fijalkowski ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : 1504e72bba21SMaciej Fijalkowski ice_clean_rx_irq(rx_ring, budget_per_ring); 15052b245cb2SAnirudh Venkataramanan work_done += cleaned; 15062b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 15072b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 15082b245cb2SAnirudh Venkataramanan clean_complete = false; 15092b245cb2SAnirudh Venkataramanan } 15102b245cb2SAnirudh Venkataramanan 15112b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 15121d9f7ca3SJesse Brandeburg if (!clean_complete) { 15131d9f7ca3SJesse Brandeburg /* Set the writeback on ITR so partial completions of 15141d9f7ca3SJesse Brandeburg * cache-lines will still continue even if we're polling. 
15151d9f7ca3SJesse Brandeburg */ 15161d9f7ca3SJesse Brandeburg ice_set_wb_on_itr(q_vector); 15172b245cb2SAnirudh Venkataramanan return budget; 15181d9f7ca3SJesse Brandeburg } 15192b245cb2SAnirudh Venkataramanan 15200bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 15210bcd952fSJesse Brandeburg * poll us due to busy-polling 15220bcd952fSJesse Brandeburg */ 1523a4e18669SMaciej Fijalkowski if (napi_complete_done(napi, work_done)) { 1524d8eb7ad5SJesse Brandeburg ice_net_dim(q_vector); 1525d8eb7ad5SJesse Brandeburg ice_enable_interrupt(q_vector); 1526d8eb7ad5SJesse Brandeburg } else { 15272fb0821fSJesse Brandeburg ice_set_wb_on_itr(q_vector); 1528d8eb7ad5SJesse Brandeburg } 1529e0c9fd9bSDave Ertman 153032a64994SBruce Allan return min_t(int, work_done, budget - 1); 15312b245cb2SAnirudh Venkataramanan } 15322b245cb2SAnirudh Venkataramanan 15332b245cb2SAnirudh Venkataramanan /** 1534d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 15352b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15362b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15372b245cb2SAnirudh Venkataramanan * 15382b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 15392b245cb2SAnirudh Venkataramanan */ 1540e72bba21SMaciej Fijalkowski static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 15412b245cb2SAnirudh Venkataramanan { 15421c96c168SJesse Brandeburg netif_tx_stop_queue(txring_txq(tx_ring)); 15432b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 15442b245cb2SAnirudh Venkataramanan smp_mb(); 15452b245cb2SAnirudh Venkataramanan 15462b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. */ 15472b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 15482b245cb2SAnirudh Venkataramanan return -EBUSY; 15492b245cb2SAnirudh Venkataramanan 15501c96c168SJesse Brandeburg /* A reprieve! 
- use start_queue because it doesn't call schedule */ 15511c96c168SJesse Brandeburg netif_tx_start_queue(txring_txq(tx_ring)); 15522b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 15532b245cb2SAnirudh Venkataramanan return 0; 15542b245cb2SAnirudh Venkataramanan } 15552b245cb2SAnirudh Venkataramanan 15562b245cb2SAnirudh Venkataramanan /** 1557d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 15582b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15592b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15602b245cb2SAnirudh Venkataramanan * 15612b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 15622b245cb2SAnirudh Venkataramanan */ 1563e72bba21SMaciej Fijalkowski static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 15642b245cb2SAnirudh Venkataramanan { 15652b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 15662b245cb2SAnirudh Venkataramanan return 0; 1567d337f2afSAnirudh Venkataramanan 15682b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 15692b245cb2SAnirudh Venkataramanan } 15702b245cb2SAnirudh Venkataramanan 15712b245cb2SAnirudh Venkataramanan /** 15722b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 15732b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 15742b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1575d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 15762b245cb2SAnirudh Venkataramanan * 15772b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 15782b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 15792b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
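 *
 * Rough shape of the result (illustrative): an skb with a linear head
 * and three page frags consumes at least four descriptors, each chunk
 * additionally split at ICE_MAX_DATA_PER_TXD boundaries, and only the
 * final descriptor of the packet carries the RS and EOP bits.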
15802b245cb2SAnirudh Venkataramanan */ 1581d76a60baSAnirudh Venkataramanan static void 1582e72bba21SMaciej Fijalkowski ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, 1583d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 15842b245cb2SAnirudh Venkataramanan { 1585d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 15862b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 15872b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 15882b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 15892b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 15902b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 15914ee656bbSTony Nguyen skb_frag_t *frag; 15922b245cb2SAnirudh Venkataramanan dma_addr_t dma; 15939c99d099SJesse Brandeburg bool kick; 15942b245cb2SAnirudh Venkataramanan 1595d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1596d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1597d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 15982b245cb2SAnirudh Venkataramanan skb = first->skb; 15992b245cb2SAnirudh Venkataramanan 16002b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 16012b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 16022b245cb2SAnirudh Venkataramanan 16032b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 16042b245cb2SAnirudh Venkataramanan 1605d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1606d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1607d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1608d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1609d76a60baSAnirudh Venkataramanan } 1610d76a60baSAnirudh Venkataramanan 16112b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 16122b245cb2SAnirudh Venkataramanan 16132b245cb2SAnirudh Venkataramanan tx_buf = first; 16142b245cb2SAnirudh Venkataramanan 16152b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 16162b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16172b245cb2SAnirudh Venkataramanan 16182b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 16192b245cb2SAnirudh Venkataramanan goto dma_error; 16202b245cb2SAnirudh Venkataramanan 16212b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 16222b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 16232b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 16242b245cb2SAnirudh Venkataramanan 16252b245cb2SAnirudh Venkataramanan /* align size to end of page */ 16262b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 16272b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16282b245cb2SAnirudh Venkataramanan 16292b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 16302b245cb2SAnirudh Venkataramanan * can handle 16312b245cb2SAnirudh Venkataramanan */ 16322b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 16332b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 16345757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, max_data, 16355757cc7cSTony Nguyen td_tag); 16362b245cb2SAnirudh Venkataramanan 16372b245cb2SAnirudh Venkataramanan tx_desc++; 16382b245cb2SAnirudh Venkataramanan i++; 16392b245cb2SAnirudh Venkataramanan 16402b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 
16412b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 16422b245cb2SAnirudh Venkataramanan i = 0; 16432b245cb2SAnirudh Venkataramanan } 16442b245cb2SAnirudh Venkataramanan 16452b245cb2SAnirudh Venkataramanan dma += max_data; 16462b245cb2SAnirudh Venkataramanan size -= max_data; 16472b245cb2SAnirudh Venkataramanan 16482b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16492b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16502b245cb2SAnirudh Venkataramanan } 16512b245cb2SAnirudh Venkataramanan 16522b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 16532b245cb2SAnirudh Venkataramanan break; 16542b245cb2SAnirudh Venkataramanan 16555757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 16562b245cb2SAnirudh Venkataramanan size, td_tag); 16572b245cb2SAnirudh Venkataramanan 16582b245cb2SAnirudh Venkataramanan tx_desc++; 16592b245cb2SAnirudh Venkataramanan i++; 16602b245cb2SAnirudh Venkataramanan 16612b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16622b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 16632b245cb2SAnirudh Venkataramanan i = 0; 16642b245cb2SAnirudh Venkataramanan } 16652b245cb2SAnirudh Venkataramanan 16662b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 16672b245cb2SAnirudh Venkataramanan data_len -= size; 16682b245cb2SAnirudh Venkataramanan 16692b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 16702b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 16712b245cb2SAnirudh Venkataramanan 16722b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 16732b245cb2SAnirudh Venkataramanan } 16742b245cb2SAnirudh Venkataramanan 16752b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 16762b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 16772b245cb2SAnirudh Venkataramanan 16782b245cb2SAnirudh Venkataramanan i++; 16792b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 16802b245cb2SAnirudh Venkataramanan i = 0; 16812b245cb2SAnirudh Venkataramanan 16822b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 1683efc2214bSMaciej Fijalkowski td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 16845757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = 16855757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, size, td_tag); 16862b245cb2SAnirudh Venkataramanan 16872b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 16882b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 16892b245cb2SAnirudh Venkataramanan * 16902b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 16912b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
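	 *
	 * Without this barrier, a cleanup pass polling next_to_watch from
	 * another CPU could observe the new pointer while the descriptor
	 * writes it guards are still pending, and free buffers too early.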
16922b245cb2SAnirudh Venkataramanan */ 16932b245cb2SAnirudh Venkataramanan wmb(); 16942b245cb2SAnirudh Venkataramanan 16952b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 16962b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 16972b245cb2SAnirudh Venkataramanan 16982b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 16992b245cb2SAnirudh Venkataramanan 17002b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 17012b245cb2SAnirudh Venkataramanan 17022b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 17039c99d099SJesse Brandeburg kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, 17049c99d099SJesse Brandeburg netdev_xmit_more()); 17059c99d099SJesse Brandeburg if (kick) 17069c99d099SJesse Brandeburg /* notify HW of packet */ 17072b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 17082b245cb2SAnirudh Venkataramanan 17092b245cb2SAnirudh Venkataramanan return; 17102b245cb2SAnirudh Venkataramanan 17112b245cb2SAnirudh Venkataramanan dma_error: 17122f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 17132b245cb2SAnirudh Venkataramanan for (;;) { 17142b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 17152b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 17162b245cb2SAnirudh Venkataramanan if (tx_buf == first) 17172b245cb2SAnirudh Venkataramanan break; 17182b245cb2SAnirudh Venkataramanan if (i == 0) 17192b245cb2SAnirudh Venkataramanan i = tx_ring->count; 17202b245cb2SAnirudh Venkataramanan i--; 17212b245cb2SAnirudh Venkataramanan } 17222b245cb2SAnirudh Venkataramanan 17232b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 17242b245cb2SAnirudh Venkataramanan } 17252b245cb2SAnirudh Venkataramanan 17262b245cb2SAnirudh Venkataramanan /** 1727d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1728d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1729d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1730d76a60baSAnirudh Venkataramanan * 1731d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
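 *
 * A minimal caller sketch (illustrative of how the xmit path is
 * expected to consume the return value):
 *
 *	int ret = ice_tx_csum(first, &offload);
 *
 *	if (ret < 0)
 *		goto out_drop;
 *
 * where ret == 0 means nothing was offloaded and ret == 1 means the
 * td_cmd/td_offset fields in @off were filled in.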
1732d76a60baSAnirudh Venkataramanan */ 1733d76a60baSAnirudh Venkataramanan static 1734d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1735d76a60baSAnirudh Venkataramanan { 1736d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1737d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1738d76a60baSAnirudh Venkataramanan union { 1739d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1740d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1741d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1742d76a60baSAnirudh Venkataramanan } ip; 1743d76a60baSAnirudh Venkataramanan union { 1744d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1745d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1746d76a60baSAnirudh Venkataramanan } l4; 1747d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1748d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1749d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1750d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1751d76a60baSAnirudh Venkataramanan 1752d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1753d76a60baSAnirudh Venkataramanan return 0; 1754d76a60baSAnirudh Venkataramanan 175569e66c04SJoe Damato protocol = vlan_get_protocol(skb); 175669e66c04SJoe Damato 175701658aeeSPrzemyslaw Patynowski if (eth_p_mpls(protocol)) { 175869e66c04SJoe Damato ip.hdr = skb_inner_network_header(skb); 175969e66c04SJoe Damato l4.hdr = skb_checksum_start(skb); 176001658aeeSPrzemyslaw Patynowski } else { 176101658aeeSPrzemyslaw Patynowski ip.hdr = skb_network_header(skb); 176201658aeeSPrzemyslaw Patynowski l4.hdr = skb_transport_header(skb); 176301658aeeSPrzemyslaw Patynowski } 1764d76a60baSAnirudh Venkataramanan 1765d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1766d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1767d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1768d76a60baSAnirudh Venkataramanan 176969e66c04SJoe Damato /* set the tx_flags to indicate the IP protocol type. this is 177069e66c04SJoe Damato * required so that checksum header computation below is accurate. 177169e66c04SJoe Damato */ 177269e66c04SJoe Damato if (ip.v4->version == 4) 1773a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 177469e66c04SJoe Damato else if (ip.v6->version == 6) 1775a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1776a4e82a81STony Nguyen 1777a4e82a81STony Nguyen if (skb->encapsulation) { 1778a4e82a81STony Nguyen bool gso_ena = false; 1779a4e82a81STony Nguyen u32 tunnel = 0; 1780a4e82a81STony Nguyen 1781a4e82a81STony Nguyen /* define outer network header type */ 1782a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1783a4e82a81STony Nguyen tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
1784a4e82a81STony Nguyen				  ICE_TX_CTX_EIPT_IPV4 :
1785a4e82a81STony Nguyen				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1786a4e82a81STony Nguyen			l4_proto = ip.v4->protocol;
1787a4e82a81STony Nguyen		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
17881b0b0b58SNick Nunley			int ret;
17891b0b0b58SNick Nunley
1790a4e82a81STony Nguyen			tunnel |= ICE_TX_CTX_EIPT_IPV6;
1791a4e82a81STony Nguyen			exthdr = ip.hdr + sizeof(*ip.v6);
1792a4e82a81STony Nguyen			l4_proto = ip.v6->nexthdr;
17931b0b0b58SNick Nunley			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1794a4e82a81STony Nguyen					       &l4_proto, &frag_off);
17951b0b0b58SNick Nunley			if (ret < 0)
17961b0b0b58SNick Nunley				return -1;
1797a4e82a81STony Nguyen		}
1798a4e82a81STony Nguyen
1799a4e82a81STony Nguyen		/* define outer transport */
1800a4e82a81STony Nguyen		switch (l4_proto) {
1801a4e82a81STony Nguyen		case IPPROTO_UDP:
1802a4e82a81STony Nguyen			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1803a4e82a81STony Nguyen			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1804a4e82a81STony Nguyen			break;
1805a4e82a81STony Nguyen		case IPPROTO_GRE:
1806a4e82a81STony Nguyen			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1807a4e82a81STony Nguyen			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1808a4e82a81STony Nguyen			break;
1809a4e82a81STony Nguyen		case IPPROTO_IPIP:
1810a4e82a81STony Nguyen		case IPPROTO_IPV6:
1811a4e82a81STony Nguyen			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1812a4e82a81STony Nguyen			l4.hdr = skb_inner_network_header(skb);
1813a4e82a81STony Nguyen			break;
1814a4e82a81STony Nguyen		default:
1815a4e82a81STony Nguyen			if (first->tx_flags & ICE_TX_FLAGS_TSO)
1816d76a60baSAnirudh Venkataramanan				return -1;
1817d76a60baSAnirudh Venkataramanan
1818a4e82a81STony Nguyen			skb_checksum_help(skb);
1819a4e82a81STony Nguyen			return 0;
1820a4e82a81STony Nguyen		}
1821a4e82a81STony Nguyen
1822a4e82a81STony Nguyen		/* compute outer L3 header size */
1823a4e82a81STony Nguyen		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1824a4e82a81STony Nguyen			  ICE_TXD_CTX_QW0_EIPLEN_S;
1825a4e82a81STony Nguyen
1826a4e82a81STony Nguyen		/* switch IP header pointer from outer to inner header */
1827a4e82a81STony Nguyen		ip.hdr = skb_inner_network_header(skb);
1828a4e82a81STony Nguyen
1829a4e82a81STony Nguyen		/* compute tunnel header size */
1830a4e82a81STony Nguyen		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1831a4e82a81STony Nguyen			  ICE_TXD_CTX_QW0_NATLEN_S;
1832a4e82a81STony Nguyen
1833a4e82a81STony Nguyen		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1834a4e82a81STony Nguyen		/* indicate if we need to offload outer UDP header */
1835a4e82a81STony Nguyen		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1836a4e82a81STony Nguyen		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1837a4e82a81STony Nguyen			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1838a4e82a81STony Nguyen
1839a4e82a81STony Nguyen		/* record tunnel offload values */
1840a4e82a81STony Nguyen		off->cd_tunnel_params |= tunnel;
1841a4e82a81STony Nguyen
1842a4e82a81STony Nguyen		/* set DTYP=1 to indicate that it's a Tx context descriptor
1843a4e82a81STony Nguyen		 * in IPsec tunnel mode with Tx offloads in Quad word 1
1844a4e82a81STony Nguyen		 */
1845a4e82a81STony Nguyen		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1846a4e82a81STony Nguyen
1847a4e82a81STony Nguyen		/* switch L4 header pointer from outer to inner */
1848a4e82a81STony Nguyen		l4.hdr = skb_inner_transport_header(skb);
1849a4e82a81STony Nguyen		l4_proto = 0;
1850a4e82a81STony Nguyen
1851a4e82a81STony Nguyen		/* reset type as we transition from outer to inner headers */
1852a4e82a81STony Nguyen		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 |
ICE_TX_FLAGS_IPV6); 1853a4e82a81STony Nguyen if (ip.v4->version == 4) 1854a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1855a4e82a81STony Nguyen if (ip.v6->version == 6) 1856a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1857a4e82a81STony Nguyen } 1858a4e82a81STony Nguyen 1859d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1860a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1861d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1862d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already, the only time we 1863d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 1864d76a60baSAnirudh Venkataramanan */ 1865d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1866d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1867d76a60baSAnirudh Venkataramanan else 1868d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1869d76a60baSAnirudh Venkataramanan 1870a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1871d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1872d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1873d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1874d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1875d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1876d76a60baSAnirudh Venkataramanan &frag_off); 1877d76a60baSAnirudh Venkataramanan } else { 1878d76a60baSAnirudh Venkataramanan return -1; 1879d76a60baSAnirudh Venkataramanan } 1880d76a60baSAnirudh Venkataramanan 1881d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1882d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1883d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1884d76a60baSAnirudh Venkataramanan 1885d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1886d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1887d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 1888d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 1889d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1890d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 1891d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1892d76a60baSAnirudh Venkataramanan break; 1893d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 1894d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 1895d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1896d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 1897d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1898d76a60baSAnirudh Venkataramanan break; 1899d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 1900cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 1901cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1902cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 1903cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1904cf909e19SAnirudh Venkataramanan break; 1905cf909e19SAnirudh Venkataramanan 1906d76a60baSAnirudh Venkataramanan default: 1907d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1908d76a60baSAnirudh Venkataramanan return -1; 1909d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 1910d76a60baSAnirudh Venkataramanan return 0; 
1911d76a60baSAnirudh Venkataramanan	}
1912d76a60baSAnirudh Venkataramanan
1913d76a60baSAnirudh Venkataramanan	off->td_cmd |= cmd;
1914d76a60baSAnirudh Venkataramanan	off->td_offset |= offset;
1915d76a60baSAnirudh Venkataramanan	return 1;
1916d76a60baSAnirudh Venkataramanan }
1917d76a60baSAnirudh Venkataramanan
1918d76a60baSAnirudh Venkataramanan /**
1919f9867df6SAnirudh Venkataramanan  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1920d76a60baSAnirudh Venkataramanan  * @tx_ring: ring to send buffer on
1921d76a60baSAnirudh Venkataramanan  * @first: pointer to struct ice_tx_buf
1922d76a60baSAnirudh Venkataramanan  *
1923d76a60baSAnirudh Venkataramanan  * Checks the skb and sets up the corresponding generic transmit flags
1924d76a60baSAnirudh Venkataramanan  * related to VLAN tagging for the HW, such as VLAN and DCB.
1925d76a60baSAnirudh Venkataramanan  */
19262bb19d6eSBrett Creeley static void
1927e72bba21SMaciej Fijalkowski ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
1928d76a60baSAnirudh Venkataramanan {
1929d76a60baSAnirudh Venkataramanan	struct sk_buff *skb = first->skb;
1930d76a60baSAnirudh Venkataramanan
19312bb19d6eSBrett Creeley	/* nothing left to do, software offloaded VLAN */
19322bb19d6eSBrett Creeley	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
19332bb19d6eSBrett Creeley		return;
19342bb19d6eSBrett Creeley
19350d54d8f7SBrett Creeley	/* the VLAN ethertype/tpid is determined by VSI configuration and netdev
19360d54d8f7SBrett Creeley	 * feature flags; the driver only allows either 802.1Q or 802.1ad VLAN
19370d54d8f7SBrett Creeley	 * offloads exclusively, so we only care about the VLAN ID here
1938d76a60baSAnirudh Venkataramanan	 */
1939d76a60baSAnirudh Venkataramanan	if (skb_vlan_tag_present(skb)) {
1940d76a60baSAnirudh Venkataramanan		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
19410d54d8f7SBrett Creeley		if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
19420d54d8f7SBrett Creeley			first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
19430d54d8f7SBrett Creeley		else
1944d76a60baSAnirudh Venkataramanan			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1945d76a60baSAnirudh Venkataramanan	}
1946d76a60baSAnirudh Venkataramanan
19472bb19d6eSBrett Creeley	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1948d76a60baSAnirudh Venkataramanan }
1949d76a60baSAnirudh Venkataramanan
1950d76a60baSAnirudh Venkataramanan /**
1951d76a60baSAnirudh Venkataramanan  * ice_tso - computes mss and TSO length to prepare for TSO
1952d76a60baSAnirudh Venkataramanan  * @first: pointer to struct ice_tx_buf
1953d76a60baSAnirudh Venkataramanan  * @off: pointer to struct that holds offload parameters
1954d76a60baSAnirudh Venkataramanan  *
1955d76a60baSAnirudh Venkataramanan  * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
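 *
 * Worked example (illustrative numbers): a GSO skb of 7306 bytes with
 * 66 bytes of headers and a gso_size of 1448 produces cd_tso_len =
 * 7240 and cd_mss = 1448, i.e. five wire segments; bytecount is then
 * grown by (gso_segs - 1) * header_len to account for the replicated
 * headers on segments two through five.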
1956d76a60baSAnirudh Venkataramanan */ 1957d76a60baSAnirudh Venkataramanan static 1958d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1959d76a60baSAnirudh Venkataramanan { 1960d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1961d76a60baSAnirudh Venkataramanan union { 1962d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1963d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1964d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1965d76a60baSAnirudh Venkataramanan } ip; 1966d76a60baSAnirudh Venkataramanan union { 1967d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1968a54e3b8cSBrett Creeley struct udphdr *udp; 1969d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1970d76a60baSAnirudh Venkataramanan } l4; 1971d76a60baSAnirudh Venkataramanan u64 cd_mss, cd_tso_len; 197269e66c04SJoe Damato __be16 protocol; 197388865fc4SKarol Kolacinski u32 paylen; 197488865fc4SKarol Kolacinski u8 l4_start; 1975d76a60baSAnirudh Venkataramanan int err; 1976d76a60baSAnirudh Venkataramanan 1977d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1978d76a60baSAnirudh Venkataramanan return 0; 1979d76a60baSAnirudh Venkataramanan 1980d76a60baSAnirudh Venkataramanan if (!skb_is_gso(skb)) 1981d76a60baSAnirudh Venkataramanan return 0; 1982d76a60baSAnirudh Venkataramanan 1983d76a60baSAnirudh Venkataramanan err = skb_cow_head(skb, 0); 1984d76a60baSAnirudh Venkataramanan if (err < 0) 1985d76a60baSAnirudh Venkataramanan return err; 1986d76a60baSAnirudh Venkataramanan 1987c3a6825eSBruce Allan /* cppcheck-suppress unreadVariable */ 198869e66c04SJoe Damato protocol = vlan_get_protocol(skb); 198969e66c04SJoe Damato 199069e66c04SJoe Damato if (eth_p_mpls(protocol)) 199169e66c04SJoe Damato ip.hdr = skb_inner_network_header(skb); 199269e66c04SJoe Damato else 1993d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 199469e66c04SJoe Damato l4.hdr = skb_checksum_start(skb); 1995d76a60baSAnirudh Venkataramanan 1996d76a60baSAnirudh Venkataramanan /* initialize outer IP header fields */ 1997d76a60baSAnirudh Venkataramanan if (ip.v4->version == 4) { 1998d76a60baSAnirudh Venkataramanan ip.v4->tot_len = 0; 1999d76a60baSAnirudh Venkataramanan ip.v4->check = 0; 2000d76a60baSAnirudh Venkataramanan } else { 2001d76a60baSAnirudh Venkataramanan ip.v6->payload_len = 0; 2002d76a60baSAnirudh Venkataramanan } 2003d76a60baSAnirudh Venkataramanan 2004a4e82a81STony Nguyen if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 2005a4e82a81STony Nguyen SKB_GSO_GRE_CSUM | 2006a4e82a81STony Nguyen SKB_GSO_IPXIP4 | 2007a4e82a81STony Nguyen SKB_GSO_IPXIP6 | 2008a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL | 2009a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL_CSUM)) { 2010a4e82a81STony Nguyen if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 2011a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 2012a4e82a81STony Nguyen l4.udp->len = 0; 2013a4e82a81STony Nguyen 2014a4e82a81STony Nguyen /* determine offset of outer transport header */ 201588865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data); 2016a4e82a81STony Nguyen 2017a4e82a81STony Nguyen /* remove payload length from outer checksum */ 2018a4e82a81STony Nguyen paylen = skb->len - l4_start; 2019a4e82a81STony Nguyen csum_replace_by_diff(&l4.udp->check, 2020a4e82a81STony Nguyen (__force __wsum)htonl(paylen)); 2021a4e82a81STony Nguyen } 2022a4e82a81STony Nguyen 2023a4e82a81STony Nguyen /* reset pointers to inner headers */ 2024a4e82a81STony Nguyen 2025a4e82a81STony Nguyen /* 

		/* reset pointers to inner headers */

		/* cppcheck-suppress unreadVariable */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since the shifts and the multiply can be reordered without affecting the
 * accuracy of the estimate, the operations reduce to:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
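
/* Editor's check of the estimate above (illustrative, not in the original):
 *     size = 12288 (12K): (12288 * 85) >> 20 = 0, plus 1 -> 1 descriptor
 *     size = 24576 (24K): (24576 * 85) >> 20 = 1, plus 1 -> 2 descriptors
 *     size = 65536 (64K): (65536 * 85) >> 20 = 5, plus 1 -> 6 descriptors
 * which matches a true divide-by-12K rounded up; the documented
 * underestimate just past each 12K multiple is absorbed by the 4K - 1 of
 * slack available in the last descriptor.
 */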

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
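
/* Editor's illustration: an skb with a 1514-byte linear head and two 32KB
 * page fragments yields ice_txd_use_count(1514) = 1 plus
 * ice_txd_use_count(32768) = 3 per fragment, i.e. 7 data descriptors in
 * total; this count is what ice_chk_linearize() and ice_maybe_stop_tx()
 * are fed in ice_xmit_frame_ring() below.
 */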

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize sum to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte, which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
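
/* Editor's illustration of the sliding window above: with gso_size = 9000
 * and seven 1500-byte fragments, sum starts at 1 - 9000 = -8999 and the
 * first six fragments bring it to exactly 1; each later step adds the new
 * fragment and subtracts the stale one, so sum stays at 1 and the skb is
 * accepted. Shrink any fragment by a couple of bytes and sum dips below
 * zero, forcing __skb_linearize() in ice_xmit_frame_ring().
 */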

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}
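
/* Editor's illustration: a non-GSO skb needing 9 buffers makes
 * ice_chk_linearize() return true (9 != ICE_MAX_BUF_TXD), so the send path
 * falls back to __skb_linearize(); exactly 8 buffers is still accepted.
 */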

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0)
		return;

	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}
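
/* Editor's note: the timestamp slot index reuses the bit position of
 * ICE_TXD_CTX_QW1_TSO_LEN_S in the context descriptor; this is safe because
 * TSYN and TSO are mutually exclusive (see the ICE_TX_FLAGS_TSO check
 * above), so the field never has to carry both meanings at once.
 */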

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* prefetch for bql data which is infrequently used */
	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);
	if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					(ICE_TX_CTX_DESC_IL2TAG2 <<
					 ICE_TXD_CTX_QW1_CMD_S));
		offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
				    ICE_TX_FLAGS_VLAN_S;
	}

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;
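
	/* Editor's note: by this point offload.cd_qw1 has accumulated
	 * whatever context-descriptor words the offloads required (outer
	 * VLAN, TSO); a single context descriptor is written further down
	 * only if ICE_TX_DESC_DTYPE_CTX ended up set.
	 */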

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);
	if (ice_is_switchdev_running(vsi->back))
		ice_eswitch_set_target_vsi(skb, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;
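
	/* Editor's note: skb_put_padto() frees the skb when padding fails,
	 * which is why the branch above returns NETDEV_TX_OK rather than an
	 * error; the stack must not be told to retry a freed buffer.
	 */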

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function should only be called when the PF is in L3 DSCP PFC mode.
 */
static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
{
	u8 dscp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return dcbcfg->dscp_map[dscp];
}

u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
		 struct net_device *sb_dev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *dcbcfg;

	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
		skb->priority = ice_get_dscp_up(dcbcfg, skb);

	return netdev_pick_tx(netdev, skb, sb_dev);
}
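
/* Editor's illustration: an IPv4 packet marked EF (dsfield 0xb8) gives
 * dscp = 0xb8 >> 2 = 46, so in DSCP PFC mode ice_select_queue() sets
 * skb->priority = dcbcfg->dscp_map[46] before the usual netdev_pick_tx()
 * queue selection runs.
 */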

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* reset the buffer and descriptor state */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}
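
/* Editor's note on the index arithmetic above (illustrative): the loop
 * biases i by -tx_ring->count so that the wrap check is a cheap "!i"
 * instead of a compare against the ring size. With count = 64 and
 * next_to_clean = 62, i starts at -2, reaches 0 after one filter/EOP
 * descriptor pair, is rebased to -64 with tx_buf/tx_desc pointed back at
 * slot 0, and the closing "i += tx_ring->count" recovers the real index.
 */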