// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

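/* Maximum number of packet bytes copied into the skb linear area by
 * ice_construct_skb(); any remaining data is attached as a page frag.
 */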
#define ICE_RX_HDR_SIZE 256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

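/**
 * txring_txq - Find the netdev Tx queue paired with an ice Tx ring
 * @ring: Tx ring to look up the stack queue for
 *
 * Used for byte queue limit accounting (netdev_tx_* calls) on this ring.
 */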
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

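/**
 * ice_rx_frame_truesize - Estimate truesize of the buffer backing a frame
 * @rx_ring: Rx ring the frame was received on
 * @size: packet data length, only used when PAGE_SIZE >= 8192
 *
 * For 4K pages every buffer spans half a page. For larger pages the
 * truesize accounts for the Rx headroom, the data length and, when
 * headroom is in use, the trailing skb_shared_info added by build_skb().
 */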
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ice_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	struct ice_ring *xdp_ring;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		return ice_xmit_xdp_buff(xdp, xdp_ring);
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of the page assigned to the Rx buffer will be used,
 * otherwise the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
	       int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record the Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
next_to_clean and then clean up the contents 1006efc2214bSMaciej Fijalkowski * of the rx_buf. It will either recycle the buffer or unmap it and free 1007efc2214bSMaciej Fijalkowski * the associated resources. 10082b245cb2SAnirudh Venkataramanan */ 10091beb7830SBjörn Töpel static void 10101beb7830SBjörn Töpel ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 10111beb7830SBjörn Töpel int rx_buf_pgcnt) 10122b245cb2SAnirudh Venkataramanan { 101388865fc4SKarol Kolacinski u16 ntc = rx_ring->next_to_clean + 1; 1014efc2214bSMaciej Fijalkowski 1015efc2214bSMaciej Fijalkowski /* fetch, update, and store next to clean */ 1016efc2214bSMaciej Fijalkowski ntc = (ntc < rx_ring->count) ? ntc : 0; 1017efc2214bSMaciej Fijalkowski rx_ring->next_to_clean = ntc; 1018efc2214bSMaciej Fijalkowski 1019ac6f733aSMitch Williams if (!rx_buf) 1020ac6f733aSMitch Williams return; 1021ac6f733aSMitch Williams 10221beb7830SBjörn Töpel if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { 1023ac6f733aSMitch Williams /* hand second half of page back to the ring */ 10242b245cb2SAnirudh Venkataramanan ice_reuse_rx_page(rx_ring, rx_buf); 10252b245cb2SAnirudh Venkataramanan } else { 10262b245cb2SAnirudh Venkataramanan /* we are not reusing the buffer so unmap it */ 10277237f5b0SMaciej Fijalkowski dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, 10287237f5b0SMaciej Fijalkowski ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 10297237f5b0SMaciej Fijalkowski ICE_RX_DMA_ATTR); 103003c66a13SMaciej Fijalkowski __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 10312b245cb2SAnirudh Venkataramanan } 10322b245cb2SAnirudh Venkataramanan 10332b245cb2SAnirudh Venkataramanan /* clear contents of buffer_info */ 10342b245cb2SAnirudh Venkataramanan rx_buf->page = NULL; 10352b245cb2SAnirudh Venkataramanan } 10362b245cb2SAnirudh Venkataramanan 10372b245cb2SAnirudh Venkataramanan /** 10382b245cb2SAnirudh Venkataramanan * ice_is_non_eop - process handling of non-EOP buffers 10392b245cb2SAnirudh Venkataramanan * @rx_ring: Rx ring being processed 10402b245cb2SAnirudh Venkataramanan * @rx_desc: Rx descriptor for current buffer 10412b245cb2SAnirudh Venkataramanan * 1042efc2214bSMaciej Fijalkowski * If the buffer is an EOP buffer, this function exits returning false, 1043efc2214bSMaciej Fijalkowski * otherwise return true indicating that this is in fact a non-EOP buffer. 
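 *
 * A minimal caller sketch (illustrative only, mirroring the Rx clean loop
 * further below): the skb is only handed up the stack once the EOP
 * fragment has been processed, e.g.
 *
 *	if (ice_is_non_eop(rx_ring, rx_desc))
 *		continue;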
10442b245cb2SAnirudh Venkataramanan */ 1045c8b7abddSBruce Allan static bool 1046*29b82f2aSMaciej Fijalkowski ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) 10472b245cb2SAnirudh Venkataramanan { 10482b245cb2SAnirudh Venkataramanan /* if we are the last buffer then there is nothing else to do */ 10492b245cb2SAnirudh Venkataramanan #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 10502b245cb2SAnirudh Venkataramanan if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) 10512b245cb2SAnirudh Venkataramanan return false; 10522b245cb2SAnirudh Venkataramanan 10532b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.non_eop_descs++; 10542b245cb2SAnirudh Venkataramanan 10552b245cb2SAnirudh Venkataramanan return true; 10562b245cb2SAnirudh Venkataramanan } 10572b245cb2SAnirudh Venkataramanan 10582b245cb2SAnirudh Venkataramanan /** 10592b245cb2SAnirudh Venkataramanan * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1060d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 10612b245cb2SAnirudh Venkataramanan * @budget: Total limit on number of packets to process 10622b245cb2SAnirudh Venkataramanan * 10632b245cb2SAnirudh Venkataramanan * This function provides a "bounce buffer" approach to Rx interrupt 10642b245cb2SAnirudh Venkataramanan * processing. The advantage to this is that on systems that have 10652b245cb2SAnirudh Venkataramanan * expensive overhead for IOMMU access this provides a means of avoiding 10662b245cb2SAnirudh Venkataramanan * it by maintaining the mapping of the page to the system. 10672b245cb2SAnirudh Venkataramanan * 10682b245cb2SAnirudh Venkataramanan * Returns amount of work completed 10692b245cb2SAnirudh Venkataramanan */ 1070148beb61SHenry Tieman int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) 10712b245cb2SAnirudh Venkataramanan { 107243b5169dSLorenzo Bianconi unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; 10732b245cb2SAnirudh Venkataramanan u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 1074efc2214bSMaciej Fijalkowski unsigned int xdp_res, xdp_xmit = 0; 1075*29b82f2aSMaciej Fijalkowski struct sk_buff *skb = rx_ring->skb; 1076efc2214bSMaciej Fijalkowski struct bpf_prog *xdp_prog = NULL; 1077efc2214bSMaciej Fijalkowski struct xdp_buff xdp; 1078cb7db356SBrett Creeley bool failure; 10792b245cb2SAnirudh Venkataramanan 1080d4ecdbf7SJesper Dangaard Brouer /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 1081d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE < 8192) 108243b5169dSLorenzo Bianconi frame_sz = ice_rx_frame_truesize(rx_ring, 0); 1083d4ecdbf7SJesper Dangaard Brouer #endif 108443b5169dSLorenzo Bianconi xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 1085efc2214bSMaciej Fijalkowski 1086f9867df6SAnirudh Venkataramanan /* start the loop to process Rx packets bounded by 'budget' */ 10872b245cb2SAnirudh Venkataramanan while (likely(total_rx_pkts < (unsigned int)budget)) { 1088be9df4afSLorenzo Bianconi unsigned int offset = ice_rx_offset(rx_ring); 10892b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 10906c869cb7SMaciej Fijalkowski struct ice_rx_buf *rx_buf; 1091be9df4afSLorenzo Bianconi unsigned char *hard_start; 10926c869cb7SMaciej Fijalkowski unsigned int size; 10932b245cb2SAnirudh Venkataramanan u16 stat_err_bits; 10941beb7830SBjörn Töpel int rx_buf_pgcnt; 10952b245cb2SAnirudh Venkataramanan u16 vlan_tag = 0; 1096d76a60baSAnirudh Venkataramanan u8 rx_ptype; 10972b245cb2SAnirudh Venkataramanan 1098f9867df6SAnirudh Venkataramanan /* get 
the Rx desc from Rx ring based on 'next_to_clean' */ 10992b245cb2SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 11002b245cb2SAnirudh Venkataramanan 11012b245cb2SAnirudh Venkataramanan /* status_error_len will always be zero for unused descriptors 11022b245cb2SAnirudh Venkataramanan * because it's cleared in cleanup, and overlaps with hdr_addr 11032b245cb2SAnirudh Venkataramanan * which is always zero because packet split isn't used, if the 11042b245cb2SAnirudh Venkataramanan * hardware wrote DD then it will be non-zero 11052b245cb2SAnirudh Venkataramanan */ 11062b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 11072b245cb2SAnirudh Venkataramanan if (!ice_test_staterr(rx_desc, stat_err_bits)) 11082b245cb2SAnirudh Venkataramanan break; 11092b245cb2SAnirudh Venkataramanan 11102b245cb2SAnirudh Venkataramanan /* This memory barrier is needed to keep us from reading 11112b245cb2SAnirudh Venkataramanan * any other fields out of the rx_desc until we know the 11122b245cb2SAnirudh Venkataramanan * DD bit is set. 11132b245cb2SAnirudh Venkataramanan */ 11142b245cb2SAnirudh Venkataramanan dma_rmb(); 11152b245cb2SAnirudh Venkataramanan 1116148beb61SHenry Tieman if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 11171beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, NULL, 0); 1118148beb61SHenry Tieman cleaned_count++; 1119148beb61SHenry Tieman continue; 1120148beb61SHenry Tieman } 1121148beb61SHenry Tieman 11226c869cb7SMaciej Fijalkowski size = le16_to_cpu(rx_desc->wb.pkt_len) & 11236c869cb7SMaciej Fijalkowski ICE_RX_FLX_DESC_PKT_LEN_M; 11242b245cb2SAnirudh Venkataramanan 1125ac6f733aSMitch Williams /* retrieve a buffer from the ring */ 1126*29b82f2aSMaciej Fijalkowski rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); 1127ac6f733aSMitch Williams 1128efc2214bSMaciej Fijalkowski if (!size) { 1129efc2214bSMaciej Fijalkowski xdp.data = NULL; 1130efc2214bSMaciej Fijalkowski xdp.data_end = NULL; 1131aaf27254SMaciej Fijalkowski xdp.data_hard_start = NULL; 1132aaf27254SMaciej Fijalkowski xdp.data_meta = NULL; 1133efc2214bSMaciej Fijalkowski goto construct_skb; 1134efc2214bSMaciej Fijalkowski } 1135efc2214bSMaciej Fijalkowski 1136be9df4afSLorenzo Bianconi hard_start = page_address(rx_buf->page) + rx_buf->page_offset - 1137be9df4afSLorenzo Bianconi offset; 1138be9df4afSLorenzo Bianconi xdp_prepare_buff(&xdp, hard_start, offset, size, true); 1139d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE > 4096) 1140d4ecdbf7SJesper Dangaard Brouer /* At larger PAGE_SIZE, frame_sz depend on len size */ 1141d4ecdbf7SJesper Dangaard Brouer xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 1142d4ecdbf7SJesper Dangaard Brouer #endif 1143efc2214bSMaciej Fijalkowski 1144efc2214bSMaciej Fijalkowski rcu_read_lock(); 1145efc2214bSMaciej Fijalkowski xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1146efc2214bSMaciej Fijalkowski if (!xdp_prog) { 1147efc2214bSMaciej Fijalkowski rcu_read_unlock(); 1148efc2214bSMaciej Fijalkowski goto construct_skb; 1149efc2214bSMaciej Fijalkowski } 1150efc2214bSMaciej Fijalkowski 1151efc2214bSMaciej Fijalkowski xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); 1152efc2214bSMaciej Fijalkowski rcu_read_unlock(); 115359bb0808SMaciej Fijalkowski if (!xdp_res) 115459bb0808SMaciej Fijalkowski goto construct_skb; 1155efc2214bSMaciej Fijalkowski if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1156efc2214bSMaciej Fijalkowski xdp_xmit |= xdp_res; 1157d4ecdbf7SJesper Dangaard Brouer ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 
1158efc2214bSMaciej Fijalkowski } else { 1159efc2214bSMaciej Fijalkowski rx_buf->pagecnt_bias++; 1160efc2214bSMaciej Fijalkowski } 1161efc2214bSMaciej Fijalkowski total_rx_bytes += size; 1162efc2214bSMaciej Fijalkowski total_rx_pkts++; 1163efc2214bSMaciej Fijalkowski 1164efc2214bSMaciej Fijalkowski cleaned_count++; 11651beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1166efc2214bSMaciej Fijalkowski continue; 1167efc2214bSMaciej Fijalkowski construct_skb: 11681f45ebe0SMitch Williams if (skb) { 11697237f5b0SMaciej Fijalkowski ice_add_rx_frag(rx_ring, rx_buf, skb, size); 11701f45ebe0SMitch Williams } else if (likely(xdp.data)) { 11711f45ebe0SMitch Williams if (ice_ring_uses_build_skb(rx_ring)) 1172aaf27254SMaciej Fijalkowski skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1173712edbbbSMaciej Fijalkowski else 1174efc2214bSMaciej Fijalkowski skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 11751f45ebe0SMitch Williams } 1176712edbbbSMaciej Fijalkowski /* exit if we failed to retrieve a buffer */ 1177712edbbbSMaciej Fijalkowski if (!skb) { 1178712edbbbSMaciej Fijalkowski rx_ring->rx_stats.alloc_buf_failed++; 1179ac6f733aSMitch Williams if (rx_buf) 1180712edbbbSMaciej Fijalkowski rx_buf->pagecnt_bias++; 11812b245cb2SAnirudh Venkataramanan break; 1182712edbbbSMaciej Fijalkowski } 11832b245cb2SAnirudh Venkataramanan 11841beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 11852b245cb2SAnirudh Venkataramanan cleaned_count++; 11862b245cb2SAnirudh Venkataramanan 11872b245cb2SAnirudh Venkataramanan /* skip if it is NOP desc */ 1188*29b82f2aSMaciej Fijalkowski if (ice_is_non_eop(rx_ring, rx_desc)) 11892b245cb2SAnirudh Venkataramanan continue; 11902b245cb2SAnirudh Venkataramanan 11912b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 11922b245cb2SAnirudh Venkataramanan if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 11932b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 11942b245cb2SAnirudh Venkataramanan continue; 11952b245cb2SAnirudh Venkataramanan } 11962b245cb2SAnirudh Venkataramanan 11972b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 11982b245cb2SAnirudh Venkataramanan if (ice_test_staterr(rx_desc, stat_err_bits)) 11992b245cb2SAnirudh Venkataramanan vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 12002b245cb2SAnirudh Venkataramanan 1201133f4883SKrzysztof Kazimierczak /* pad the skb if needed, to make a valid ethernet frame */ 1202133f4883SKrzysztof Kazimierczak if (eth_skb_pad(skb)) { 12032b245cb2SAnirudh Venkataramanan skb = NULL; 12042b245cb2SAnirudh Venkataramanan continue; 12052b245cb2SAnirudh Venkataramanan } 12062b245cb2SAnirudh Venkataramanan 12072b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 12082b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 12092b245cb2SAnirudh Venkataramanan 1210d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 12116503b659SJesse Brandeburg rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 12126503b659SJesse Brandeburg ICE_RX_FLEX_DESC_PTYPE_M; 12136503b659SJesse Brandeburg 1214d76a60baSAnirudh Venkataramanan ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1215d76a60baSAnirudh Venkataramanan 12162b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 12172b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 1218*29b82f2aSMaciej Fijalkowski skb = NULL; 12192b245cb2SAnirudh Venkataramanan 12202b245cb2SAnirudh Venkataramanan /* update budget 
accounting */ 12212b245cb2SAnirudh Venkataramanan total_rx_pkts++; 12222b245cb2SAnirudh Venkataramanan } 12232b245cb2SAnirudh Venkataramanan 1224cb7db356SBrett Creeley /* return up to cleaned_count buffers to hardware */ 1225cb7db356SBrett Creeley failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1226cb7db356SBrett Creeley 1227efc2214bSMaciej Fijalkowski if (xdp_prog) 1228efc2214bSMaciej Fijalkowski ice_finalize_xdp_rx(rx_ring, xdp_xmit); 1229*29b82f2aSMaciej Fijalkowski rx_ring->skb = skb; 1230efc2214bSMaciej Fijalkowski 12312d4238f5SKrzysztof Kazimierczak ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 12322b245cb2SAnirudh Venkataramanan 12332b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 12342b245cb2SAnirudh Venkataramanan return failure ? budget : (int)total_rx_pkts; 12352b245cb2SAnirudh Venkataramanan } 12362b245cb2SAnirudh Venkataramanan 12372b245cb2SAnirudh Venkataramanan /** 1238711987bbSBrett Creeley * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic 1239711987bbSBrett Creeley * @port_info: port_info structure containing the current link speed 1240711987bbSBrett Creeley * @avg_pkt_size: average size of Tx or Rx packets based on clean routine 12412f2da36eSAnirudh Venkataramanan * @itr: ITR value to update 1242711987bbSBrett Creeley * 1243711987bbSBrett Creeley * Calculate how big of an increment should be applied to the ITR value passed 12447dbc63f0STony Nguyen * in based on wmem_default, SKB overhead, ethernet overhead, and the current 1245711987bbSBrett Creeley * link speed. 1246711987bbSBrett Creeley * 1247711987bbSBrett Creeley * The following is a calculation derived from: 1248711987bbSBrett Creeley * wmem_default / (size + overhead) = desired_pkts_per_int 12497dbc63f0STony Nguyen * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 1250711987bbSBrett Creeley * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1251711987bbSBrett Creeley * 1252711987bbSBrett Creeley * Assuming wmem_default is 212992 and overhead is 640 bytes per 1253711987bbSBrett Creeley * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1254711987bbSBrett Creeley * formula down to: 1255711987bbSBrett Creeley * 1256711987bbSBrett Creeley * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 1257711987bbSBrett Creeley * ITR = -------------------------------------------- * -------------- 1258711987bbSBrett Creeley * rate pkt_size + 640 1259711987bbSBrett Creeley */ 1260711987bbSBrett Creeley static unsigned int 1261711987bbSBrett Creeley ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, 1262711987bbSBrett Creeley unsigned int avg_pkt_size, 1263711987bbSBrett Creeley unsigned int itr) 126464a59d05SAnirudh Venkataramanan { 1265711987bbSBrett Creeley switch (port_info->phy.link_info.link_speed) { 1266711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_100GB: 1267711987bbSBrett Creeley itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), 1268711987bbSBrett Creeley avg_pkt_size + 640); 1269711987bbSBrett Creeley break; 1270711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_50GB: 1271711987bbSBrett Creeley itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), 1272711987bbSBrett Creeley avg_pkt_size + 640); 1273711987bbSBrett Creeley break; 127464a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_40GB: 1275711987bbSBrett Creeley itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), 1276711987bbSBrett Creeley avg_pkt_size + 640); 1277711987bbSBrett Creeley break; 127864a59d05SAnirudh 
Venkataramanan case ICE_AQ_LINK_SPEED_25GB: 1279711987bbSBrett Creeley itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), 1280711987bbSBrett Creeley avg_pkt_size + 640); 1281711987bbSBrett Creeley break; 128264a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_20GB: 1283711987bbSBrett Creeley itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), 1284711987bbSBrett Creeley avg_pkt_size + 640); 1285711987bbSBrett Creeley break; 1286711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_10GB: 128764a59d05SAnirudh Venkataramanan default: 1288711987bbSBrett Creeley itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), 1289711987bbSBrett Creeley avg_pkt_size + 640); 1290711987bbSBrett Creeley break; 129164a59d05SAnirudh Venkataramanan } 1292711987bbSBrett Creeley 1293711987bbSBrett Creeley if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 1294711987bbSBrett Creeley itr &= ICE_ITR_ADAPTIVE_LATENCY; 1295711987bbSBrett Creeley itr += ICE_ITR_ADAPTIVE_MAX_USECS; 1296711987bbSBrett Creeley } 1297711987bbSBrett Creeley 1298711987bbSBrett Creeley return itr; 129964a59d05SAnirudh Venkataramanan } 130064a59d05SAnirudh Venkataramanan 130164a59d05SAnirudh Venkataramanan /** 130264a59d05SAnirudh Venkataramanan * ice_update_itr - update the adaptive ITR value based on statistics 130364a59d05SAnirudh Venkataramanan * @q_vector: structure containing interrupt and ring information 130464a59d05SAnirudh Venkataramanan * @rc: structure containing ring performance data 130564a59d05SAnirudh Venkataramanan * 130664a59d05SAnirudh Venkataramanan * Stores a new ITR value based on packets and byte 130764a59d05SAnirudh Venkataramanan * counts during the last interrupt. The advantage of per interrupt 130864a59d05SAnirudh Venkataramanan * computation is faster updates and more accurate ITR for the current 130964a59d05SAnirudh Venkataramanan * traffic pattern. Constants in this function were computed 131064a59d05SAnirudh Venkataramanan * based on theoretical maximum wire speed and thresholds were set based 131164a59d05SAnirudh Venkataramanan * on testing data as well as attempting to minimize response time 131264a59d05SAnirudh Venkataramanan * while increasing bulk throughput. 131364a59d05SAnirudh Venkataramanan */ 131464a59d05SAnirudh Venkataramanan static void 131564a59d05SAnirudh Venkataramanan ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) 131664a59d05SAnirudh Venkataramanan { 131764a59d05SAnirudh Venkataramanan unsigned long next_update = jiffies; 1318711987bbSBrett Creeley unsigned int packets, bytes, itr; 131964a59d05SAnirudh Venkataramanan bool container_is_rx; 132064a59d05SAnirudh Venkataramanan 132164a59d05SAnirudh Venkataramanan if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) 132264a59d05SAnirudh Venkataramanan return; 132364a59d05SAnirudh Venkataramanan 132464a59d05SAnirudh Venkataramanan /* If itr_countdown is set it means we programmed an ITR within 132564a59d05SAnirudh Venkataramanan * the last 4 interrupt cycles. This has a side effect of us 132664a59d05SAnirudh Venkataramanan * potentially firing an early interrupt. In order to work around 132764a59d05SAnirudh Venkataramanan * this we need to throw out any data received for a few 132864a59d05SAnirudh Venkataramanan * interrupts following the update. 
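 *
 * (Concretely: ice_update_ena_itr() further below arms itr_countdown with
 * ITR_COUNTDOWN_START whenever it reprograms an ITR, so the statistics
 * gathered over roughly the next three interrupts are discarded here
 * instead of being fed back into the adaptive algorithm.)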
132964a59d05SAnirudh Venkataramanan */ 133064a59d05SAnirudh Venkataramanan if (q_vector->itr_countdown) { 133164a59d05SAnirudh Venkataramanan itr = rc->target_itr; 133264a59d05SAnirudh Venkataramanan goto clear_counts; 133364a59d05SAnirudh Venkataramanan } 133464a59d05SAnirudh Venkataramanan 133564a59d05SAnirudh Venkataramanan container_is_rx = (&q_vector->rx == rc); 133664a59d05SAnirudh Venkataramanan /* For Rx we want to push the delay up and default to low latency. 133764a59d05SAnirudh Venkataramanan * for Tx we want to pull the delay down and default to high latency. 133864a59d05SAnirudh Venkataramanan */ 133964a59d05SAnirudh Venkataramanan itr = container_is_rx ? 134064a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : 134164a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; 134264a59d05SAnirudh Venkataramanan 134364a59d05SAnirudh Venkataramanan /* If we didn't update within up to 1 - 2 jiffies we can assume 134464a59d05SAnirudh Venkataramanan * that either packets are coming in so slow there hasn't been 134564a59d05SAnirudh Venkataramanan * any work, or that there is so much work that NAPI is dealing 134664a59d05SAnirudh Venkataramanan * with interrupt moderation and we don't need to do anything. 134764a59d05SAnirudh Venkataramanan */ 134864a59d05SAnirudh Venkataramanan if (time_after(next_update, rc->next_update)) 134964a59d05SAnirudh Venkataramanan goto clear_counts; 135064a59d05SAnirudh Venkataramanan 1351d27525ecSJesse Brandeburg prefetch(q_vector->vsi->port_info); 1352d27525ecSJesse Brandeburg 135364a59d05SAnirudh Venkataramanan packets = rc->total_pkts; 135464a59d05SAnirudh Venkataramanan bytes = rc->total_bytes; 135564a59d05SAnirudh Venkataramanan 135664a59d05SAnirudh Venkataramanan if (container_is_rx) { 135764a59d05SAnirudh Venkataramanan /* If Rx there are 1 to 4 packets and bytes are less than 135864a59d05SAnirudh Venkataramanan * 9000 assume insufficient data to use bulk rate limiting 135964a59d05SAnirudh Venkataramanan * approach unless Tx is already in bulk rate limiting. We 136064a59d05SAnirudh Venkataramanan * are likely latency driven. 136164a59d05SAnirudh Venkataramanan */ 136264a59d05SAnirudh Venkataramanan if (packets && packets < 4 && bytes < 9000 && 136364a59d05SAnirudh Venkataramanan (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { 136464a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_LATENCY; 1365711987bbSBrett Creeley goto adjust_by_size_and_speed; 136664a59d05SAnirudh Venkataramanan } 136764a59d05SAnirudh Venkataramanan } else if (packets < 4) { 136864a59d05SAnirudh Venkataramanan /* If we have Tx and Rx ITR maxed and Tx ITR is running in 136964a59d05SAnirudh Venkataramanan * bulk mode and we are receiving 4 or fewer packets just 137064a59d05SAnirudh Venkataramanan * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 137164a59d05SAnirudh Venkataramanan * that the Rx can relax. 137264a59d05SAnirudh Venkataramanan */ 137364a59d05SAnirudh Venkataramanan if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && 137464a59d05SAnirudh Venkataramanan (q_vector->rx.target_itr & ICE_ITR_MASK) == 137564a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MAX_USECS) 137664a59d05SAnirudh Venkataramanan goto clear_counts; 137764a59d05SAnirudh Venkataramanan } else if (packets > 32) { 137864a59d05SAnirudh Venkataramanan /* If we have processed over 32 packets in a single interrupt 137964a59d05SAnirudh Venkataramanan * for Tx assume we need to switch over to "bulk" mode. 
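 *
 * Rough per-interrupt packet-count bands used by the checks that follow
 * (an illustrative summary, not a separate specification):
 *	fewer than 56 packets: nudge the ITR up by ICE_ITR_ADAPTIVE_MIN_INC
 *	56 to 112 packets: "goldilocks" zone, keep the current ITR
 *	113 to 256 packets: halve the current ITR
 *	more than 256 packets: fall back to bulk and scale by packet size
 *	and link speed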
138064a59d05SAnirudh Venkataramanan */ 138164a59d05SAnirudh Venkataramanan rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; 138264a59d05SAnirudh Venkataramanan } 138364a59d05SAnirudh Venkataramanan 138464a59d05SAnirudh Venkataramanan /* We have no packets to actually measure against. This means 138564a59d05SAnirudh Venkataramanan * either one of the other queues on this vector is active or 138664a59d05SAnirudh Venkataramanan * we are a Tx queue doing TSO with too high of an interrupt rate. 138764a59d05SAnirudh Venkataramanan * 138864a59d05SAnirudh Venkataramanan * Between 4 and 56 we can assume that our current interrupt delay 138964a59d05SAnirudh Venkataramanan * is only slightly too low. As such we should increase it by a small 139064a59d05SAnirudh Venkataramanan * fixed amount. 139164a59d05SAnirudh Venkataramanan */ 139264a59d05SAnirudh Venkataramanan if (packets < 56) { 139364a59d05SAnirudh Venkataramanan itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; 139464a59d05SAnirudh Venkataramanan if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 139564a59d05SAnirudh Venkataramanan itr &= ICE_ITR_ADAPTIVE_LATENCY; 139664a59d05SAnirudh Venkataramanan itr += ICE_ITR_ADAPTIVE_MAX_USECS; 139764a59d05SAnirudh Venkataramanan } 139864a59d05SAnirudh Venkataramanan goto clear_counts; 139964a59d05SAnirudh Venkataramanan } 140064a59d05SAnirudh Venkataramanan 140164a59d05SAnirudh Venkataramanan if (packets <= 256) { 140264a59d05SAnirudh Venkataramanan itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 140364a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 140464a59d05SAnirudh Venkataramanan 140564a59d05SAnirudh Venkataramanan /* Between 56 and 112 is our "goldilocks" zone where we are 140664a59d05SAnirudh Venkataramanan * working out "just right". Just report that our current 140764a59d05SAnirudh Venkataramanan * ITR is good for us. 140864a59d05SAnirudh Venkataramanan */ 140964a59d05SAnirudh Venkataramanan if (packets <= 112) 141064a59d05SAnirudh Venkataramanan goto clear_counts; 141164a59d05SAnirudh Venkataramanan 141264a59d05SAnirudh Venkataramanan /* If packet count is 128 or greater we are likely looking 141364a59d05SAnirudh Venkataramanan * at a slight overrun of the delay we want. Try halving 141464a59d05SAnirudh Venkataramanan * our delay to see if that will cut the number of packets 141564a59d05SAnirudh Venkataramanan * in half per interrupt. 141664a59d05SAnirudh Venkataramanan */ 141764a59d05SAnirudh Venkataramanan itr >>= 1; 141864a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 141964a59d05SAnirudh Venkataramanan if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) 142064a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_MIN_USECS; 142164a59d05SAnirudh Venkataramanan 142264a59d05SAnirudh Venkataramanan goto clear_counts; 142364a59d05SAnirudh Venkataramanan } 142464a59d05SAnirudh Venkataramanan 142564a59d05SAnirudh Venkataramanan /* The paths below assume we are dealing with a bulk ITR since 142664a59d05SAnirudh Venkataramanan * number of packets is greater than 256. We are just going to have 142764a59d05SAnirudh Venkataramanan * to compute a value and try to bring the count under control, 142864a59d05SAnirudh Venkataramanan * though for smaller packet sizes there isn't much we can do as 142964a59d05SAnirudh Venkataramanan * NAPI polling will likely be kicking in sooner rather than later. 
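 *
 * Worked example (illustrative, assuming a 100 Gbps link and an average
 * packet size of about 1500 bytes): ice_adjust_itr_by_size_and_speed()
 * above would add DIV_ROUND_UP(17 * (1500 + 24), 1500 + 640) = 13 usecs
 * to the ITR value passed in.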
143064a59d05SAnirudh Venkataramanan */ 143164a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_BULK; 143264a59d05SAnirudh Venkataramanan 1433711987bbSBrett Creeley adjust_by_size_and_speed: 143464a59d05SAnirudh Venkataramanan 1435711987bbSBrett Creeley /* based on checks above packets cannot be 0 so division is safe */ 1436711987bbSBrett Creeley itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, 1437711987bbSBrett Creeley bytes / packets, itr); 143864a59d05SAnirudh Venkataramanan 143964a59d05SAnirudh Venkataramanan clear_counts: 144064a59d05SAnirudh Venkataramanan /* write back value */ 144164a59d05SAnirudh Venkataramanan rc->target_itr = itr; 144264a59d05SAnirudh Venkataramanan 144364a59d05SAnirudh Venkataramanan /* next update should occur within next jiffy */ 144464a59d05SAnirudh Venkataramanan rc->next_update = next_update + 1; 144564a59d05SAnirudh Venkataramanan 144664a59d05SAnirudh Venkataramanan rc->total_bytes = 0; 144764a59d05SAnirudh Venkataramanan rc->total_pkts = 0; 144864a59d05SAnirudh Venkataramanan } 144964a59d05SAnirudh Venkataramanan 14502b245cb2SAnirudh Venkataramanan /** 145163f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 145263f545edSBrett Creeley * @itr_idx: interrupt throttling index 145364a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 145463f545edSBrett Creeley */ 14558244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 145663f545edSBrett Creeley { 14572f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 145864a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 145964a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 146064a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 146164a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 146264a59d05SAnirudh Venkataramanan * prior to the ITR field. 146364a59d05SAnirudh Venkataramanan */ 146464a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 146564a59d05SAnirudh Venkataramanan 146663f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 146763f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 146864a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 146963f545edSBrett Creeley } 147063f545edSBrett Creeley 147164a59d05SAnirudh Venkataramanan /* The act of updating the ITR will cause it to immediately trigger. In order 147264a59d05SAnirudh Venkataramanan * to prevent this from throwing off adaptive update statistics we defer the 147364a59d05SAnirudh Venkataramanan * update so that it can only happen so often. So after either Tx or Rx are 147464a59d05SAnirudh Venkataramanan * updated we make the adaptive scheme wait until either the ITR completely 147564a59d05SAnirudh Venkataramanan * expires via the next_update expiration or we have been through at least 147664a59d05SAnirudh Venkataramanan * 3 interrupts. 
147764a59d05SAnirudh Venkataramanan */
147864a59d05SAnirudh Venkataramanan #define ITR_COUNTDOWN_START 3
147964a59d05SAnirudh Venkataramanan
148063f545edSBrett Creeley /**
148163f545edSBrett Creeley * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
148263f545edSBrett Creeley * @q_vector: q_vector for which ITR is being updated and interrupt enabled
148363f545edSBrett Creeley */
14842fb0821fSJesse Brandeburg static void ice_update_ena_itr(struct ice_q_vector *q_vector)
148563f545edSBrett Creeley {
148664a59d05SAnirudh Venkataramanan struct ice_ring_container *tx = &q_vector->tx;
148764a59d05SAnirudh Venkataramanan struct ice_ring_container *rx = &q_vector->rx;
14882fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi;
148963f545edSBrett Creeley u32 itr_val;
149063f545edSBrett Creeley
14911d9f7ca3SJesse Brandeburg /* when exiting WB_ON_ITR just reset the countdown and let ITR
14921d9f7ca3SJesse Brandeburg * resume its normal "interrupts-enabled" path
14932ab28bb0SBrett Creeley */
14941d9f7ca3SJesse Brandeburg if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
14952ab28bb0SBrett Creeley q_vector->itr_countdown = 0;
14962ab28bb0SBrett Creeley
149764a59d05SAnirudh Venkataramanan /* This will do nothing if dynamic updates are not enabled */
149864a59d05SAnirudh Venkataramanan ice_update_itr(q_vector, tx);
149964a59d05SAnirudh Venkataramanan ice_update_itr(q_vector, rx);
150064a59d05SAnirudh Venkataramanan
150163f545edSBrett Creeley /* This block of logic allows us to get away with only updating
150263f545edSBrett Creeley * one ITR value with each interrupt. The idea is to perform a
150363f545edSBrett Creeley * pseudo-lazy update with the following criteria.
150463f545edSBrett Creeley *
150563f545edSBrett Creeley * 1. Rx is given higher priority than Tx if both are in same state
150663f545edSBrett Creeley * 2. If we must reduce an ITR, that is given highest priority.
150763f545edSBrett Creeley * 3. We then give priority to increasing ITR based on amount.
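 *
 * (In the if/else chain below that works out to: reduce Rx first, then
 * reduce Tx, then apply whichever of the two pending increases is larger,
 * with Rx winning ties; otherwise simply re-enable the interrupt without
 * updating either ITR.)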
150863f545edSBrett Creeley */ 150964a59d05SAnirudh Venkataramanan if (rx->target_itr < rx->current_itr) { 151063f545edSBrett Creeley /* Rx ITR needs to be reduced, this is highest priority */ 151164a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 151264a59d05SAnirudh Venkataramanan rx->current_itr = rx->target_itr; 151364a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 151464a59d05SAnirudh Venkataramanan } else if ((tx->target_itr < tx->current_itr) || 151564a59d05SAnirudh Venkataramanan ((rx->target_itr - rx->current_itr) < 151664a59d05SAnirudh Venkataramanan (tx->target_itr - tx->current_itr))) { 151763f545edSBrett Creeley /* Tx ITR needs to be reduced, this is second priority 151863f545edSBrett Creeley * Tx ITR needs to be increased more than Rx, fourth priority 151963f545edSBrett Creeley */ 152064a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); 152164a59d05SAnirudh Venkataramanan tx->current_itr = tx->target_itr; 152264a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 152364a59d05SAnirudh Venkataramanan } else if (rx->current_itr != rx->target_itr) { 152463f545edSBrett Creeley /* Rx ITR needs to be increased, third priority */ 152564a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 152664a59d05SAnirudh Venkataramanan rx->current_itr = rx->target_itr; 152764a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 152863f545edSBrett Creeley } else { 152963f545edSBrett Creeley /* Still have to re-enable the interrupts */ 153063f545edSBrett Creeley itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 153164a59d05SAnirudh Venkataramanan if (q_vector->itr_countdown) 153264a59d05SAnirudh Venkataramanan q_vector->itr_countdown--; 153363f545edSBrett Creeley } 153463f545edSBrett Creeley 15351d9f7ca3SJesse Brandeburg if (!test_bit(__ICE_DOWN, vsi->state)) 15361d9f7ca3SJesse Brandeburg wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 153763f545edSBrett Creeley } 153863f545edSBrett Creeley 153963f545edSBrett Creeley /** 15402ab28bb0SBrett Creeley * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 15412ab28bb0SBrett Creeley * @q_vector: q_vector to set WB_ON_ITR on 15422ab28bb0SBrett Creeley * 15432ab28bb0SBrett Creeley * We need to tell hardware to write-back completed descriptors even when 15442ab28bb0SBrett Creeley * interrupts are disabled. Descriptors will be written back on cache line 15452ab28bb0SBrett Creeley * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 15461d9f7ca3SJesse Brandeburg * descriptors may not be written back if they don't fill a cache line until 15471d9f7ca3SJesse Brandeburg * the next interrupt. 15482ab28bb0SBrett Creeley * 15491d9f7ca3SJesse Brandeburg * This sets the write-back frequency to whatever was set previously for the 15501d9f7ca3SJesse Brandeburg * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we 15511d9f7ca3SJesse Brandeburg * aren't meddling with the INTENA_M bit. 
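 *
 * (Schematically, the register write below is:
 *	GLINT_DYN_CTL = (ICE_ITR_NONE << ITR_INDX_S) | INTENA_MSK | WB_ON_ITR
 * i.e. no ITR index is selected for update, the interrupt enable bit is
 * left untouched, and only the write-back-on-ITR behaviour is enabled.)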
15522ab28bb0SBrett Creeley */ 15532fb0821fSJesse Brandeburg static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 15542ab28bb0SBrett Creeley { 15552fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 15562fb0821fSJesse Brandeburg 15571d9f7ca3SJesse Brandeburg /* already in wb_on_itr mode no need to change it */ 15582ab28bb0SBrett Creeley if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) 15592ab28bb0SBrett Creeley return; 15602ab28bb0SBrett Creeley 15611d9f7ca3SJesse Brandeburg /* use previously set ITR values for all of the ITR indices by 15621d9f7ca3SJesse Brandeburg * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and 15631d9f7ca3SJesse Brandeburg * be static in non-adaptive mode (user configured) 15641d9f7ca3SJesse Brandeburg */ 15652ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 15661d9f7ca3SJesse Brandeburg ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & 15671d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | 15681d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_WB_ON_ITR_M); 15692ab28bb0SBrett Creeley 15702ab28bb0SBrett Creeley q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; 15712ab28bb0SBrett Creeley } 15722ab28bb0SBrett Creeley 15732ab28bb0SBrett Creeley /** 15742b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 15752b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 15762b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 15772b245cb2SAnirudh Venkataramanan * 15782b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 15792b245cb2SAnirudh Venkataramanan * 15802b245cb2SAnirudh Venkataramanan * Returns the amount of work done 15812b245cb2SAnirudh Venkataramanan */ 15822b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 15832b245cb2SAnirudh Venkataramanan { 15842b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 15852b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 15862b245cb2SAnirudh Venkataramanan bool clean_complete = true; 15872b245cb2SAnirudh Venkataramanan struct ice_ring *ring; 15889118fcd5SBrett Creeley int budget_per_ring; 15892b245cb2SAnirudh Venkataramanan int work_done = 0; 15902b245cb2SAnirudh Venkataramanan 15912b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 15922b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 15932b245cb2SAnirudh Venkataramanan */ 15942d4238f5SKrzysztof Kazimierczak ice_for_each_ring(ring, q_vector->tx) { 15951742b3d5SMagnus Karlsson bool wd = ring->xsk_pool ? 
15962d4238f5SKrzysztof Kazimierczak ice_clean_tx_irq_zc(ring, budget) : 15972d4238f5SKrzysztof Kazimierczak ice_clean_tx_irq(ring, budget); 15982d4238f5SKrzysztof Kazimierczak 15992d4238f5SKrzysztof Kazimierczak if (!wd) 16002b245cb2SAnirudh Venkataramanan clean_complete = false; 16012d4238f5SKrzysztof Kazimierczak } 16022b245cb2SAnirudh Venkataramanan 16032b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 1604d27525ecSJesse Brandeburg if (unlikely(budget <= 0)) 16052b245cb2SAnirudh Venkataramanan return budget; 16062b245cb2SAnirudh Venkataramanan 16079118fcd5SBrett Creeley /* normally we have 1 Rx ring per q_vector */ 16089118fcd5SBrett Creeley if (unlikely(q_vector->num_ring_rx > 1)) 16099118fcd5SBrett Creeley /* We attempt to distribute budget to each Rx queue fairly, but 16109118fcd5SBrett Creeley * don't allow the budget to go below 1 because that would exit 16119118fcd5SBrett Creeley * polling early. 16122b245cb2SAnirudh Venkataramanan */ 161388865fc4SKarol Kolacinski budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 16149118fcd5SBrett Creeley else 16159118fcd5SBrett Creeley /* Max of 1 Rx ring in this q_vector so give it the budget */ 16169118fcd5SBrett Creeley budget_per_ring = budget; 16172b245cb2SAnirudh Venkataramanan 16182b245cb2SAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->rx) { 16192b245cb2SAnirudh Venkataramanan int cleaned; 16202b245cb2SAnirudh Venkataramanan 16212d4238f5SKrzysztof Kazimierczak /* A dedicated path for zero-copy allows making a single 16222d4238f5SKrzysztof Kazimierczak * comparison in the irq context instead of many inside the 16232d4238f5SKrzysztof Kazimierczak * ice_clean_rx_irq function and makes the codebase cleaner. 16242d4238f5SKrzysztof Kazimierczak */ 16251742b3d5SMagnus Karlsson cleaned = ring->xsk_pool ? 16262d4238f5SKrzysztof Kazimierczak ice_clean_rx_irq_zc(ring, budget_per_ring) : 16272d4238f5SKrzysztof Kazimierczak ice_clean_rx_irq(ring, budget_per_ring); 16282b245cb2SAnirudh Venkataramanan work_done += cleaned; 16292b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 16302b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 16312b245cb2SAnirudh Venkataramanan clean_complete = false; 16322b245cb2SAnirudh Venkataramanan } 16332b245cb2SAnirudh Venkataramanan 16342b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 16351d9f7ca3SJesse Brandeburg if (!clean_complete) { 16361d9f7ca3SJesse Brandeburg /* Set the writeback on ITR so partial completions of 16371d9f7ca3SJesse Brandeburg * cache-lines will still continue even if we're polling. 
16381d9f7ca3SJesse Brandeburg */ 16391d9f7ca3SJesse Brandeburg ice_set_wb_on_itr(q_vector); 16402b245cb2SAnirudh Venkataramanan return budget; 16411d9f7ca3SJesse Brandeburg } 16422b245cb2SAnirudh Venkataramanan 16430bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 16440bcd952fSJesse Brandeburg * poll us due to busy-polling 16450bcd952fSJesse Brandeburg */ 16460bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) 16472fb0821fSJesse Brandeburg ice_update_ena_itr(q_vector); 16482ab28bb0SBrett Creeley else 16492fb0821fSJesse Brandeburg ice_set_wb_on_itr(q_vector); 1650e0c9fd9bSDave Ertman 165132a64994SBruce Allan return min_t(int, work_done, budget - 1); 16522b245cb2SAnirudh Venkataramanan } 16532b245cb2SAnirudh Venkataramanan 16542b245cb2SAnirudh Venkataramanan /** 1655d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 16562b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 16572b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 16582b245cb2SAnirudh Venkataramanan * 16592b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 16602b245cb2SAnirudh Venkataramanan */ 16612b245cb2SAnirudh Venkataramanan static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 16622b245cb2SAnirudh Venkataramanan { 16632b245cb2SAnirudh Venkataramanan netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 16642b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 16652b245cb2SAnirudh Venkataramanan smp_mb(); 16662b245cb2SAnirudh Venkataramanan 16672b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. */ 16682b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 16692b245cb2SAnirudh Venkataramanan return -EBUSY; 16702b245cb2SAnirudh Venkataramanan 16712b245cb2SAnirudh Venkataramanan /* A reprieve! 
- use start_subqueue because it doesn't call schedule */ 16722b245cb2SAnirudh Venkataramanan netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 16732b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 16742b245cb2SAnirudh Venkataramanan return 0; 16752b245cb2SAnirudh Venkataramanan } 16762b245cb2SAnirudh Venkataramanan 16772b245cb2SAnirudh Venkataramanan /** 1678d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 16792b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 16802b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 16812b245cb2SAnirudh Venkataramanan * 16822b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 16832b245cb2SAnirudh Venkataramanan */ 16842b245cb2SAnirudh Venkataramanan static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 16852b245cb2SAnirudh Venkataramanan { 16862b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 16872b245cb2SAnirudh Venkataramanan return 0; 1688d337f2afSAnirudh Venkataramanan 16892b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 16902b245cb2SAnirudh Venkataramanan } 16912b245cb2SAnirudh Venkataramanan 16922b245cb2SAnirudh Venkataramanan /** 16932b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 16942b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 16952b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1696d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 16972b245cb2SAnirudh Venkataramanan * 16982b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 16992b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 17002b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
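 *
 * (Note: a fragment larger than ICE_MAX_DATA_PER_TXD is split across
 * multiple data descriptors by the loop below; each chunk starts from
 * ICE_MAX_DATA_PER_TXD_ALIGNED and is padded so that it ends on an
 * ICE_MAX_READ_REQ_SIZE boundary.)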
17012b245cb2SAnirudh Venkataramanan */ 1702d76a60baSAnirudh Venkataramanan static void 1703d76a60baSAnirudh Venkataramanan ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, 1704d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 17052b245cb2SAnirudh Venkataramanan { 1706d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 17072b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 17082b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 17092b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 17102b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 17112b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 17124ee656bbSTony Nguyen skb_frag_t *frag; 17132b245cb2SAnirudh Venkataramanan dma_addr_t dma; 17142b245cb2SAnirudh Venkataramanan 1715d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1716d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1717d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 17182b245cb2SAnirudh Venkataramanan skb = first->skb; 17192b245cb2SAnirudh Venkataramanan 17202b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 17212b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 17222b245cb2SAnirudh Venkataramanan 17232b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 17242b245cb2SAnirudh Venkataramanan 1725d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1726d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1727d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1728d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1729d76a60baSAnirudh Venkataramanan } 1730d76a60baSAnirudh Venkataramanan 17312b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 17322b245cb2SAnirudh Venkataramanan 17332b245cb2SAnirudh Venkataramanan tx_buf = first; 17342b245cb2SAnirudh Venkataramanan 17352b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 17362b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 17372b245cb2SAnirudh Venkataramanan 17382b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 17392b245cb2SAnirudh Venkataramanan goto dma_error; 17402b245cb2SAnirudh Venkataramanan 17412b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 17422b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 17432b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 17442b245cb2SAnirudh Venkataramanan 17452b245cb2SAnirudh Venkataramanan /* align size to end of page */ 17462b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 17472b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 17482b245cb2SAnirudh Venkataramanan 17492b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 17502b245cb2SAnirudh Venkataramanan * can handle 17512b245cb2SAnirudh Venkataramanan */ 17522b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 17532b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 17545757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, max_data, 17555757cc7cSTony Nguyen td_tag); 17562b245cb2SAnirudh Venkataramanan 17572b245cb2SAnirudh Venkataramanan tx_desc++; 17582b245cb2SAnirudh Venkataramanan i++; 17592b245cb2SAnirudh Venkataramanan 17602b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 17612b245cb2SAnirudh Venkataramanan tx_desc = 
ICE_TX_DESC(tx_ring, 0); 17622b245cb2SAnirudh Venkataramanan i = 0; 17632b245cb2SAnirudh Venkataramanan } 17642b245cb2SAnirudh Venkataramanan 17652b245cb2SAnirudh Venkataramanan dma += max_data; 17662b245cb2SAnirudh Venkataramanan size -= max_data; 17672b245cb2SAnirudh Venkataramanan 17682b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 17692b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 17702b245cb2SAnirudh Venkataramanan } 17712b245cb2SAnirudh Venkataramanan 17722b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 17732b245cb2SAnirudh Venkataramanan break; 17742b245cb2SAnirudh Venkataramanan 17755757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 17762b245cb2SAnirudh Venkataramanan size, td_tag); 17772b245cb2SAnirudh Venkataramanan 17782b245cb2SAnirudh Venkataramanan tx_desc++; 17792b245cb2SAnirudh Venkataramanan i++; 17802b245cb2SAnirudh Venkataramanan 17812b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 17822b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 17832b245cb2SAnirudh Venkataramanan i = 0; 17842b245cb2SAnirudh Venkataramanan } 17852b245cb2SAnirudh Venkataramanan 17862b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 17872b245cb2SAnirudh Venkataramanan data_len -= size; 17882b245cb2SAnirudh Venkataramanan 17892b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 17902b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 17912b245cb2SAnirudh Venkataramanan 17922b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 17932b245cb2SAnirudh Venkataramanan } 17942b245cb2SAnirudh Venkataramanan 17952b245cb2SAnirudh Venkataramanan /* record bytecount for BQL */ 17962b245cb2SAnirudh Venkataramanan netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 17972b245cb2SAnirudh Venkataramanan 17982b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 17992b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 18002b245cb2SAnirudh Venkataramanan 18012b245cb2SAnirudh Venkataramanan i++; 18022b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 18032b245cb2SAnirudh Venkataramanan i = 0; 18042b245cb2SAnirudh Venkataramanan 18052b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 1806efc2214bSMaciej Fijalkowski td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 18075757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = 18085757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, size, td_tag); 18092b245cb2SAnirudh Venkataramanan 18102b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 18112b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 18122b245cb2SAnirudh Venkataramanan * 18132b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 18142b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
18152b245cb2SAnirudh Venkataramanan */ 18162b245cb2SAnirudh Venkataramanan wmb(); 18172b245cb2SAnirudh Venkataramanan 18182b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 18192b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 18202b245cb2SAnirudh Venkataramanan 18212b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 18222b245cb2SAnirudh Venkataramanan 18232b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 18242b245cb2SAnirudh Venkataramanan 18252b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 18264ee656bbSTony Nguyen if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 18272b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 18282b245cb2SAnirudh Venkataramanan 18292b245cb2SAnirudh Venkataramanan return; 18302b245cb2SAnirudh Venkataramanan 18312b245cb2SAnirudh Venkataramanan dma_error: 18322f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 18332b245cb2SAnirudh Venkataramanan for (;;) { 18342b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 18352b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 18362b245cb2SAnirudh Venkataramanan if (tx_buf == first) 18372b245cb2SAnirudh Venkataramanan break; 18382b245cb2SAnirudh Venkataramanan if (i == 0) 18392b245cb2SAnirudh Venkataramanan i = tx_ring->count; 18402b245cb2SAnirudh Venkataramanan i--; 18412b245cb2SAnirudh Venkataramanan } 18422b245cb2SAnirudh Venkataramanan 18432b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 18442b245cb2SAnirudh Venkataramanan } 18452b245cb2SAnirudh Venkataramanan 18462b245cb2SAnirudh Venkataramanan /** 1847d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1848d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1849d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1850d76a60baSAnirudh Venkataramanan * 1851d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
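 *
 * Worked example (illustrative, for a plain TCP/IPv4 frame with no VLAN or
 * tunnel): the td_offset fields built below encode header lengths in
 * hardware units, i.e.
 *	MACLEN = 14-byte Ethernet header / 2 = 7 (in 2-byte words)
 *	IPLEN  = 20-byte IPv4 header / 4 = 5 (in 4-byte words)
 *	L4LEN  = tcp->doff = 5 (in 4-byte words, for a TCP header with no options)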
1852d76a60baSAnirudh Venkataramanan */ 1853d76a60baSAnirudh Venkataramanan static 1854d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1855d76a60baSAnirudh Venkataramanan { 1856d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1857d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1858d76a60baSAnirudh Venkataramanan union { 1859d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1860d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1861d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1862d76a60baSAnirudh Venkataramanan } ip; 1863d76a60baSAnirudh Venkataramanan union { 1864d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1865d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1866d76a60baSAnirudh Venkataramanan } l4; 1867d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1868d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1869d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1870d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1871d76a60baSAnirudh Venkataramanan 1872d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1873d76a60baSAnirudh Venkataramanan return 0; 1874d76a60baSAnirudh Venkataramanan 1875d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1876d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1877d76a60baSAnirudh Venkataramanan 1878d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1879d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1880d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1881d76a60baSAnirudh Venkataramanan 1882a4e82a81STony Nguyen protocol = vlan_get_protocol(skb); 1883a4e82a81STony Nguyen 1884a4e82a81STony Nguyen if (protocol == htons(ETH_P_IP)) 1885a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1886a4e82a81STony Nguyen else if (protocol == htons(ETH_P_IPV6)) 1887a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1888a4e82a81STony Nguyen 1889a4e82a81STony Nguyen if (skb->encapsulation) { 1890a4e82a81STony Nguyen bool gso_ena = false; 1891a4e82a81STony Nguyen u32 tunnel = 0; 1892a4e82a81STony Nguyen 1893a4e82a81STony Nguyen /* define outer network header type */ 1894a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1895a4e82a81STony Nguyen tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
1896a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4 : 1897a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 1898a4e82a81STony Nguyen l4_proto = ip.v4->protocol; 1899a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 19001b0b0b58SNick Nunley int ret; 19011b0b0b58SNick Nunley 1902a4e82a81STony Nguyen tunnel |= ICE_TX_CTX_EIPT_IPV6; 1903a4e82a81STony Nguyen exthdr = ip.hdr + sizeof(*ip.v6); 1904a4e82a81STony Nguyen l4_proto = ip.v6->nexthdr; 19051b0b0b58SNick Nunley ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 1906a4e82a81STony Nguyen &l4_proto, &frag_off); 19071b0b0b58SNick Nunley if (ret < 0) 19081b0b0b58SNick Nunley return -1; 1909a4e82a81STony Nguyen } 1910a4e82a81STony Nguyen 1911a4e82a81STony Nguyen /* define outer transport */ 1912a4e82a81STony Nguyen switch (l4_proto) { 1913a4e82a81STony Nguyen case IPPROTO_UDP: 1914a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_UDP_TUNNELING; 1915a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1916a4e82a81STony Nguyen break; 1917a4e82a81STony Nguyen case IPPROTO_GRE: 1918a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_GRE_TUNNELING; 1919a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1920a4e82a81STony Nguyen break; 1921a4e82a81STony Nguyen case IPPROTO_IPIP: 1922a4e82a81STony Nguyen case IPPROTO_IPV6: 1923a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1924a4e82a81STony Nguyen l4.hdr = skb_inner_network_header(skb); 1925a4e82a81STony Nguyen break; 1926a4e82a81STony Nguyen default: 1927a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_TSO) 1928d76a60baSAnirudh Venkataramanan return -1; 1929d76a60baSAnirudh Venkataramanan 1930a4e82a81STony Nguyen skb_checksum_help(skb); 1931a4e82a81STony Nguyen return 0; 1932a4e82a81STony Nguyen } 1933a4e82a81STony Nguyen 1934a4e82a81STony Nguyen /* compute outer L3 header size */ 1935a4e82a81STony Nguyen tunnel |= ((l4.hdr - ip.hdr) / 4) << 1936a4e82a81STony Nguyen ICE_TXD_CTX_QW0_EIPLEN_S; 1937a4e82a81STony Nguyen 1938a4e82a81STony Nguyen /* switch IP header pointer from outer to inner header */ 1939a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1940a4e82a81STony Nguyen 1941a4e82a81STony Nguyen /* compute tunnel header size */ 1942a4e82a81STony Nguyen tunnel |= ((ip.hdr - l4.hdr) / 2) << 1943a4e82a81STony Nguyen ICE_TXD_CTX_QW0_NATLEN_S; 1944a4e82a81STony Nguyen 1945a4e82a81STony Nguyen gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; 1946a4e82a81STony Nguyen /* indicate if we need to offload outer UDP header */ 1947a4e82a81STony Nguyen if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && 1948a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 1949a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M; 1950a4e82a81STony Nguyen 1951a4e82a81STony Nguyen /* record tunnel offload values */ 1952a4e82a81STony Nguyen off->cd_tunnel_params |= tunnel; 1953a4e82a81STony Nguyen 1954a4e82a81STony Nguyen /* set DTYP=1 to indicate that it's an Tx context descriptor 1955a4e82a81STony Nguyen * in IPsec tunnel mode with Tx offloads in Quad word 1 1956a4e82a81STony Nguyen */ 1957a4e82a81STony Nguyen off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; 1958a4e82a81STony Nguyen 1959a4e82a81STony Nguyen /* switch L4 header pointer from outer to inner */ 1960a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1961a4e82a81STony Nguyen l4_proto = 0; 1962a4e82a81STony Nguyen 1963a4e82a81STony Nguyen /* reset type as we transition from outer to inner headers */ 1964a4e82a81STony Nguyen first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | 
ICE_TX_FLAGS_IPV6); 1965a4e82a81STony Nguyen if (ip.v4->version == 4) 1966a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1967a4e82a81STony Nguyen if (ip.v6->version == 6) 1968a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1969a4e82a81STony Nguyen } 1970a4e82a81STony Nguyen 1971d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1972a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1973d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1974d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already, the only time we 1975d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 1976d76a60baSAnirudh Venkataramanan */ 1977d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1978d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1979d76a60baSAnirudh Venkataramanan else 1980d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1981d76a60baSAnirudh Venkataramanan 1982a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1983d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1984d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1985d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1986d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1987d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1988d76a60baSAnirudh Venkataramanan &frag_off); 1989d76a60baSAnirudh Venkataramanan } else { 1990d76a60baSAnirudh Venkataramanan return -1; 1991d76a60baSAnirudh Venkataramanan } 1992d76a60baSAnirudh Venkataramanan 1993d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1994d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1995d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1996d76a60baSAnirudh Venkataramanan 1997d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1998d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1999d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 2000d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 2001d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 2002d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 2003d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 2004d76a60baSAnirudh Venkataramanan break; 2005d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 2006d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 2007d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 2008d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 2009d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 2010d76a60baSAnirudh Venkataramanan break; 2011d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 2012cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 2013cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 2014cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 2015cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 2016cf909e19SAnirudh Venkataramanan break; 2017cf909e19SAnirudh Venkataramanan 2018d76a60baSAnirudh Venkataramanan default: 2019d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 2020d76a60baSAnirudh Venkataramanan return -1; 2021d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 2022d76a60baSAnirudh Venkataramanan return 0; 
2023d76a60baSAnirudh Venkataramanan 	}
2024d76a60baSAnirudh Venkataramanan 
2025d76a60baSAnirudh Venkataramanan 	off->td_cmd |= cmd;
2026d76a60baSAnirudh Venkataramanan 	off->td_offset |= offset;
2027d76a60baSAnirudh Venkataramanan 	return 1;
2028d76a60baSAnirudh Venkataramanan }
2029d76a60baSAnirudh Venkataramanan 
2030d76a60baSAnirudh Venkataramanan /**
2031f9867df6SAnirudh Venkataramanan  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2032d76a60baSAnirudh Venkataramanan  * @tx_ring: ring to send buffer on
2033d76a60baSAnirudh Venkataramanan  * @first: pointer to struct ice_tx_buf
2034d76a60baSAnirudh Venkataramanan  *
2035d76a60baSAnirudh Venkataramanan  * Checks the skb and sets up the corresponding generic transmit flags
2036d76a60baSAnirudh Venkataramanan  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2037d76a60baSAnirudh Venkataramanan  */
20382bb19d6eSBrett Creeley static void
2039d76a60baSAnirudh Venkataramanan ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
2040d76a60baSAnirudh Venkataramanan {
2041d76a60baSAnirudh Venkataramanan 	struct sk_buff *skb = first->skb;
2042d76a60baSAnirudh Venkataramanan 
20432bb19d6eSBrett Creeley 	/* nothing left to do, software offloaded VLAN */
20442bb19d6eSBrett Creeley 	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
20452bb19d6eSBrett Creeley 		return;
20462bb19d6eSBrett Creeley 
20472bb19d6eSBrett Creeley 	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
20482bb19d6eSBrett Creeley 	 * insertion for 802.1AD is not supported
2049d76a60baSAnirudh Venkataramanan 	 */
2050d76a60baSAnirudh Venkataramanan 	if (skb_vlan_tag_present(skb)) {
2051d76a60baSAnirudh Venkataramanan 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
2052d76a60baSAnirudh Venkataramanan 		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
2053d76a60baSAnirudh Venkataramanan 	}
2054d76a60baSAnirudh Venkataramanan 
20552bb19d6eSBrett Creeley 	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2056d76a60baSAnirudh Venkataramanan }
2057d76a60baSAnirudh Venkataramanan 
2058d76a60baSAnirudh Venkataramanan /**
2059d76a60baSAnirudh Venkataramanan  * ice_tso - computes mss and TSO length to prepare for TSO
2060d76a60baSAnirudh Venkataramanan  * @first: pointer to struct ice_tx_buf
2061d76a60baSAnirudh Venkataramanan  * @off: pointer to struct that holds offload parameters
2062d76a60baSAnirudh Venkataramanan  *
2063d76a60baSAnirudh Venkataramanan  * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
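 *
 * As a worked illustration (all sizes assumed here, not taken from the
 * driver): a linear 5846-byte TCP skb with 54 bytes of Ethernet/IPv4/TCP
 * headers and gso_size 1448 gives gso_segs = 4, header_len = 54,
 * cd_tso_len = 5792 and cd_mss = 1448, and bytecount grows by
 * (4 - 1) * 54 to account for the replicated headers.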
2064d76a60baSAnirudh Venkataramanan */ 2065d76a60baSAnirudh Venkataramanan static 2066d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2067d76a60baSAnirudh Venkataramanan { 2068d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 2069d76a60baSAnirudh Venkataramanan union { 2070d76a60baSAnirudh Venkataramanan struct iphdr *v4; 2071d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 2072d76a60baSAnirudh Venkataramanan unsigned char *hdr; 2073d76a60baSAnirudh Venkataramanan } ip; 2074d76a60baSAnirudh Venkataramanan union { 2075d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 2076a54e3b8cSBrett Creeley struct udphdr *udp; 2077d76a60baSAnirudh Venkataramanan unsigned char *hdr; 2078d76a60baSAnirudh Venkataramanan } l4; 2079d76a60baSAnirudh Venkataramanan u64 cd_mss, cd_tso_len; 208088865fc4SKarol Kolacinski u32 paylen; 208188865fc4SKarol Kolacinski u8 l4_start; 2082d76a60baSAnirudh Venkataramanan int err; 2083d76a60baSAnirudh Venkataramanan 2084d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 2085d76a60baSAnirudh Venkataramanan return 0; 2086d76a60baSAnirudh Venkataramanan 2087d76a60baSAnirudh Venkataramanan if (!skb_is_gso(skb)) 2088d76a60baSAnirudh Venkataramanan return 0; 2089d76a60baSAnirudh Venkataramanan 2090d76a60baSAnirudh Venkataramanan err = skb_cow_head(skb, 0); 2091d76a60baSAnirudh Venkataramanan if (err < 0) 2092d76a60baSAnirudh Venkataramanan return err; 2093d76a60baSAnirudh Venkataramanan 2094c3a6825eSBruce Allan /* cppcheck-suppress unreadVariable */ 2095d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 2096d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 2097d76a60baSAnirudh Venkataramanan 2098d76a60baSAnirudh Venkataramanan /* initialize outer IP header fields */ 2099d76a60baSAnirudh Venkataramanan if (ip.v4->version == 4) { 2100d76a60baSAnirudh Venkataramanan ip.v4->tot_len = 0; 2101d76a60baSAnirudh Venkataramanan ip.v4->check = 0; 2102d76a60baSAnirudh Venkataramanan } else { 2103d76a60baSAnirudh Venkataramanan ip.v6->payload_len = 0; 2104d76a60baSAnirudh Venkataramanan } 2105d76a60baSAnirudh Venkataramanan 2106a4e82a81STony Nguyen if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 2107a4e82a81STony Nguyen SKB_GSO_GRE_CSUM | 2108a4e82a81STony Nguyen SKB_GSO_IPXIP4 | 2109a4e82a81STony Nguyen SKB_GSO_IPXIP6 | 2110a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL | 2111a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL_CSUM)) { 2112a4e82a81STony Nguyen if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 2113a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 2114a4e82a81STony Nguyen l4.udp->len = 0; 2115a4e82a81STony Nguyen 2116a4e82a81STony Nguyen /* determine offset of outer transport header */ 211788865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data); 2118a4e82a81STony Nguyen 2119a4e82a81STony Nguyen /* remove payload length from outer checksum */ 2120a4e82a81STony Nguyen paylen = skb->len - l4_start; 2121a4e82a81STony Nguyen csum_replace_by_diff(&l4.udp->check, 2122a4e82a81STony Nguyen (__force __wsum)htonl(paylen)); 2123a4e82a81STony Nguyen } 2124a4e82a81STony Nguyen 2125a4e82a81STony Nguyen /* reset pointers to inner headers */ 2126a4e82a81STony Nguyen 2127a4e82a81STony Nguyen /* cppcheck-suppress unreadVariable */ 2128a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 2129a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 2130a4e82a81STony Nguyen 2131a4e82a81STony Nguyen /* initialize inner IP header fields */ 
2132a4e82a81STony Nguyen 		if (ip.v4->version == 4) {
2133a4e82a81STony Nguyen 			ip.v4->tot_len = 0;
2134a4e82a81STony Nguyen 			ip.v4->check = 0;
2135a4e82a81STony Nguyen 		} else {
2136a4e82a81STony Nguyen 			ip.v6->payload_len = 0;
2137a4e82a81STony Nguyen 		}
2138a4e82a81STony Nguyen 	}
2139a4e82a81STony Nguyen 
2140d76a60baSAnirudh Venkataramanan 	/* determine offset of transport header */
214188865fc4SKarol Kolacinski 	l4_start = (u8)(l4.hdr - skb->data);
2142d76a60baSAnirudh Venkataramanan 
2143d76a60baSAnirudh Venkataramanan 	/* remove payload length from checksum */
2144d76a60baSAnirudh Venkataramanan 	paylen = skb->len - l4_start;
2145d76a60baSAnirudh Venkataramanan 
2146a54e3b8cSBrett Creeley 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2147a54e3b8cSBrett Creeley 		csum_replace_by_diff(&l4.udp->check,
2148a54e3b8cSBrett Creeley 				     (__force __wsum)htonl(paylen));
2149a54e3b8cSBrett Creeley 		/* compute length of UDP segmentation header */
215088865fc4SKarol Kolacinski 		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2151a54e3b8cSBrett Creeley 	} else {
2152a54e3b8cSBrett Creeley 		csum_replace_by_diff(&l4.tcp->check,
2153a54e3b8cSBrett Creeley 				     (__force __wsum)htonl(paylen));
2154a54e3b8cSBrett Creeley 		/* compute length of TCP segmentation header */
215588865fc4SKarol Kolacinski 		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2156a54e3b8cSBrett Creeley 	}
2157d76a60baSAnirudh Venkataramanan 
2158d76a60baSAnirudh Venkataramanan 	/* update gso_segs and bytecount */
2159d76a60baSAnirudh Venkataramanan 	first->gso_segs = skb_shinfo(skb)->gso_segs;
2160d944b469SBrett Creeley 	first->bytecount += (first->gso_segs - 1) * off->header_len;
2161d76a60baSAnirudh Venkataramanan 
2162d76a60baSAnirudh Venkataramanan 	cd_tso_len = skb->len - off->header_len;
2163d76a60baSAnirudh Venkataramanan 	cd_mss = skb_shinfo(skb)->gso_size;
2164d76a60baSAnirudh Venkataramanan 
2165d76a60baSAnirudh Venkataramanan 	/* record cdesc_qw1 with TSO parameters */
2166e65e9e15SBruce Allan 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2167d76a60baSAnirudh Venkataramanan 			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2168d76a60baSAnirudh Venkataramanan 			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2169e65e9e15SBruce Allan 			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2170d76a60baSAnirudh Venkataramanan 	first->tx_flags |= ICE_TX_FLAGS_TSO;
2171d76a60baSAnirudh Venkataramanan 	return 1;
2172d76a60baSAnirudh Venkataramanan }
2173d76a60baSAnirudh Venkataramanan 
2174d76a60baSAnirudh Venkataramanan /**
21752b245cb2SAnirudh Venkataramanan  * ice_txd_use_count - estimate the number of descriptors needed for Tx
21762b245cb2SAnirudh Venkataramanan  * @size: transmit request size in bytes
21772b245cb2SAnirudh Venkataramanan  *
21782b245cb2SAnirudh Venkataramanan  * Due to hardware alignment restrictions (4K alignment), we need to
21792b245cb2SAnirudh Venkataramanan  * assume that we can have no more than 12K of data per descriptor, even
21802b245cb2SAnirudh Venkataramanan  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
21812b245cb2SAnirudh Venkataramanan  * Thus, we need to divide by 12K. But division is slow! Instead,
21822b245cb2SAnirudh Venkataramanan  * we decompose the operation into shifts and one relatively cheap
21832b245cb2SAnirudh Venkataramanan  * multiply operation.
21842b245cb2SAnirudh Venkataramanan * 21852b245cb2SAnirudh Venkataramanan * To divide by 12K, we first divide by 4K, then divide by 3: 21862b245cb2SAnirudh Venkataramanan * To divide by 4K, shift right by 12 bits 21872b245cb2SAnirudh Venkataramanan * To divide by 3, multiply by 85, then divide by 256 21882b245cb2SAnirudh Venkataramanan * (Divide by 256 is done by shifting right by 8 bits) 21892b245cb2SAnirudh Venkataramanan * Finally, we add one to round up. Because 256 isn't an exact multiple of 21902b245cb2SAnirudh Venkataramanan * 3, we'll underestimate near each multiple of 12K. This is actually more 21912b245cb2SAnirudh Venkataramanan * accurate as we have 4K - 1 of wiggle room that we can fit into the last 21922b245cb2SAnirudh Venkataramanan * segment. For our purposes this is accurate out to 1M which is orders of 21932b245cb2SAnirudh Venkataramanan * magnitude greater than our largest possible GSO size. 21942b245cb2SAnirudh Venkataramanan * 21952b245cb2SAnirudh Venkataramanan * This would then be implemented as: 2196c585ea42SBrett Creeley * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 21972b245cb2SAnirudh Venkataramanan * 21982b245cb2SAnirudh Venkataramanan * Since multiplication and division are commutative, we can reorder 21992b245cb2SAnirudh Venkataramanan * operations into: 2200c585ea42SBrett Creeley * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 22012b245cb2SAnirudh Venkataramanan */ 22022b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size) 22032b245cb2SAnirudh Venkataramanan { 2204c585ea42SBrett Creeley return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 22052b245cb2SAnirudh Venkataramanan } 22062b245cb2SAnirudh Venkataramanan 22072b245cb2SAnirudh Venkataramanan /** 2208d337f2afSAnirudh Venkataramanan * ice_xmit_desc_count - calculate number of Tx descriptors needed 22092b245cb2SAnirudh Venkataramanan * @skb: send buffer 22102b245cb2SAnirudh Venkataramanan * 22112b245cb2SAnirudh Venkataramanan * Returns number of data descriptors needed for this skb. 
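 *
 * A quick worked check (sizes assumed for illustration, and taking
 * ICE_DESCS_FOR_SKB_DATA_PTR as 1): a 1500-byte linear area and three
 * 4096-byte frags each need ((size * 85) >> 20) + 1 = 1 descriptor, for a
 * total of 4, while a single 65536-byte frag needs
 * ((65536 * 85) >> 20) + 1 = 6, i.e. the ceiling of 65536 / 12288.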
22122b245cb2SAnirudh Venkataramanan */ 22132b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 22142b245cb2SAnirudh Venkataramanan { 2215d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 22162b245cb2SAnirudh Venkataramanan unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 22172b245cb2SAnirudh Venkataramanan unsigned int count = 0, size = skb_headlen(skb); 22182b245cb2SAnirudh Venkataramanan 22192b245cb2SAnirudh Venkataramanan for (;;) { 22202b245cb2SAnirudh Venkataramanan count += ice_txd_use_count(size); 22212b245cb2SAnirudh Venkataramanan 22222b245cb2SAnirudh Venkataramanan if (!nr_frags--) 22232b245cb2SAnirudh Venkataramanan break; 22242b245cb2SAnirudh Venkataramanan 22252b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag++); 22262b245cb2SAnirudh Venkataramanan } 22272b245cb2SAnirudh Venkataramanan 22282b245cb2SAnirudh Venkataramanan return count; 22292b245cb2SAnirudh Venkataramanan } 22302b245cb2SAnirudh Venkataramanan 22312b245cb2SAnirudh Venkataramanan /** 22322b245cb2SAnirudh Venkataramanan * __ice_chk_linearize - Check if there are more than 8 buffers per packet 22332b245cb2SAnirudh Venkataramanan * @skb: send buffer 22342b245cb2SAnirudh Venkataramanan * 22352b245cb2SAnirudh Venkataramanan * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 22362b245cb2SAnirudh Venkataramanan * and so we need to figure out the cases where we need to linearize the skb. 22372b245cb2SAnirudh Venkataramanan * 22382b245cb2SAnirudh Venkataramanan * For TSO we need to count the TSO header and segment payload separately. 22392b245cb2SAnirudh Venkataramanan * As such we need to check cases where we have 7 fragments or more as we 22402b245cb2SAnirudh Venkataramanan * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 22412b245cb2SAnirudh Venkataramanan * the segment payload in the first descriptor, and another 7 for the 22422b245cb2SAnirudh Venkataramanan * fragments. 22432b245cb2SAnirudh Venkataramanan */ 22442b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb) 22452b245cb2SAnirudh Venkataramanan { 2246d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag, *stale; 22472b245cb2SAnirudh Venkataramanan int nr_frags, sum; 22482b245cb2SAnirudh Venkataramanan 22492b245cb2SAnirudh Venkataramanan /* no need to check if number of frags is less than 7 */ 22502b245cb2SAnirudh Venkataramanan nr_frags = skb_shinfo(skb)->nr_frags; 22512b245cb2SAnirudh Venkataramanan if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 22522b245cb2SAnirudh Venkataramanan return false; 22532b245cb2SAnirudh Venkataramanan 22542b245cb2SAnirudh Venkataramanan /* We need to walk through the list and validate that each group 22552b245cb2SAnirudh Venkataramanan * of 6 fragments totals at least gso_size. 22562b245cb2SAnirudh Venkataramanan */ 22572b245cb2SAnirudh Venkataramanan nr_frags -= ICE_MAX_BUF_TXD - 2; 22582b245cb2SAnirudh Venkataramanan frag = &skb_shinfo(skb)->frags[0]; 22592b245cb2SAnirudh Venkataramanan 22602b245cb2SAnirudh Venkataramanan /* Initialize size to the negative value of gso_size minus 1. We 22614ee656bbSTony Nguyen * use this as the worst case scenario in which the frag ahead 22622b245cb2SAnirudh Venkataramanan * of us only provides one byte which is why we are limited to 6 22632b245cb2SAnirudh Venkataramanan * descriptors for a single transmit as the header and previous 22642b245cb2SAnirudh Venkataramanan * fragment are already consuming 2 descriptors. 
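 *
 * For example (frag sizes assumed purely for illustration): with
 * gso_size = 9000 and seven 1400-byte frags, sum starts at 1 - 9000 and
 * only reaches 1 - 9000 + 6 * 1400 = -599 after the first window of six
 * frags, so no six-frag window can carry a full segment and the skb must
 * be linearized.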
22652b245cb2SAnirudh Venkataramanan */ 22662b245cb2SAnirudh Venkataramanan sum = 1 - skb_shinfo(skb)->gso_size; 22672b245cb2SAnirudh Venkataramanan 22682b245cb2SAnirudh Venkataramanan /* Add size of frags 0 through 4 to create our initial sum */ 22692b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22702b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22712b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22722b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22732b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22742b245cb2SAnirudh Venkataramanan 22752b245cb2SAnirudh Venkataramanan /* Walk through fragments adding latest fragment, testing it, and 22762b245cb2SAnirudh Venkataramanan * then removing stale fragments from the sum. 22772b245cb2SAnirudh Venkataramanan */ 22780a37abfaSKiran Patil for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 22790a37abfaSKiran Patil int stale_size = skb_frag_size(stale); 22800a37abfaSKiran Patil 22812b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 22822b245cb2SAnirudh Venkataramanan 22830a37abfaSKiran Patil /* The stale fragment may present us with a smaller 22840a37abfaSKiran Patil * descriptor than the actual fragment size. To account 22850a37abfaSKiran Patil * for that we need to remove all the data on the front and 22860a37abfaSKiran Patil * figure out what the remainder would be in the last 22870a37abfaSKiran Patil * descriptor associated with the fragment. 22880a37abfaSKiran Patil */ 22890a37abfaSKiran Patil if (stale_size > ICE_MAX_DATA_PER_TXD) { 22900a37abfaSKiran Patil int align_pad = -(skb_frag_off(stale)) & 22910a37abfaSKiran Patil (ICE_MAX_READ_REQ_SIZE - 1); 22920a37abfaSKiran Patil 22930a37abfaSKiran Patil sum -= align_pad; 22940a37abfaSKiran Patil stale_size -= align_pad; 22950a37abfaSKiran Patil 22960a37abfaSKiran Patil do { 22970a37abfaSKiran Patil sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; 22980a37abfaSKiran Patil stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; 22990a37abfaSKiran Patil } while (stale_size > ICE_MAX_DATA_PER_TXD); 23000a37abfaSKiran Patil } 23010a37abfaSKiran Patil 23022b245cb2SAnirudh Venkataramanan /* if sum is negative we failed to make sufficient progress */ 23032b245cb2SAnirudh Venkataramanan if (sum < 0) 23042b245cb2SAnirudh Venkataramanan return true; 23052b245cb2SAnirudh Venkataramanan 23062b245cb2SAnirudh Venkataramanan if (!nr_frags--) 23072b245cb2SAnirudh Venkataramanan break; 23082b245cb2SAnirudh Venkataramanan 23090a37abfaSKiran Patil sum -= stale_size; 23102b245cb2SAnirudh Venkataramanan } 23112b245cb2SAnirudh Venkataramanan 23122b245cb2SAnirudh Venkataramanan return false; 23132b245cb2SAnirudh Venkataramanan } 23142b245cb2SAnirudh Venkataramanan 23152b245cb2SAnirudh Venkataramanan /** 23162b245cb2SAnirudh Venkataramanan * ice_chk_linearize - Check if there are more than 8 fragments per packet 23172b245cb2SAnirudh Venkataramanan * @skb: send buffer 23182b245cb2SAnirudh Venkataramanan * @count: number of buffers used 23192b245cb2SAnirudh Venkataramanan * 23202b245cb2SAnirudh Venkataramanan * Note: Our HW can't scatter-gather more than 8 fragments to build 23212b245cb2SAnirudh Venkataramanan * a packet on the wire and so we need to figure out the cases where we 23222b245cb2SAnirudh Venkataramanan * need to linearize the skb. 
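 *
 * In short: a non-TSO send that needs more than ICE_MAX_BUF_TXD (8)
 * buffers is linearized, while a TSO send that needs that many defers to
 * __ice_chk_linearize() to check each window of fragments.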
23232b245cb2SAnirudh Venkataramanan */ 23242b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 23252b245cb2SAnirudh Venkataramanan { 23262b245cb2SAnirudh Venkataramanan /* Both TSO and single send will work if count is less than 8 */ 23272b245cb2SAnirudh Venkataramanan if (likely(count < ICE_MAX_BUF_TXD)) 23282b245cb2SAnirudh Venkataramanan return false; 23292b245cb2SAnirudh Venkataramanan 23302b245cb2SAnirudh Venkataramanan if (skb_is_gso(skb)) 23312b245cb2SAnirudh Venkataramanan return __ice_chk_linearize(skb); 23322b245cb2SAnirudh Venkataramanan 23332b245cb2SAnirudh Venkataramanan /* we can support up to 8 data buffers for a single send */ 23342b245cb2SAnirudh Venkataramanan return count != ICE_MAX_BUF_TXD; 23352b245cb2SAnirudh Venkataramanan } 23362b245cb2SAnirudh Venkataramanan 23372b245cb2SAnirudh Venkataramanan /** 23382b245cb2SAnirudh Venkataramanan * ice_xmit_frame_ring - Sends buffer on Tx ring 23392b245cb2SAnirudh Venkataramanan * @skb: send buffer 23402b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 23412b245cb2SAnirudh Venkataramanan * 23422b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 23432b245cb2SAnirudh Venkataramanan */ 23442b245cb2SAnirudh Venkataramanan static netdev_tx_t 23452b245cb2SAnirudh Venkataramanan ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) 23462b245cb2SAnirudh Venkataramanan { 2347d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params offload = { 0 }; 23480c3a6101SDave Ertman struct ice_vsi *vsi = tx_ring->vsi; 23492b245cb2SAnirudh Venkataramanan struct ice_tx_buf *first; 23502b245cb2SAnirudh Venkataramanan unsigned int count; 2351d76a60baSAnirudh Venkataramanan int tso, csum; 23522b245cb2SAnirudh Venkataramanan 23532b245cb2SAnirudh Venkataramanan count = ice_xmit_desc_count(skb); 23542b245cb2SAnirudh Venkataramanan if (ice_chk_linearize(skb, count)) { 23552b245cb2SAnirudh Venkataramanan if (__skb_linearize(skb)) 23562b245cb2SAnirudh Venkataramanan goto out_drop; 23572b245cb2SAnirudh Venkataramanan count = ice_txd_use_count(skb->len); 23582b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_linearize++; 23592b245cb2SAnirudh Venkataramanan } 23602b245cb2SAnirudh Venkataramanan 23612b245cb2SAnirudh Venkataramanan /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 23622b245cb2SAnirudh Venkataramanan * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 23632b245cb2SAnirudh Venkataramanan * + 4 desc gap to avoid the cache line where head is, 23642b245cb2SAnirudh Venkataramanan * + 1 desc for context descriptor, 23652b245cb2SAnirudh Venkataramanan * otherwise try next time 23662b245cb2SAnirudh Venkataramanan */ 2367c585ea42SBrett Creeley if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2368c585ea42SBrett Creeley ICE_DESCS_FOR_CTX_DESC)) { 23692b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_busy++; 23702b245cb2SAnirudh Venkataramanan return NETDEV_TX_BUSY; 23712b245cb2SAnirudh Venkataramanan } 23722b245cb2SAnirudh Venkataramanan 2373d76a60baSAnirudh Venkataramanan offload.tx_ring = tx_ring; 2374d76a60baSAnirudh Venkataramanan 23752b245cb2SAnirudh Venkataramanan /* record the location of the first descriptor for this packet */ 23762b245cb2SAnirudh Venkataramanan first = &tx_ring->tx_buf[tx_ring->next_to_use]; 23772b245cb2SAnirudh Venkataramanan first->skb = skb; 23782b245cb2SAnirudh Venkataramanan first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 23792b245cb2SAnirudh Venkataramanan 
first->gso_segs = 1; 2380d76a60baSAnirudh Venkataramanan first->tx_flags = 0; 23812b245cb2SAnirudh Venkataramanan 2382d76a60baSAnirudh Venkataramanan /* prepare the VLAN tagging flags for Tx */ 23832bb19d6eSBrett Creeley ice_tx_prepare_vlan_flags(tx_ring, first); 2384d76a60baSAnirudh Venkataramanan 2385d76a60baSAnirudh Venkataramanan /* set up TSO offload */ 2386d76a60baSAnirudh Venkataramanan tso = ice_tso(first, &offload); 2387d76a60baSAnirudh Venkataramanan if (tso < 0) 2388d76a60baSAnirudh Venkataramanan goto out_drop; 2389d76a60baSAnirudh Venkataramanan 2390d76a60baSAnirudh Venkataramanan /* always set up Tx checksum offload */ 2391d76a60baSAnirudh Venkataramanan csum = ice_tx_csum(first, &offload); 2392d76a60baSAnirudh Venkataramanan if (csum < 0) 2393d76a60baSAnirudh Venkataramanan goto out_drop; 2394d76a60baSAnirudh Venkataramanan 23950c3a6101SDave Ertman /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 23960c3a6101SDave Ertman if (unlikely(skb->priority == TC_PRIO_CONTROL && 23970c3a6101SDave Ertman vsi->type == ICE_VSI_PF && 2398fc2d1165SChinh T Cao vsi->port_info->qos_cfg.is_sw_lldp)) 23990c3a6101SDave Ertman offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 24000c3a6101SDave Ertman ICE_TX_CTX_DESC_SWTCH_UPLINK << 24010c3a6101SDave Ertman ICE_TXD_CTX_QW1_CMD_S); 24020c3a6101SDave Ertman 24030c3a6101SDave Ertman if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2404d76a60baSAnirudh Venkataramanan struct ice_tx_ctx_desc *cdesc; 240588865fc4SKarol Kolacinski u16 i = tx_ring->next_to_use; 2406d76a60baSAnirudh Venkataramanan 2407d76a60baSAnirudh Venkataramanan /* grab the next descriptor */ 2408d76a60baSAnirudh Venkataramanan cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2409d76a60baSAnirudh Venkataramanan i++; 2410d76a60baSAnirudh Venkataramanan tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2411d76a60baSAnirudh Venkataramanan 2412d76a60baSAnirudh Venkataramanan /* setup context descriptor */ 2413d76a60baSAnirudh Venkataramanan cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2414d76a60baSAnirudh Venkataramanan cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2415d76a60baSAnirudh Venkataramanan cdesc->rsvd = cpu_to_le16(0); 2416d76a60baSAnirudh Venkataramanan cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2417d76a60baSAnirudh Venkataramanan } 2418d76a60baSAnirudh Venkataramanan 2419d76a60baSAnirudh Venkataramanan ice_tx_map(tx_ring, first, &offload); 24202b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 24212b245cb2SAnirudh Venkataramanan 24222b245cb2SAnirudh Venkataramanan out_drop: 24232b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 24242b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 24252b245cb2SAnirudh Venkataramanan } 24262b245cb2SAnirudh Venkataramanan 24272b245cb2SAnirudh Venkataramanan /** 24282b245cb2SAnirudh Venkataramanan * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 24292b245cb2SAnirudh Venkataramanan * @skb: send buffer 24302b245cb2SAnirudh Venkataramanan * @netdev: network interface device structure 24312b245cb2SAnirudh Venkataramanan * 24322b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 24332b245cb2SAnirudh Venkataramanan */ 24342b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 24352b245cb2SAnirudh Venkataramanan { 24362b245cb2SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 24372b245cb2SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 24382b245cb2SAnirudh Venkataramanan struct ice_ring *tx_ring; 24392b245cb2SAnirudh Venkataramanan 24402b245cb2SAnirudh Venkataramanan tx_ring = vsi->tx_rings[skb->queue_mapping]; 24412b245cb2SAnirudh Venkataramanan 24422b245cb2SAnirudh Venkataramanan /* hardware can't handle really short frames, hardware padding works 24432b245cb2SAnirudh Venkataramanan * beyond this point 24442b245cb2SAnirudh Venkataramanan */ 24452b245cb2SAnirudh Venkataramanan if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 24462b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 24472b245cb2SAnirudh Venkataramanan 24482b245cb2SAnirudh Venkataramanan return ice_xmit_frame_ring(skb, tx_ring); 24492b245cb2SAnirudh Venkataramanan } 2450148beb61SHenry Tieman 2451148beb61SHenry Tieman /** 2452148beb61SHenry Tieman * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2453148beb61SHenry Tieman * @tx_ring: tx_ring to clean 2454148beb61SHenry Tieman */ 2455148beb61SHenry Tieman void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring) 2456148beb61SHenry Tieman { 2457148beb61SHenry Tieman struct ice_vsi *vsi = tx_ring->vsi; 2458148beb61SHenry Tieman s16 i = tx_ring->next_to_clean; 2459148beb61SHenry Tieman int budget = ICE_DFLT_IRQ_WORK; 2460148beb61SHenry Tieman struct ice_tx_desc *tx_desc; 2461148beb61SHenry Tieman struct ice_tx_buf *tx_buf; 2462148beb61SHenry Tieman 2463148beb61SHenry Tieman tx_buf = &tx_ring->tx_buf[i]; 2464148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, i); 2465148beb61SHenry Tieman i -= tx_ring->count; 2466148beb61SHenry Tieman 2467148beb61SHenry Tieman do { 2468148beb61SHenry Tieman struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2469148beb61SHenry Tieman 2470148beb61SHenry Tieman /* if next_to_watch is not set then there is no pending work */ 2471148beb61SHenry Tieman if (!eop_desc) 2472148beb61SHenry Tieman break; 2473148beb61SHenry Tieman 
2474148beb61SHenry Tieman /* prevent any other reads prior to eop_desc */ 2475148beb61SHenry Tieman smp_rmb(); 2476148beb61SHenry Tieman 2477148beb61SHenry Tieman /* if the descriptor isn't done, no work to do */ 2478148beb61SHenry Tieman if (!(eop_desc->cmd_type_offset_bsz & 2479148beb61SHenry Tieman cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2480148beb61SHenry Tieman break; 2481148beb61SHenry Tieman 2482148beb61SHenry Tieman /* clear next_to_watch to prevent false hangs */ 2483148beb61SHenry Tieman tx_buf->next_to_watch = NULL; 2484148beb61SHenry Tieman tx_desc->buf_addr = 0; 2485148beb61SHenry Tieman tx_desc->cmd_type_offset_bsz = 0; 2486148beb61SHenry Tieman 2487148beb61SHenry Tieman /* move past filter desc */ 2488148beb61SHenry Tieman tx_buf++; 2489148beb61SHenry Tieman tx_desc++; 2490148beb61SHenry Tieman i++; 2491148beb61SHenry Tieman if (unlikely(!i)) { 2492148beb61SHenry Tieman i -= tx_ring->count; 2493148beb61SHenry Tieman tx_buf = tx_ring->tx_buf; 2494148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, 0); 2495148beb61SHenry Tieman } 2496148beb61SHenry Tieman 2497148beb61SHenry Tieman /* unmap the data header */ 2498148beb61SHenry Tieman if (dma_unmap_len(tx_buf, len)) 2499148beb61SHenry Tieman dma_unmap_single(tx_ring->dev, 2500148beb61SHenry Tieman dma_unmap_addr(tx_buf, dma), 2501148beb61SHenry Tieman dma_unmap_len(tx_buf, len), 2502148beb61SHenry Tieman DMA_TO_DEVICE); 2503148beb61SHenry Tieman if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2504148beb61SHenry Tieman devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2505148beb61SHenry Tieman 2506148beb61SHenry Tieman /* clear next_to_watch to prevent false hangs */ 2507148beb61SHenry Tieman tx_buf->raw_buf = NULL; 2508148beb61SHenry Tieman tx_buf->tx_flags = 0; 2509148beb61SHenry Tieman tx_buf->next_to_watch = NULL; 2510148beb61SHenry Tieman dma_unmap_len_set(tx_buf, len, 0); 2511148beb61SHenry Tieman tx_desc->buf_addr = 0; 2512148beb61SHenry Tieman tx_desc->cmd_type_offset_bsz = 0; 2513148beb61SHenry Tieman 2514148beb61SHenry Tieman /* move past eop_desc for start of next FD desc */ 2515148beb61SHenry Tieman tx_buf++; 2516148beb61SHenry Tieman tx_desc++; 2517148beb61SHenry Tieman i++; 2518148beb61SHenry Tieman if (unlikely(!i)) { 2519148beb61SHenry Tieman i -= tx_ring->count; 2520148beb61SHenry Tieman tx_buf = tx_ring->tx_buf; 2521148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, 0); 2522148beb61SHenry Tieman } 2523148beb61SHenry Tieman 2524148beb61SHenry Tieman budget--; 2525148beb61SHenry Tieman } while (likely(budget)); 2526148beb61SHenry Tieman 2527148beb61SHenry Tieman i += tx_ring->count; 2528148beb61SHenry Tieman tx_ring->next_to_clean = i; 2529148beb61SHenry Tieman 2530148beb61SHenry Tieman /* re-enable interrupt if needed */ 2531148beb61SHenry Tieman ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2532148beb61SHenry Tieman } 2533