// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE 256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

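/**
 * txring_txq - Return the netdev Tx queue backing a Tx ring
 * @ring: Tx ring to find the netdev queue for
 */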
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

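/**
 * ice_rx_frame_truesize - Calculate the truesize charged for an Rx frame
 * @rx_ring: Rx ring the buffer belongs to
 * @size: packet length from the Rx descriptor (unused when PAGE_SIZE < 8192,
 *	  where the truesize is always half a page)
 */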
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	struct ice_ring *xdp_ring;
	int err, result;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		return result;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
	       int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void
ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	       int rx_buf_pgcnt)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
10332b245cb2SAnirudh Venkataramanan */ 1034c8b7abddSBruce Allan static bool 103529b82f2aSMaciej Fijalkowski ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) 10362b245cb2SAnirudh Venkataramanan { 10372b245cb2SAnirudh Venkataramanan /* if we are the last buffer then there is nothing else to do */ 10382b245cb2SAnirudh Venkataramanan #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 10392b245cb2SAnirudh Venkataramanan if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) 10402b245cb2SAnirudh Venkataramanan return false; 10412b245cb2SAnirudh Venkataramanan 10422b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.non_eop_descs++; 10432b245cb2SAnirudh Venkataramanan 10442b245cb2SAnirudh Venkataramanan return true; 10452b245cb2SAnirudh Venkataramanan } 10462b245cb2SAnirudh Venkataramanan 10472b245cb2SAnirudh Venkataramanan /** 10482b245cb2SAnirudh Venkataramanan * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1049d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 10502b245cb2SAnirudh Venkataramanan * @budget: Total limit on number of packets to process 10512b245cb2SAnirudh Venkataramanan * 10522b245cb2SAnirudh Venkataramanan * This function provides a "bounce buffer" approach to Rx interrupt 10532b245cb2SAnirudh Venkataramanan * processing. The advantage to this is that on systems that have 10542b245cb2SAnirudh Venkataramanan * expensive overhead for IOMMU access this provides a means of avoiding 10552b245cb2SAnirudh Venkataramanan * it by maintaining the mapping of the page to the system. 10562b245cb2SAnirudh Venkataramanan * 10572b245cb2SAnirudh Venkataramanan * Returns amount of work completed 10582b245cb2SAnirudh Venkataramanan */ 1059148beb61SHenry Tieman int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) 10602b245cb2SAnirudh Venkataramanan { 106143b5169dSLorenzo Bianconi unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; 10622b245cb2SAnirudh Venkataramanan u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 1063f1b1f409SMaciej Fijalkowski unsigned int offset = rx_ring->rx_offset; 1064efc2214bSMaciej Fijalkowski unsigned int xdp_res, xdp_xmit = 0; 106529b82f2aSMaciej Fijalkowski struct sk_buff *skb = rx_ring->skb; 1066efc2214bSMaciej Fijalkowski struct bpf_prog *xdp_prog = NULL; 1067efc2214bSMaciej Fijalkowski struct xdp_buff xdp; 1068cb7db356SBrett Creeley bool failure; 10692b245cb2SAnirudh Venkataramanan 1070d4ecdbf7SJesper Dangaard Brouer /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 1071d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE < 8192) 107243b5169dSLorenzo Bianconi frame_sz = ice_rx_frame_truesize(rx_ring, 0); 1073d4ecdbf7SJesper Dangaard Brouer #endif 107443b5169dSLorenzo Bianconi xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 1075efc2214bSMaciej Fijalkowski 1076f9867df6SAnirudh Venkataramanan /* start the loop to process Rx packets bounded by 'budget' */ 10772b245cb2SAnirudh Venkataramanan while (likely(total_rx_pkts < (unsigned int)budget)) { 10782b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 10796c869cb7SMaciej Fijalkowski struct ice_rx_buf *rx_buf; 1080be9df4afSLorenzo Bianconi unsigned char *hard_start; 10816c869cb7SMaciej Fijalkowski unsigned int size; 10822b245cb2SAnirudh Venkataramanan u16 stat_err_bits; 10831beb7830SBjörn Töpel int rx_buf_pgcnt; 10842b245cb2SAnirudh Venkataramanan u16 vlan_tag = 0; 1085d76a60baSAnirudh Venkataramanan u8 rx_ptype; 10862b245cb2SAnirudh Venkataramanan 1087f9867df6SAnirudh Venkataramanan /* get the 
Rx desc from Rx ring based on 'next_to_clean' */ 10882b245cb2SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 10892b245cb2SAnirudh Venkataramanan 10902b245cb2SAnirudh Venkataramanan /* status_error_len will always be zero for unused descriptors 10912b245cb2SAnirudh Venkataramanan * because it's cleared in cleanup, and overlaps with hdr_addr 10922b245cb2SAnirudh Venkataramanan * which is always zero because packet split isn't used, if the 10932b245cb2SAnirudh Venkataramanan * hardware wrote DD then it will be non-zero 10942b245cb2SAnirudh Venkataramanan */ 10952b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 10962b245cb2SAnirudh Venkataramanan if (!ice_test_staterr(rx_desc, stat_err_bits)) 10972b245cb2SAnirudh Venkataramanan break; 10982b245cb2SAnirudh Venkataramanan 10992b245cb2SAnirudh Venkataramanan /* This memory barrier is needed to keep us from reading 11002b245cb2SAnirudh Venkataramanan * any other fields out of the rx_desc until we know the 11012b245cb2SAnirudh Venkataramanan * DD bit is set. 11022b245cb2SAnirudh Venkataramanan */ 11032b245cb2SAnirudh Venkataramanan dma_rmb(); 11042b245cb2SAnirudh Venkataramanan 1105148beb61SHenry Tieman if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 1106d6218317SQi Zhang struct ice_vsi *ctrl_vsi = rx_ring->vsi; 1107d6218317SQi Zhang 1108d6218317SQi Zhang if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && 1109d6218317SQi Zhang ctrl_vsi->vf_id != ICE_INVAL_VFID) 1110d6218317SQi Zhang ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); 11111beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, NULL, 0); 1112148beb61SHenry Tieman cleaned_count++; 1113148beb61SHenry Tieman continue; 1114148beb61SHenry Tieman } 1115148beb61SHenry Tieman 11166c869cb7SMaciej Fijalkowski size = le16_to_cpu(rx_desc->wb.pkt_len) & 11176c869cb7SMaciej Fijalkowski ICE_RX_FLX_DESC_PKT_LEN_M; 11182b245cb2SAnirudh Venkataramanan 1119ac6f733aSMitch Williams /* retrieve a buffer from the ring */ 112029b82f2aSMaciej Fijalkowski rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); 1121ac6f733aSMitch Williams 1122efc2214bSMaciej Fijalkowski if (!size) { 1123efc2214bSMaciej Fijalkowski xdp.data = NULL; 1124efc2214bSMaciej Fijalkowski xdp.data_end = NULL; 1125aaf27254SMaciej Fijalkowski xdp.data_hard_start = NULL; 1126aaf27254SMaciej Fijalkowski xdp.data_meta = NULL; 1127efc2214bSMaciej Fijalkowski goto construct_skb; 1128efc2214bSMaciej Fijalkowski } 1129efc2214bSMaciej Fijalkowski 1130be9df4afSLorenzo Bianconi hard_start = page_address(rx_buf->page) + rx_buf->page_offset - 1131be9df4afSLorenzo Bianconi offset; 1132be9df4afSLorenzo Bianconi xdp_prepare_buff(&xdp, hard_start, offset, size, true); 1133d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE > 4096) 1134d4ecdbf7SJesper Dangaard Brouer /* At larger PAGE_SIZE, frame_sz depend on len size */ 1135d4ecdbf7SJesper Dangaard Brouer xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 1136d4ecdbf7SJesper Dangaard Brouer #endif 1137efc2214bSMaciej Fijalkowski 1138efc2214bSMaciej Fijalkowski rcu_read_lock(); 1139efc2214bSMaciej Fijalkowski xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1140efc2214bSMaciej Fijalkowski if (!xdp_prog) { 1141efc2214bSMaciej Fijalkowski rcu_read_unlock(); 1142efc2214bSMaciej Fijalkowski goto construct_skb; 1143efc2214bSMaciej Fijalkowski } 1144efc2214bSMaciej Fijalkowski 1145efc2214bSMaciej Fijalkowski xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); 1146efc2214bSMaciej Fijalkowski rcu_read_unlock(); 114759bb0808SMaciej Fijalkowski if (!xdp_res) 
114859bb0808SMaciej Fijalkowski goto construct_skb; 1149efc2214bSMaciej Fijalkowski if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1150efc2214bSMaciej Fijalkowski xdp_xmit |= xdp_res; 1151d4ecdbf7SJesper Dangaard Brouer ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 1152efc2214bSMaciej Fijalkowski } else { 1153efc2214bSMaciej Fijalkowski rx_buf->pagecnt_bias++; 1154efc2214bSMaciej Fijalkowski } 1155efc2214bSMaciej Fijalkowski total_rx_bytes += size; 1156efc2214bSMaciej Fijalkowski total_rx_pkts++; 1157efc2214bSMaciej Fijalkowski 1158efc2214bSMaciej Fijalkowski cleaned_count++; 11591beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1160efc2214bSMaciej Fijalkowski continue; 1161efc2214bSMaciej Fijalkowski construct_skb: 11621f45ebe0SMitch Williams if (skb) { 11637237f5b0SMaciej Fijalkowski ice_add_rx_frag(rx_ring, rx_buf, skb, size); 11641f45ebe0SMitch Williams } else if (likely(xdp.data)) { 11651f45ebe0SMitch Williams if (ice_ring_uses_build_skb(rx_ring)) 1166aaf27254SMaciej Fijalkowski skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1167712edbbbSMaciej Fijalkowski else 1168efc2214bSMaciej Fijalkowski skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 11691f45ebe0SMitch Williams } 1170712edbbbSMaciej Fijalkowski /* exit if we failed to retrieve a buffer */ 1171712edbbbSMaciej Fijalkowski if (!skb) { 1172712edbbbSMaciej Fijalkowski rx_ring->rx_stats.alloc_buf_failed++; 1173ac6f733aSMitch Williams if (rx_buf) 1174712edbbbSMaciej Fijalkowski rx_buf->pagecnt_bias++; 11752b245cb2SAnirudh Venkataramanan break; 1176712edbbbSMaciej Fijalkowski } 11772b245cb2SAnirudh Venkataramanan 11781beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 11792b245cb2SAnirudh Venkataramanan cleaned_count++; 11802b245cb2SAnirudh Venkataramanan 11812b245cb2SAnirudh Venkataramanan /* skip if it is NOP desc */ 118229b82f2aSMaciej Fijalkowski if (ice_is_non_eop(rx_ring, rx_desc)) 11832b245cb2SAnirudh Venkataramanan continue; 11842b245cb2SAnirudh Venkataramanan 11852b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 11862b245cb2SAnirudh Venkataramanan if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 11872b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 11882b245cb2SAnirudh Venkataramanan continue; 11892b245cb2SAnirudh Venkataramanan } 11902b245cb2SAnirudh Venkataramanan 11912b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 11922b245cb2SAnirudh Venkataramanan if (ice_test_staterr(rx_desc, stat_err_bits)) 11932b245cb2SAnirudh Venkataramanan vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 11942b245cb2SAnirudh Venkataramanan 1195133f4883SKrzysztof Kazimierczak /* pad the skb if needed, to make a valid ethernet frame */ 1196133f4883SKrzysztof Kazimierczak if (eth_skb_pad(skb)) { 11972b245cb2SAnirudh Venkataramanan skb = NULL; 11982b245cb2SAnirudh Venkataramanan continue; 11992b245cb2SAnirudh Venkataramanan } 12002b245cb2SAnirudh Venkataramanan 12012b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 12022b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 12032b245cb2SAnirudh Venkataramanan 1204d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 12056503b659SJesse Brandeburg rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 12066503b659SJesse Brandeburg ICE_RX_FLEX_DESC_PTYPE_M; 12076503b659SJesse Brandeburg 1208d76a60baSAnirudh Venkataramanan ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1209d76a60baSAnirudh Venkataramanan 
12102b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 12112b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 121229b82f2aSMaciej Fijalkowski skb = NULL; 12132b245cb2SAnirudh Venkataramanan 12142b245cb2SAnirudh Venkataramanan /* update budget accounting */ 12152b245cb2SAnirudh Venkataramanan total_rx_pkts++; 12162b245cb2SAnirudh Venkataramanan } 12172b245cb2SAnirudh Venkataramanan 1218cb7db356SBrett Creeley /* return up to cleaned_count buffers to hardware */ 1219cb7db356SBrett Creeley failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1220cb7db356SBrett Creeley 1221efc2214bSMaciej Fijalkowski if (xdp_prog) 1222efc2214bSMaciej Fijalkowski ice_finalize_xdp_rx(rx_ring, xdp_xmit); 122329b82f2aSMaciej Fijalkowski rx_ring->skb = skb; 1224efc2214bSMaciej Fijalkowski 12252d4238f5SKrzysztof Kazimierczak ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 12262b245cb2SAnirudh Venkataramanan 12272b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 12282b245cb2SAnirudh Venkataramanan return failure ? budget : (int)total_rx_pkts; 12292b245cb2SAnirudh Venkataramanan } 12302b245cb2SAnirudh Venkataramanan 12312b245cb2SAnirudh Venkataramanan /** 1232cdf1f1f1SJacob Keller * ice_net_dim - Update net DIM algorithm 1233cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt 1234711987bbSBrett Creeley * 1235cdf1f1f1SJacob Keller * Create a DIM sample and notify net_dim() so that it can possibly decide 1236cdf1f1f1SJacob Keller * a new ITR value based on incoming packets, bytes, and interrupts. 1237711987bbSBrett Creeley * 1238cdf1f1f1SJacob Keller * This function is a no-op if the ring is not configured to dynamic ITR. 1239711987bbSBrett Creeley */ 1240cdf1f1f1SJacob Keller static void ice_net_dim(struct ice_q_vector *q_vector) 124164a59d05SAnirudh Venkataramanan { 1242cdf1f1f1SJacob Keller struct ice_ring_container *tx = &q_vector->tx; 1243cdf1f1f1SJacob Keller struct ice_ring_container *rx = &q_vector->rx; 1244cdf1f1f1SJacob Keller 1245d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(tx)) { 1246cdf1f1f1SJacob Keller struct dim_sample dim_sample = {}; 1247cdf1f1f1SJacob Keller u64 packets = 0, bytes = 0; 1248cdf1f1f1SJacob Keller struct ice_ring *ring; 1249cdf1f1f1SJacob Keller 1250cdf1f1f1SJacob Keller ice_for_each_ring(ring, q_vector->tx) { 1251cdf1f1f1SJacob Keller packets += ring->stats.pkts; 1252cdf1f1f1SJacob Keller bytes += ring->stats.bytes; 125364a59d05SAnirudh Venkataramanan } 1254711987bbSBrett Creeley 1255cdf1f1f1SJacob Keller dim_update_sample(q_vector->total_events, packets, bytes, 1256cdf1f1f1SJacob Keller &dim_sample); 1257cdf1f1f1SJacob Keller 1258cdf1f1f1SJacob Keller net_dim(&tx->dim, dim_sample); 1259711987bbSBrett Creeley } 1260711987bbSBrett Creeley 1261d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(rx)) { 1262cdf1f1f1SJacob Keller struct dim_sample dim_sample = {}; 1263cdf1f1f1SJacob Keller u64 packets = 0, bytes = 0; 1264cdf1f1f1SJacob Keller struct ice_ring *ring; 1265cdf1f1f1SJacob Keller 1266cdf1f1f1SJacob Keller ice_for_each_ring(ring, q_vector->rx) { 1267cdf1f1f1SJacob Keller packets += ring->stats.pkts; 1268cdf1f1f1SJacob Keller bytes += ring->stats.bytes; 126964a59d05SAnirudh Venkataramanan } 127064a59d05SAnirudh Venkataramanan 1271cdf1f1f1SJacob Keller dim_update_sample(q_vector->total_events, packets, bytes, 1272cdf1f1f1SJacob Keller &dim_sample); 127364a59d05SAnirudh Venkataramanan 1274cdf1f1f1SJacob Keller net_dim(&rx->dim, dim_sample); 
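		/* net_dim() only consumes the sample here; if it settles on a
		 * new profile it schedules the container's dim work item,
		 * which programs the new ITR value outside of this hot path
		 * (the "out-of-band" update noted in ice_update_ena_itr()).
		 */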
127564a59d05SAnirudh Venkataramanan } 127664a59d05SAnirudh Venkataramanan } 127764a59d05SAnirudh Venkataramanan 12782b245cb2SAnirudh Venkataramanan /** 127963f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 128063f545edSBrett Creeley * @itr_idx: interrupt throttling index 128164a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 128263f545edSBrett Creeley */ 12838244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 128463f545edSBrett Creeley { 12852f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 128664a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 128764a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 128864a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 128964a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 129064a59d05SAnirudh Venkataramanan * prior to the ITR field. 129164a59d05SAnirudh Venkataramanan */ 129264a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 129364a59d05SAnirudh Venkataramanan 129463f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 129563f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 129664a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 129763f545edSBrett Creeley } 129863f545edSBrett Creeley 129963f545edSBrett Creeley /** 1300cdf1f1f1SJacob Keller * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt 1301cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt to enable 1302cdf1f1f1SJacob Keller * 1303cdf1f1f1SJacob Keller * Update the net_dim() algorithm and re-enable the interrupt associated with 1304cdf1f1f1SJacob Keller * this vector. 1305cdf1f1f1SJacob Keller * 1306cdf1f1f1SJacob Keller * If the VSI is down, the interrupt will not be re-enabled. 130763f545edSBrett Creeley */ 13082fb0821fSJesse Brandeburg static void ice_update_ena_itr(struct ice_q_vector *q_vector) 130963f545edSBrett Creeley { 13102fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 1311b7306b42SJesse Brandeburg bool wb_en = q_vector->wb_on_itr; 131263f545edSBrett Creeley u32 itr_val; 131363f545edSBrett Creeley 1314cdf1f1f1SJacob Keller if (test_bit(ICE_DOWN, vsi->state)) 1315cdf1f1f1SJacob Keller return; 13162ab28bb0SBrett Creeley 1317cdf1f1f1SJacob Keller /* When exiting WB_ON_ITR, let ITR resume its normal 1318cdf1f1f1SJacob Keller * interrupts-enabled path. 1319cdf1f1f1SJacob Keller */ 1320b7306b42SJesse Brandeburg if (wb_en) 1321cdf1f1f1SJacob Keller q_vector->wb_on_itr = false; 132264a59d05SAnirudh Venkataramanan 1323cdf1f1f1SJacob Keller /* This will do nothing if dynamic updates are not enabled. */ 1324cdf1f1f1SJacob Keller ice_net_dim(q_vector); 1325cdf1f1f1SJacob Keller 1326cdf1f1f1SJacob Keller /* net_dim() updates ITR out-of-band using a work item */ 132763f545edSBrett Creeley itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 1328b7306b42SJesse Brandeburg /* trigger an immediate software interrupt when exiting 1329b7306b42SJesse Brandeburg * busy poll, to make sure to catch any pending cleanups 1330b7306b42SJesse Brandeburg * that might have been missed due to interrupt state 1331b7306b42SJesse Brandeburg * transition. 
1332b7306b42SJesse Brandeburg */ 1333b7306b42SJesse Brandeburg if (wb_en) { 1334b7306b42SJesse Brandeburg itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | 1335b7306b42SJesse Brandeburg GLINT_DYN_CTL_SW_ITR_INDX_M | 1336b7306b42SJesse Brandeburg GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; 1337b7306b42SJesse Brandeburg } 13381d9f7ca3SJesse Brandeburg wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 133963f545edSBrett Creeley } 134063f545edSBrett Creeley 134163f545edSBrett Creeley /** 13422ab28bb0SBrett Creeley * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 13432ab28bb0SBrett Creeley * @q_vector: q_vector to set WB_ON_ITR on 13442ab28bb0SBrett Creeley * 13452ab28bb0SBrett Creeley * We need to tell hardware to write-back completed descriptors even when 13462ab28bb0SBrett Creeley * interrupts are disabled. Descriptors will be written back on cache line 13472ab28bb0SBrett Creeley * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 13481d9f7ca3SJesse Brandeburg * descriptors may not be written back if they don't fill a cache line until 13491d9f7ca3SJesse Brandeburg * the next interrupt. 13502ab28bb0SBrett Creeley * 13511d9f7ca3SJesse Brandeburg * This sets the write-back frequency to whatever was set previously for the 13521d9f7ca3SJesse Brandeburg * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we 13531d9f7ca3SJesse Brandeburg * aren't meddling with the INTENA_M bit. 13542ab28bb0SBrett Creeley */ 13552fb0821fSJesse Brandeburg static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 13562ab28bb0SBrett Creeley { 13572fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 13582fb0821fSJesse Brandeburg 13591d9f7ca3SJesse Brandeburg /* already in wb_on_itr mode no need to change it */ 1360cdf1f1f1SJacob Keller if (q_vector->wb_on_itr) 13612ab28bb0SBrett Creeley return; 13622ab28bb0SBrett Creeley 13631d9f7ca3SJesse Brandeburg /* use previously set ITR values for all of the ITR indices by 13641d9f7ca3SJesse Brandeburg * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and 13651d9f7ca3SJesse Brandeburg * be static in non-adaptive mode (user configured) 13661d9f7ca3SJesse Brandeburg */ 13672ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 13681d9f7ca3SJesse Brandeburg ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & 13691d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | 13701d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_WB_ON_ITR_M); 13712ab28bb0SBrett Creeley 1372cdf1f1f1SJacob Keller q_vector->wb_on_itr = true; 13732ab28bb0SBrett Creeley } 13742ab28bb0SBrett Creeley 13752ab28bb0SBrett Creeley /** 13762b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 13772b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 13782b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 13792b245cb2SAnirudh Venkataramanan * 13802b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 
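 *
 * Tx rings are always cleaned with the full @budget, while the Rx budget is
 * split evenly across the vector's Rx rings (but never below one packet per
 * ring) so that a single poll pass cannot starve any of them.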
13812b245cb2SAnirudh Venkataramanan * 13822b245cb2SAnirudh Venkataramanan * Returns the amount of work done 13832b245cb2SAnirudh Venkataramanan */ 13842b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 13852b245cb2SAnirudh Venkataramanan { 13862b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 13872b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 13882b245cb2SAnirudh Venkataramanan bool clean_complete = true; 13892b245cb2SAnirudh Venkataramanan struct ice_ring *ring; 13909118fcd5SBrett Creeley int budget_per_ring; 13912b245cb2SAnirudh Venkataramanan int work_done = 0; 13922b245cb2SAnirudh Venkataramanan 13932b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 13942b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 13952b245cb2SAnirudh Venkataramanan */ 13962d4238f5SKrzysztof Kazimierczak ice_for_each_ring(ring, q_vector->tx) { 13971742b3d5SMagnus Karlsson bool wd = ring->xsk_pool ? 13982d4238f5SKrzysztof Kazimierczak ice_clean_tx_irq_zc(ring, budget) : 13992d4238f5SKrzysztof Kazimierczak ice_clean_tx_irq(ring, budget); 14002d4238f5SKrzysztof Kazimierczak 14012d4238f5SKrzysztof Kazimierczak if (!wd) 14022b245cb2SAnirudh Venkataramanan clean_complete = false; 14032d4238f5SKrzysztof Kazimierczak } 14042b245cb2SAnirudh Venkataramanan 14052b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 1406d27525ecSJesse Brandeburg if (unlikely(budget <= 0)) 14072b245cb2SAnirudh Venkataramanan return budget; 14082b245cb2SAnirudh Venkataramanan 14099118fcd5SBrett Creeley /* normally we have 1 Rx ring per q_vector */ 14109118fcd5SBrett Creeley if (unlikely(q_vector->num_ring_rx > 1)) 14119118fcd5SBrett Creeley /* We attempt to distribute budget to each Rx queue fairly, but 14129118fcd5SBrett Creeley * don't allow the budget to go below 1 because that would exit 14139118fcd5SBrett Creeley * polling early. 14142b245cb2SAnirudh Venkataramanan */ 141588865fc4SKarol Kolacinski budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 14169118fcd5SBrett Creeley else 14179118fcd5SBrett Creeley /* Max of 1 Rx ring in this q_vector so give it the budget */ 14189118fcd5SBrett Creeley budget_per_ring = budget; 14192b245cb2SAnirudh Venkataramanan 14202b245cb2SAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->rx) { 14212b245cb2SAnirudh Venkataramanan int cleaned; 14222b245cb2SAnirudh Venkataramanan 14232d4238f5SKrzysztof Kazimierczak /* A dedicated path for zero-copy allows making a single 14242d4238f5SKrzysztof Kazimierczak * comparison in the irq context instead of many inside the 14252d4238f5SKrzysztof Kazimierczak * ice_clean_rx_irq function and makes the codebase cleaner. 14262d4238f5SKrzysztof Kazimierczak */ 14271742b3d5SMagnus Karlsson cleaned = ring->xsk_pool ? 
14282d4238f5SKrzysztof Kazimierczak ice_clean_rx_irq_zc(ring, budget_per_ring) : 14292d4238f5SKrzysztof Kazimierczak ice_clean_rx_irq(ring, budget_per_ring); 14302b245cb2SAnirudh Venkataramanan work_done += cleaned; 14312b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 14322b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 14332b245cb2SAnirudh Venkataramanan clean_complete = false; 14342b245cb2SAnirudh Venkataramanan } 14352b245cb2SAnirudh Venkataramanan 14362b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 14371d9f7ca3SJesse Brandeburg if (!clean_complete) { 14381d9f7ca3SJesse Brandeburg /* Set the writeback on ITR so partial completions of 14391d9f7ca3SJesse Brandeburg * cache-lines will still continue even if we're polling. 14401d9f7ca3SJesse Brandeburg */ 14411d9f7ca3SJesse Brandeburg ice_set_wb_on_itr(q_vector); 14422b245cb2SAnirudh Venkataramanan return budget; 14431d9f7ca3SJesse Brandeburg } 14442b245cb2SAnirudh Venkataramanan 14450bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 14460bcd952fSJesse Brandeburg * poll us due to busy-polling 14470bcd952fSJesse Brandeburg */ 14480bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) 14492fb0821fSJesse Brandeburg ice_update_ena_itr(q_vector); 14502ab28bb0SBrett Creeley else 14512fb0821fSJesse Brandeburg ice_set_wb_on_itr(q_vector); 1452e0c9fd9bSDave Ertman 145332a64994SBruce Allan return min_t(int, work_done, budget - 1); 14542b245cb2SAnirudh Venkataramanan } 14552b245cb2SAnirudh Venkataramanan 14562b245cb2SAnirudh Venkataramanan /** 1457d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 14582b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 14592b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 14602b245cb2SAnirudh Venkataramanan * 14612b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 14622b245cb2SAnirudh Venkataramanan */ 14632b245cb2SAnirudh Venkataramanan static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 14642b245cb2SAnirudh Venkataramanan { 14652b245cb2SAnirudh Venkataramanan netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 14662b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 14672b245cb2SAnirudh Venkataramanan smp_mb(); 14682b245cb2SAnirudh Venkataramanan 14692b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. */ 14702b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 14712b245cb2SAnirudh Venkataramanan return -EBUSY; 14722b245cb2SAnirudh Venkataramanan 14732b245cb2SAnirudh Venkataramanan /* A reprieve! 
- use start_subqueue because it doesn't call schedule */ 14742b245cb2SAnirudh Venkataramanan netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 14752b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 14762b245cb2SAnirudh Venkataramanan return 0; 14772b245cb2SAnirudh Venkataramanan } 14782b245cb2SAnirudh Venkataramanan 14792b245cb2SAnirudh Venkataramanan /** 1480d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 14812b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 14822b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 14832b245cb2SAnirudh Venkataramanan * 14842b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 14852b245cb2SAnirudh Venkataramanan */ 14862b245cb2SAnirudh Venkataramanan static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 14872b245cb2SAnirudh Venkataramanan { 14882b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 14892b245cb2SAnirudh Venkataramanan return 0; 1490d337f2afSAnirudh Venkataramanan 14912b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 14922b245cb2SAnirudh Venkataramanan } 14932b245cb2SAnirudh Venkataramanan 14942b245cb2SAnirudh Venkataramanan /** 14952b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 14962b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 14972b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1498d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 14992b245cb2SAnirudh Venkataramanan * 15002b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 15012b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 15022b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
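 *
 * Data chunks larger than ICE_MAX_DATA_PER_TXD are split across several
 * descriptors aligned to ICE_MAX_DATA_PER_TXD_ALIGNED, and the last
 * descriptor of the frame is marked with ICE_TXD_LAST_DESC_CMD (RS and EOP)
 * before the tail register is written.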
15032b245cb2SAnirudh Venkataramanan */ 1504d76a60baSAnirudh Venkataramanan static void 1505d76a60baSAnirudh Venkataramanan ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, 1506d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 15072b245cb2SAnirudh Venkataramanan { 1508d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 15092b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 15102b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 15112b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 15122b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 15132b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 15144ee656bbSTony Nguyen skb_frag_t *frag; 15152b245cb2SAnirudh Venkataramanan dma_addr_t dma; 15162b245cb2SAnirudh Venkataramanan 1517d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1518d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1519d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 15202b245cb2SAnirudh Venkataramanan skb = first->skb; 15212b245cb2SAnirudh Venkataramanan 15222b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 15232b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 15242b245cb2SAnirudh Venkataramanan 15252b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 15262b245cb2SAnirudh Venkataramanan 1527d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1528d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1529d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1530d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1531d76a60baSAnirudh Venkataramanan } 1532d76a60baSAnirudh Venkataramanan 15332b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 15342b245cb2SAnirudh Venkataramanan 15352b245cb2SAnirudh Venkataramanan tx_buf = first; 15362b245cb2SAnirudh Venkataramanan 15372b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 15382b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 15392b245cb2SAnirudh Venkataramanan 15402b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 15412b245cb2SAnirudh Venkataramanan goto dma_error; 15422b245cb2SAnirudh Venkataramanan 15432b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 15442b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 15452b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 15462b245cb2SAnirudh Venkataramanan 15472b245cb2SAnirudh Venkataramanan /* align size to end of page */ 15482b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 15492b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 15502b245cb2SAnirudh Venkataramanan 15512b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 15522b245cb2SAnirudh Venkataramanan * can handle 15532b245cb2SAnirudh Venkataramanan */ 15542b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 15552b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 15565757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, max_data, 15575757cc7cSTony Nguyen td_tag); 15582b245cb2SAnirudh Venkataramanan 15592b245cb2SAnirudh Venkataramanan tx_desc++; 15602b245cb2SAnirudh Venkataramanan i++; 15612b245cb2SAnirudh Venkataramanan 15622b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 15632b245cb2SAnirudh Venkataramanan tx_desc = 
ICE_TX_DESC(tx_ring, 0); 15642b245cb2SAnirudh Venkataramanan i = 0; 15652b245cb2SAnirudh Venkataramanan } 15662b245cb2SAnirudh Venkataramanan 15672b245cb2SAnirudh Venkataramanan dma += max_data; 15682b245cb2SAnirudh Venkataramanan size -= max_data; 15692b245cb2SAnirudh Venkataramanan 15702b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 15712b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 15722b245cb2SAnirudh Venkataramanan } 15732b245cb2SAnirudh Venkataramanan 15742b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 15752b245cb2SAnirudh Venkataramanan break; 15762b245cb2SAnirudh Venkataramanan 15775757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 15782b245cb2SAnirudh Venkataramanan size, td_tag); 15792b245cb2SAnirudh Venkataramanan 15802b245cb2SAnirudh Venkataramanan tx_desc++; 15812b245cb2SAnirudh Venkataramanan i++; 15822b245cb2SAnirudh Venkataramanan 15832b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 15842b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 15852b245cb2SAnirudh Venkataramanan i = 0; 15862b245cb2SAnirudh Venkataramanan } 15872b245cb2SAnirudh Venkataramanan 15882b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 15892b245cb2SAnirudh Venkataramanan data_len -= size; 15902b245cb2SAnirudh Venkataramanan 15912b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 15922b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 15932b245cb2SAnirudh Venkataramanan 15942b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 15952b245cb2SAnirudh Venkataramanan } 15962b245cb2SAnirudh Venkataramanan 15972b245cb2SAnirudh Venkataramanan /* record bytecount for BQL */ 15982b245cb2SAnirudh Venkataramanan netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 15992b245cb2SAnirudh Venkataramanan 16002b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 16012b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 16022b245cb2SAnirudh Venkataramanan 16032b245cb2SAnirudh Venkataramanan i++; 16042b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 16052b245cb2SAnirudh Venkataramanan i = 0; 16062b245cb2SAnirudh Venkataramanan 16072b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 1608efc2214bSMaciej Fijalkowski td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 16095757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = 16105757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, size, td_tag); 16112b245cb2SAnirudh Venkataramanan 16122b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 16132b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 16142b245cb2SAnirudh Venkataramanan * 16152b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 16162b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
16172b245cb2SAnirudh Venkataramanan */ 16182b245cb2SAnirudh Venkataramanan wmb(); 16192b245cb2SAnirudh Venkataramanan 16202b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 16212b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 16222b245cb2SAnirudh Venkataramanan 16232b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 16242b245cb2SAnirudh Venkataramanan 16252b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 16262b245cb2SAnirudh Venkataramanan 16272b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 16284ee656bbSTony Nguyen if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 16292b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 16302b245cb2SAnirudh Venkataramanan 16312b245cb2SAnirudh Venkataramanan return; 16322b245cb2SAnirudh Venkataramanan 16332b245cb2SAnirudh Venkataramanan dma_error: 16342f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 16352b245cb2SAnirudh Venkataramanan for (;;) { 16362b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 16372b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 16382b245cb2SAnirudh Venkataramanan if (tx_buf == first) 16392b245cb2SAnirudh Venkataramanan break; 16402b245cb2SAnirudh Venkataramanan if (i == 0) 16412b245cb2SAnirudh Venkataramanan i = tx_ring->count; 16422b245cb2SAnirudh Venkataramanan i--; 16432b245cb2SAnirudh Venkataramanan } 16442b245cb2SAnirudh Venkataramanan 16452b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 16462b245cb2SAnirudh Venkataramanan } 16472b245cb2SAnirudh Venkataramanan 16482b245cb2SAnirudh Venkataramanan /** 1649d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1650d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1651d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1652d76a60baSAnirudh Venkataramanan * 1653d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
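 *
 * The header lengths are packed into the descriptor offset field in word
 * units: MACLEN in 2-byte words, IPLEN and L4LEN in 4-byte words. For an
 * untagged TCP/IPv4 frame with no options this gives MACLEN = 14 / 2 = 7,
 * IPLEN = 20 / 4 = 5 and L4LEN = 5 (the TCP doff value for a 20 byte header).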
1654d76a60baSAnirudh Venkataramanan */ 1655d76a60baSAnirudh Venkataramanan static 1656d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1657d76a60baSAnirudh Venkataramanan { 1658d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1659d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1660d76a60baSAnirudh Venkataramanan union { 1661d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1662d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1663d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1664d76a60baSAnirudh Venkataramanan } ip; 1665d76a60baSAnirudh Venkataramanan union { 1666d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1667d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1668d76a60baSAnirudh Venkataramanan } l4; 1669d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1670d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1671d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1672d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1673d76a60baSAnirudh Venkataramanan 1674d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1675d76a60baSAnirudh Venkataramanan return 0; 1676d76a60baSAnirudh Venkataramanan 1677d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1678d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1679d76a60baSAnirudh Venkataramanan 1680d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1681d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1682d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1683d76a60baSAnirudh Venkataramanan 1684a4e82a81STony Nguyen protocol = vlan_get_protocol(skb); 1685a4e82a81STony Nguyen 1686a4e82a81STony Nguyen if (protocol == htons(ETH_P_IP)) 1687a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1688a4e82a81STony Nguyen else if (protocol == htons(ETH_P_IPV6)) 1689a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1690a4e82a81STony Nguyen 1691a4e82a81STony Nguyen if (skb->encapsulation) { 1692a4e82a81STony Nguyen bool gso_ena = false; 1693a4e82a81STony Nguyen u32 tunnel = 0; 1694a4e82a81STony Nguyen 1695a4e82a81STony Nguyen /* define outer network header type */ 1696a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1697a4e82a81STony Nguyen tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
1698a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4 : 1699a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 1700a4e82a81STony Nguyen l4_proto = ip.v4->protocol; 1701a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 17021b0b0b58SNick Nunley int ret; 17031b0b0b58SNick Nunley 1704a4e82a81STony Nguyen tunnel |= ICE_TX_CTX_EIPT_IPV6; 1705a4e82a81STony Nguyen exthdr = ip.hdr + sizeof(*ip.v6); 1706a4e82a81STony Nguyen l4_proto = ip.v6->nexthdr; 17071b0b0b58SNick Nunley ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 1708a4e82a81STony Nguyen &l4_proto, &frag_off); 17091b0b0b58SNick Nunley if (ret < 0) 17101b0b0b58SNick Nunley return -1; 1711a4e82a81STony Nguyen } 1712a4e82a81STony Nguyen 1713a4e82a81STony Nguyen /* define outer transport */ 1714a4e82a81STony Nguyen switch (l4_proto) { 1715a4e82a81STony Nguyen case IPPROTO_UDP: 1716a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_UDP_TUNNELING; 1717a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1718a4e82a81STony Nguyen break; 1719a4e82a81STony Nguyen case IPPROTO_GRE: 1720a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_GRE_TUNNELING; 1721a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1722a4e82a81STony Nguyen break; 1723a4e82a81STony Nguyen case IPPROTO_IPIP: 1724a4e82a81STony Nguyen case IPPROTO_IPV6: 1725a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1726a4e82a81STony Nguyen l4.hdr = skb_inner_network_header(skb); 1727a4e82a81STony Nguyen break; 1728a4e82a81STony Nguyen default: 1729a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_TSO) 1730d76a60baSAnirudh Venkataramanan return -1; 1731d76a60baSAnirudh Venkataramanan 1732a4e82a81STony Nguyen skb_checksum_help(skb); 1733a4e82a81STony Nguyen return 0; 1734a4e82a81STony Nguyen } 1735a4e82a81STony Nguyen 1736a4e82a81STony Nguyen /* compute outer L3 header size */ 1737a4e82a81STony Nguyen tunnel |= ((l4.hdr - ip.hdr) / 4) << 1738a4e82a81STony Nguyen ICE_TXD_CTX_QW0_EIPLEN_S; 1739a4e82a81STony Nguyen 1740a4e82a81STony Nguyen /* switch IP header pointer from outer to inner header */ 1741a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1742a4e82a81STony Nguyen 1743a4e82a81STony Nguyen /* compute tunnel header size */ 1744a4e82a81STony Nguyen tunnel |= ((ip.hdr - l4.hdr) / 2) << 1745a4e82a81STony Nguyen ICE_TXD_CTX_QW0_NATLEN_S; 1746a4e82a81STony Nguyen 1747a4e82a81STony Nguyen gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; 1748a4e82a81STony Nguyen /* indicate if we need to offload outer UDP header */ 1749a4e82a81STony Nguyen if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && 1750a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 1751a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M; 1752a4e82a81STony Nguyen 1753a4e82a81STony Nguyen /* record tunnel offload values */ 1754a4e82a81STony Nguyen off->cd_tunnel_params |= tunnel; 1755a4e82a81STony Nguyen 1756a4e82a81STony Nguyen /* set DTYP=1 to indicate that it's an Tx context descriptor 1757a4e82a81STony Nguyen * in IPsec tunnel mode with Tx offloads in Quad word 1 1758a4e82a81STony Nguyen */ 1759a4e82a81STony Nguyen off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; 1760a4e82a81STony Nguyen 1761a4e82a81STony Nguyen /* switch L4 header pointer from outer to inner */ 1762a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1763a4e82a81STony Nguyen l4_proto = 0; 1764a4e82a81STony Nguyen 1765a4e82a81STony Nguyen /* reset type as we transition from outer to inner headers */ 1766a4e82a81STony Nguyen first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | 
ICE_TX_FLAGS_IPV6); 1767a4e82a81STony Nguyen if (ip.v4->version == 4) 1768a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1769a4e82a81STony Nguyen if (ip.v6->version == 6) 1770a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1771a4e82a81STony Nguyen } 1772a4e82a81STony Nguyen 1773d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1774a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1775d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1776d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already, the only time we 1777d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 1778d76a60baSAnirudh Venkataramanan */ 1779d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1780d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1781d76a60baSAnirudh Venkataramanan else 1782d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1783d76a60baSAnirudh Venkataramanan 1784a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1785d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1786d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1787d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1788d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1789d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1790d76a60baSAnirudh Venkataramanan &frag_off); 1791d76a60baSAnirudh Venkataramanan } else { 1792d76a60baSAnirudh Venkataramanan return -1; 1793d76a60baSAnirudh Venkataramanan } 1794d76a60baSAnirudh Venkataramanan 1795d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1796d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1797d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1798d76a60baSAnirudh Venkataramanan 1799d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1800d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1801d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 1802d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 1803d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1804d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 1805d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1806d76a60baSAnirudh Venkataramanan break; 1807d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 1808d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 1809d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1810d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 1811d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1812d76a60baSAnirudh Venkataramanan break; 1813d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 1814cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 1815cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1816cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 1817cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1818cf909e19SAnirudh Venkataramanan break; 1819cf909e19SAnirudh Venkataramanan 1820d76a60baSAnirudh Venkataramanan default: 1821d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1822d76a60baSAnirudh Venkataramanan return -1; 1823d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 1824d76a60baSAnirudh Venkataramanan return 0; 
1825d76a60baSAnirudh Venkataramanan } 1826d76a60baSAnirudh Venkataramanan 1827d76a60baSAnirudh Venkataramanan off->td_cmd |= cmd; 1828d76a60baSAnirudh Venkataramanan off->td_offset |= offset; 1829d76a60baSAnirudh Venkataramanan return 1; 1830d76a60baSAnirudh Venkataramanan } 1831d76a60baSAnirudh Venkataramanan 1832d76a60baSAnirudh Venkataramanan /** 1833f9867df6SAnirudh Venkataramanan * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 1834d76a60baSAnirudh Venkataramanan * @tx_ring: ring to send buffer on 1835d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1836d76a60baSAnirudh Venkataramanan * 1837d76a60baSAnirudh Venkataramanan * Checks the skb and set up correspondingly several generic transmit flags 1838d76a60baSAnirudh Venkataramanan * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 1839d76a60baSAnirudh Venkataramanan */ 18402bb19d6eSBrett Creeley static void 1841d76a60baSAnirudh Venkataramanan ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) 1842d76a60baSAnirudh Venkataramanan { 1843d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1844d76a60baSAnirudh Venkataramanan 18452bb19d6eSBrett Creeley /* nothing left to do, software offloaded VLAN */ 18462bb19d6eSBrett Creeley if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) 18472bb19d6eSBrett Creeley return; 18482bb19d6eSBrett Creeley 18492bb19d6eSBrett Creeley /* currently, we always assume 802.1Q for VLAN insertion as VLAN 18502bb19d6eSBrett Creeley * insertion for 802.1AD is not supported 1851d76a60baSAnirudh Venkataramanan */ 1852d76a60baSAnirudh Venkataramanan if (skb_vlan_tag_present(skb)) { 1853d76a60baSAnirudh Venkataramanan first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 1854d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 1855d76a60baSAnirudh Venkataramanan } 1856d76a60baSAnirudh Venkataramanan 18572bb19d6eSBrett Creeley ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 1858d76a60baSAnirudh Venkataramanan } 1859d76a60baSAnirudh Venkataramanan 1860d76a60baSAnirudh Venkataramanan /** 1861d76a60baSAnirudh Venkataramanan * ice_tso - computes mss and TSO length to prepare for TSO 1862d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1863d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1864d76a60baSAnirudh Venkataramanan * 1865d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if TSO can't happen, 1 otherwise. 
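 *
 * The L4 checksum is adjusted with csum_replace_by_diff() to subtract the
 * payload length from the pseudo-header sum, since the hardware adds back
 * the per-segment length when it rewrites each segment's headers. The total
 * payload length and the MSS are then passed to hardware via the TSO context
 * descriptor built into off->cd_qw1.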
1866d76a60baSAnirudh Venkataramanan */ 1867d76a60baSAnirudh Venkataramanan static 1868d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1869d76a60baSAnirudh Venkataramanan { 1870d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1871d76a60baSAnirudh Venkataramanan union { 1872d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1873d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1874d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1875d76a60baSAnirudh Venkataramanan } ip; 1876d76a60baSAnirudh Venkataramanan union { 1877d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1878a54e3b8cSBrett Creeley struct udphdr *udp; 1879d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1880d76a60baSAnirudh Venkataramanan } l4; 1881d76a60baSAnirudh Venkataramanan u64 cd_mss, cd_tso_len; 188288865fc4SKarol Kolacinski u32 paylen; 188388865fc4SKarol Kolacinski u8 l4_start; 1884d76a60baSAnirudh Venkataramanan int err; 1885d76a60baSAnirudh Venkataramanan 1886d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1887d76a60baSAnirudh Venkataramanan return 0; 1888d76a60baSAnirudh Venkataramanan 1889d76a60baSAnirudh Venkataramanan if (!skb_is_gso(skb)) 1890d76a60baSAnirudh Venkataramanan return 0; 1891d76a60baSAnirudh Venkataramanan 1892d76a60baSAnirudh Venkataramanan err = skb_cow_head(skb, 0); 1893d76a60baSAnirudh Venkataramanan if (err < 0) 1894d76a60baSAnirudh Venkataramanan return err; 1895d76a60baSAnirudh Venkataramanan 1896c3a6825eSBruce Allan /* cppcheck-suppress unreadVariable */ 1897d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1898d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1899d76a60baSAnirudh Venkataramanan 1900d76a60baSAnirudh Venkataramanan /* initialize outer IP header fields */ 1901d76a60baSAnirudh Venkataramanan if (ip.v4->version == 4) { 1902d76a60baSAnirudh Venkataramanan ip.v4->tot_len = 0; 1903d76a60baSAnirudh Venkataramanan ip.v4->check = 0; 1904d76a60baSAnirudh Venkataramanan } else { 1905d76a60baSAnirudh Venkataramanan ip.v6->payload_len = 0; 1906d76a60baSAnirudh Venkataramanan } 1907d76a60baSAnirudh Venkataramanan 1908a4e82a81STony Nguyen if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 1909a4e82a81STony Nguyen SKB_GSO_GRE_CSUM | 1910a4e82a81STony Nguyen SKB_GSO_IPXIP4 | 1911a4e82a81STony Nguyen SKB_GSO_IPXIP6 | 1912a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL | 1913a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL_CSUM)) { 1914a4e82a81STony Nguyen if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 1915a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 1916a4e82a81STony Nguyen l4.udp->len = 0; 1917a4e82a81STony Nguyen 1918a4e82a81STony Nguyen /* determine offset of outer transport header */ 191988865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data); 1920a4e82a81STony Nguyen 1921a4e82a81STony Nguyen /* remove payload length from outer checksum */ 1922a4e82a81STony Nguyen paylen = skb->len - l4_start; 1923a4e82a81STony Nguyen csum_replace_by_diff(&l4.udp->check, 1924a4e82a81STony Nguyen (__force __wsum)htonl(paylen)); 1925a4e82a81STony Nguyen } 1926a4e82a81STony Nguyen 1927a4e82a81STony Nguyen /* reset pointers to inner headers */ 1928a4e82a81STony Nguyen 1929a4e82a81STony Nguyen /* cppcheck-suppress unreadVariable */ 1930a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1931a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1932a4e82a81STony Nguyen 1933a4e82a81STony Nguyen /* initialize inner IP header fields */ 
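		/* lengths and the IPv4 checksum are cleared because the
		 * hardware fills in per-segment values when it builds each
		 * segment's headers
		 */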
1934a4e82a81STony Nguyen if (ip.v4->version == 4) { 1935a4e82a81STony Nguyen ip.v4->tot_len = 0; 1936a4e82a81STony Nguyen ip.v4->check = 0; 1937a4e82a81STony Nguyen } else { 1938a4e82a81STony Nguyen ip.v6->payload_len = 0; 1939a4e82a81STony Nguyen } 1940a4e82a81STony Nguyen } 1941a4e82a81STony Nguyen 1942d76a60baSAnirudh Venkataramanan /* determine offset of transport header */ 194388865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data); 1944d76a60baSAnirudh Venkataramanan 1945d76a60baSAnirudh Venkataramanan /* remove payload length from checksum */ 1946d76a60baSAnirudh Venkataramanan paylen = skb->len - l4_start; 1947d76a60baSAnirudh Venkataramanan 1948a54e3b8cSBrett Creeley if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 1949a54e3b8cSBrett Creeley csum_replace_by_diff(&l4.udp->check, 1950a54e3b8cSBrett Creeley (__force __wsum)htonl(paylen)); 1951a54e3b8cSBrett Creeley /* compute length of UDP segmentation header */ 195288865fc4SKarol Kolacinski off->header_len = (u8)sizeof(l4.udp) + l4_start; 1953a54e3b8cSBrett Creeley } else { 1954a54e3b8cSBrett Creeley csum_replace_by_diff(&l4.tcp->check, 1955a54e3b8cSBrett Creeley (__force __wsum)htonl(paylen)); 1956a54e3b8cSBrett Creeley /* compute length of TCP segmentation header */ 195788865fc4SKarol Kolacinski off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); 1958a54e3b8cSBrett Creeley } 1959d76a60baSAnirudh Venkataramanan 1960d76a60baSAnirudh Venkataramanan /* update gso_segs and bytecount */ 1961d76a60baSAnirudh Venkataramanan first->gso_segs = skb_shinfo(skb)->gso_segs; 1962d944b469SBrett Creeley first->bytecount += (first->gso_segs - 1) * off->header_len; 1963d76a60baSAnirudh Venkataramanan 1964d76a60baSAnirudh Venkataramanan cd_tso_len = skb->len - off->header_len; 1965d76a60baSAnirudh Venkataramanan cd_mss = skb_shinfo(skb)->gso_size; 1966d76a60baSAnirudh Venkataramanan 1967d76a60baSAnirudh Venkataramanan /* record cdesc_qw1 with TSO parameters */ 1968e65e9e15SBruce Allan off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 1969d76a60baSAnirudh Venkataramanan (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | 1970d76a60baSAnirudh Venkataramanan (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 1971e65e9e15SBruce Allan (cd_mss << ICE_TXD_CTX_QW1_MSS_S)); 1972d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_TSO; 1973d76a60baSAnirudh Venkataramanan return 1; 1974d76a60baSAnirudh Venkataramanan } 1975d76a60baSAnirudh Venkataramanan 1976d76a60baSAnirudh Venkataramanan /** 19772b245cb2SAnirudh Venkataramanan * ice_txd_use_count - estimate the number of descriptors needed for Tx 19782b245cb2SAnirudh Venkataramanan * @size: transmit request size in bytes 19792b245cb2SAnirudh Venkataramanan * 19802b245cb2SAnirudh Venkataramanan * Due to hardware alignment restrictions (4K alignment), we need to 19812b245cb2SAnirudh Venkataramanan * assume that we can have no more than 12K of data per descriptor, even 19822b245cb2SAnirudh Venkataramanan * though each descriptor can take up to 16K - 1 bytes of aligned memory. 19832b245cb2SAnirudh Venkataramanan * Thus, we need to divide by 12K. But division is slow! Instead, 19842b245cb2SAnirudh Venkataramanan * we decompose the operation into shifts and one relatively cheap 19852b245cb2SAnirudh Venkataramanan * multiply operation. 
19862b245cb2SAnirudh Venkataramanan * 19872b245cb2SAnirudh Venkataramanan * To divide by 12K, we first divide by 4K, then divide by 3: 19882b245cb2SAnirudh Venkataramanan * To divide by 4K, shift right by 12 bits 19892b245cb2SAnirudh Venkataramanan * To divide by 3, multiply by 85, then divide by 256 19902b245cb2SAnirudh Venkataramanan * (Divide by 256 is done by shifting right by 8 bits) 19912b245cb2SAnirudh Venkataramanan * Finally, we add one to round up. Because 256 isn't an exact multiple of 19922b245cb2SAnirudh Venkataramanan * 3, we'll underestimate near each multiple of 12K. This is actually more 19932b245cb2SAnirudh Venkataramanan * accurate as we have 4K - 1 of wiggle room that we can fit into the last 19942b245cb2SAnirudh Venkataramanan * segment. For our purposes this is accurate out to 1M which is orders of 19952b245cb2SAnirudh Venkataramanan * magnitude greater than our largest possible GSO size. 19962b245cb2SAnirudh Venkataramanan * 19972b245cb2SAnirudh Venkataramanan * This would then be implemented as: 1998c585ea42SBrett Creeley * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 19992b245cb2SAnirudh Venkataramanan * 20002b245cb2SAnirudh Venkataramanan * Since multiplication and division are commutative, we can reorder 20012b245cb2SAnirudh Venkataramanan * operations into: 2002c585ea42SBrett Creeley * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 20032b245cb2SAnirudh Venkataramanan */ 20042b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size) 20052b245cb2SAnirudh Venkataramanan { 2006c585ea42SBrett Creeley return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 20072b245cb2SAnirudh Venkataramanan } 20082b245cb2SAnirudh Venkataramanan 20092b245cb2SAnirudh Venkataramanan /** 2010d337f2afSAnirudh Venkataramanan * ice_xmit_desc_count - calculate number of Tx descriptors needed 20112b245cb2SAnirudh Venkataramanan * @skb: send buffer 20122b245cb2SAnirudh Venkataramanan * 20132b245cb2SAnirudh Venkataramanan * Returns number of data descriptors needed for this skb. 
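 *
 * Each data area is costed with ice_txd_use_count() above; for example a
 * 30000 byte buffer needs ((30000 * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR
 * = 2 + 1 = 3 descriptors, which matches splitting 30000 bytes into 12K
 * chunks.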
20142b245cb2SAnirudh Venkataramanan */ 20152b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 20162b245cb2SAnirudh Venkataramanan { 2017d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 20182b245cb2SAnirudh Venkataramanan unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 20192b245cb2SAnirudh Venkataramanan unsigned int count = 0, size = skb_headlen(skb); 20202b245cb2SAnirudh Venkataramanan 20212b245cb2SAnirudh Venkataramanan for (;;) { 20222b245cb2SAnirudh Venkataramanan count += ice_txd_use_count(size); 20232b245cb2SAnirudh Venkataramanan 20242b245cb2SAnirudh Venkataramanan if (!nr_frags--) 20252b245cb2SAnirudh Venkataramanan break; 20262b245cb2SAnirudh Venkataramanan 20272b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag++); 20282b245cb2SAnirudh Venkataramanan } 20292b245cb2SAnirudh Venkataramanan 20302b245cb2SAnirudh Venkataramanan return count; 20312b245cb2SAnirudh Venkataramanan } 20322b245cb2SAnirudh Venkataramanan 20332b245cb2SAnirudh Venkataramanan /** 20342b245cb2SAnirudh Venkataramanan * __ice_chk_linearize - Check if there are more than 8 buffers per packet 20352b245cb2SAnirudh Venkataramanan * @skb: send buffer 20362b245cb2SAnirudh Venkataramanan * 20372b245cb2SAnirudh Venkataramanan * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 20382b245cb2SAnirudh Venkataramanan * and so we need to figure out the cases where we need to linearize the skb. 20392b245cb2SAnirudh Venkataramanan * 20402b245cb2SAnirudh Venkataramanan * For TSO we need to count the TSO header and segment payload separately. 20412b245cb2SAnirudh Venkataramanan * As such we need to check cases where we have 7 fragments or more as we 20422b245cb2SAnirudh Venkataramanan * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 20432b245cb2SAnirudh Venkataramanan * the segment payload in the first descriptor, and another 7 for the 20442b245cb2SAnirudh Venkataramanan * fragments. 20452b245cb2SAnirudh Venkataramanan */ 20462b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb) 20472b245cb2SAnirudh Venkataramanan { 2048d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag, *stale; 20492b245cb2SAnirudh Venkataramanan int nr_frags, sum; 20502b245cb2SAnirudh Venkataramanan 20512b245cb2SAnirudh Venkataramanan /* no need to check if number of frags is less than 7 */ 20522b245cb2SAnirudh Venkataramanan nr_frags = skb_shinfo(skb)->nr_frags; 20532b245cb2SAnirudh Venkataramanan if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 20542b245cb2SAnirudh Venkataramanan return false; 20552b245cb2SAnirudh Venkataramanan 20562b245cb2SAnirudh Venkataramanan /* We need to walk through the list and validate that each group 20572b245cb2SAnirudh Venkataramanan * of 6 fragments totals at least gso_size. 20582b245cb2SAnirudh Venkataramanan */ 20592b245cb2SAnirudh Venkataramanan nr_frags -= ICE_MAX_BUF_TXD - 2; 20602b245cb2SAnirudh Venkataramanan frag = &skb_shinfo(skb)->frags[0]; 20612b245cb2SAnirudh Venkataramanan 20622b245cb2SAnirudh Venkataramanan /* Initialize size to the negative value of gso_size minus 1. We 20634ee656bbSTony Nguyen * use this as the worst case scenario in which the frag ahead 20642b245cb2SAnirudh Venkataramanan * of us only provides one byte which is why we are limited to 6 20652b245cb2SAnirudh Venkataramanan * descriptors for a single transmit as the header and previous 20662b245cb2SAnirudh Venkataramanan * fragment are already consuming 2 descriptors. 
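* * As an illustrative example (sizes invented for this note): with a gso_size of 9000 and a run of six 1400 byte fragments, those six fragments only cover 8400 bytes, so the running sum below goes negative and we report that the skb has to be linearized.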
20672b245cb2SAnirudh Venkataramanan */ 20682b245cb2SAnirudh Venkataramanan sum = 1 - skb_shinfo(skb)->gso_size; 20692b245cb2SAnirudh Venkataramanan 20702b245cb2SAnirudh Venkataramanan /* Add size of frags 0 through 4 to create our initial sum */ 20712b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20722b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20732b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20742b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20752b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20762b245cb2SAnirudh Venkataramanan 20772b245cb2SAnirudh Venkataramanan /* Walk through fragments adding latest fragment, testing it, and 20782b245cb2SAnirudh Venkataramanan * then removing stale fragments from the sum. 20792b245cb2SAnirudh Venkataramanan */ 20800a37abfaSKiran Patil for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 20810a37abfaSKiran Patil int stale_size = skb_frag_size(stale); 20820a37abfaSKiran Patil 20832b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 20842b245cb2SAnirudh Venkataramanan 20850a37abfaSKiran Patil /* The stale fragment may present us with a smaller 20860a37abfaSKiran Patil * descriptor than the actual fragment size. To account 20870a37abfaSKiran Patil * for that we need to remove all the data on the front and 20880a37abfaSKiran Patil * figure out what the remainder would be in the last 20890a37abfaSKiran Patil * descriptor associated with the fragment. 20900a37abfaSKiran Patil */ 20910a37abfaSKiran Patil if (stale_size > ICE_MAX_DATA_PER_TXD) { 20920a37abfaSKiran Patil int align_pad = -(skb_frag_off(stale)) & 20930a37abfaSKiran Patil (ICE_MAX_READ_REQ_SIZE - 1); 20940a37abfaSKiran Patil 20950a37abfaSKiran Patil sum -= align_pad; 20960a37abfaSKiran Patil stale_size -= align_pad; 20970a37abfaSKiran Patil 20980a37abfaSKiran Patil do { 20990a37abfaSKiran Patil sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; 21000a37abfaSKiran Patil stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; 21010a37abfaSKiran Patil } while (stale_size > ICE_MAX_DATA_PER_TXD); 21020a37abfaSKiran Patil } 21030a37abfaSKiran Patil 21042b245cb2SAnirudh Venkataramanan /* if sum is negative we failed to make sufficient progress */ 21052b245cb2SAnirudh Venkataramanan if (sum < 0) 21062b245cb2SAnirudh Venkataramanan return true; 21072b245cb2SAnirudh Venkataramanan 21082b245cb2SAnirudh Venkataramanan if (!nr_frags--) 21092b245cb2SAnirudh Venkataramanan break; 21102b245cb2SAnirudh Venkataramanan 21110a37abfaSKiran Patil sum -= stale_size; 21122b245cb2SAnirudh Venkataramanan } 21132b245cb2SAnirudh Venkataramanan 21142b245cb2SAnirudh Venkataramanan return false; 21152b245cb2SAnirudh Venkataramanan } 21162b245cb2SAnirudh Venkataramanan 21172b245cb2SAnirudh Venkataramanan /** 21182b245cb2SAnirudh Venkataramanan * ice_chk_linearize - Check if there are more than 8 fragments per packet 21192b245cb2SAnirudh Venkataramanan * @skb: send buffer 21202b245cb2SAnirudh Venkataramanan * @count: number of buffers used 21212b245cb2SAnirudh Venkataramanan * 21222b245cb2SAnirudh Venkataramanan * Note: Our HW can't scatter-gather more than 8 fragments to build 21232b245cb2SAnirudh Venkataramanan * a packet on the wire and so we need to figure out the cases where we 21242b245cb2SAnirudh Venkataramanan * need to linearize the skb. 
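* * For example, a non-TSO skb whose head and fragments map to 9 or more descriptors is linearized, while one that maps to exactly 8 (ICE_MAX_BUF_TXD) is sent as-is; TSO skbs defer to __ice_chk_linearize() so the header and per-segment payload are counted separately.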
21252b245cb2SAnirudh Venkataramanan */ 21262b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 21272b245cb2SAnirudh Venkataramanan { 21282b245cb2SAnirudh Venkataramanan /* Both TSO and single send will work if count is less than 8 */ 21292b245cb2SAnirudh Venkataramanan if (likely(count < ICE_MAX_BUF_TXD)) 21302b245cb2SAnirudh Venkataramanan return false; 21312b245cb2SAnirudh Venkataramanan 21322b245cb2SAnirudh Venkataramanan if (skb_is_gso(skb)) 21332b245cb2SAnirudh Venkataramanan return __ice_chk_linearize(skb); 21342b245cb2SAnirudh Venkataramanan 21352b245cb2SAnirudh Venkataramanan /* we can support up to 8 data buffers for a single send */ 21362b245cb2SAnirudh Venkataramanan return count != ICE_MAX_BUF_TXD; 21372b245cb2SAnirudh Venkataramanan } 21382b245cb2SAnirudh Venkataramanan 21392b245cb2SAnirudh Venkataramanan /** 21402b245cb2SAnirudh Venkataramanan * ice_xmit_frame_ring - Sends buffer on Tx ring 21412b245cb2SAnirudh Venkataramanan * @skb: send buffer 21422b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 21432b245cb2SAnirudh Venkataramanan * 21442b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 21452b245cb2SAnirudh Venkataramanan */ 21462b245cb2SAnirudh Venkataramanan static netdev_tx_t 21472b245cb2SAnirudh Venkataramanan ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) 21482b245cb2SAnirudh Venkataramanan { 2149d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params offload = { 0 }; 21500c3a6101SDave Ertman struct ice_vsi *vsi = tx_ring->vsi; 21512b245cb2SAnirudh Venkataramanan struct ice_tx_buf *first; 21522b245cb2SAnirudh Venkataramanan unsigned int count; 2153d76a60baSAnirudh Venkataramanan int tso, csum; 21542b245cb2SAnirudh Venkataramanan 21552b245cb2SAnirudh Venkataramanan count = ice_xmit_desc_count(skb); 21562b245cb2SAnirudh Venkataramanan if (ice_chk_linearize(skb, count)) { 21572b245cb2SAnirudh Venkataramanan if (__skb_linearize(skb)) 21582b245cb2SAnirudh Venkataramanan goto out_drop; 21592b245cb2SAnirudh Venkataramanan count = ice_txd_use_count(skb->len); 21602b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_linearize++; 21612b245cb2SAnirudh Venkataramanan } 21622b245cb2SAnirudh Venkataramanan 21632b245cb2SAnirudh Venkataramanan /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 21642b245cb2SAnirudh Venkataramanan * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 21652b245cb2SAnirudh Venkataramanan * + 4 desc gap to avoid the cache line where head is, 21662b245cb2SAnirudh Venkataramanan * + 1 desc for context descriptor, 21672b245cb2SAnirudh Venkataramanan * otherwise try next time 21682b245cb2SAnirudh Venkataramanan */ 2169c585ea42SBrett Creeley if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2170c585ea42SBrett Creeley ICE_DESCS_FOR_CTX_DESC)) { 21712b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_busy++; 21722b245cb2SAnirudh Venkataramanan return NETDEV_TX_BUSY; 21732b245cb2SAnirudh Venkataramanan } 21742b245cb2SAnirudh Venkataramanan 2175d76a60baSAnirudh Venkataramanan offload.tx_ring = tx_ring; 2176d76a60baSAnirudh Venkataramanan 21772b245cb2SAnirudh Venkataramanan /* record the location of the first descriptor for this packet */ 21782b245cb2SAnirudh Venkataramanan first = &tx_ring->tx_buf[tx_ring->next_to_use]; 21792b245cb2SAnirudh Venkataramanan first->skb = skb; 21802b245cb2SAnirudh Venkataramanan first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 21812b245cb2SAnirudh Venkataramanan 
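/* assume a single segment for now; ice_tso() overwrites gso_segs and adds the extra per-segment header bytes to bytecount when TSO applies */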
first->gso_segs = 1; 2182d76a60baSAnirudh Venkataramanan first->tx_flags = 0; 21832b245cb2SAnirudh Venkataramanan 2184d76a60baSAnirudh Venkataramanan /* prepare the VLAN tagging flags for Tx */ 21852bb19d6eSBrett Creeley ice_tx_prepare_vlan_flags(tx_ring, first); 2186d76a60baSAnirudh Venkataramanan 2187d76a60baSAnirudh Venkataramanan /* set up TSO offload */ 2188d76a60baSAnirudh Venkataramanan tso = ice_tso(first, &offload); 2189d76a60baSAnirudh Venkataramanan if (tso < 0) 2190d76a60baSAnirudh Venkataramanan goto out_drop; 2191d76a60baSAnirudh Venkataramanan 2192d76a60baSAnirudh Venkataramanan /* always set up Tx checksum offload */ 2193d76a60baSAnirudh Venkataramanan csum = ice_tx_csum(first, &offload); 2194d76a60baSAnirudh Venkataramanan if (csum < 0) 2195d76a60baSAnirudh Venkataramanan goto out_drop; 2196d76a60baSAnirudh Venkataramanan 21970c3a6101SDave Ertman /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 21980c3a6101SDave Ertman if (unlikely(skb->priority == TC_PRIO_CONTROL && 21990c3a6101SDave Ertman vsi->type == ICE_VSI_PF && 2200fc2d1165SChinh T Cao vsi->port_info->qos_cfg.is_sw_lldp)) 22010c3a6101SDave Ertman offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 22020c3a6101SDave Ertman ICE_TX_CTX_DESC_SWTCH_UPLINK << 22030c3a6101SDave Ertman ICE_TXD_CTX_QW1_CMD_S); 22040c3a6101SDave Ertman 22050c3a6101SDave Ertman if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2206d76a60baSAnirudh Venkataramanan struct ice_tx_ctx_desc *cdesc; 220788865fc4SKarol Kolacinski u16 i = tx_ring->next_to_use; 2208d76a60baSAnirudh Venkataramanan 2209d76a60baSAnirudh Venkataramanan /* grab the next descriptor */ 2210d76a60baSAnirudh Venkataramanan cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2211d76a60baSAnirudh Venkataramanan i++; 2212d76a60baSAnirudh Venkataramanan tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2213d76a60baSAnirudh Venkataramanan 2214d76a60baSAnirudh Venkataramanan /* setup context descriptor */ 2215d76a60baSAnirudh Venkataramanan cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2216d76a60baSAnirudh Venkataramanan cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2217d76a60baSAnirudh Venkataramanan cdesc->rsvd = cpu_to_le16(0); 2218d76a60baSAnirudh Venkataramanan cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2219d76a60baSAnirudh Venkataramanan } 2220d76a60baSAnirudh Venkataramanan 2221d76a60baSAnirudh Venkataramanan ice_tx_map(tx_ring, first, &offload); 22222b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 22232b245cb2SAnirudh Venkataramanan 22242b245cb2SAnirudh Venkataramanan out_drop: 22252b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 22262b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 22272b245cb2SAnirudh Venkataramanan } 22282b245cb2SAnirudh Venkataramanan 22292b245cb2SAnirudh Venkataramanan /** 22302b245cb2SAnirudh Venkataramanan * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 22312b245cb2SAnirudh Venkataramanan * @skb: send buffer 22322b245cb2SAnirudh Venkataramanan * @netdev: network interface device structure 22332b245cb2SAnirudh Venkataramanan * 22342b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 22352b245cb2SAnirudh Venkataramanan */ 22362b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 22372b245cb2SAnirudh Venkataramanan { 22382b245cb2SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 22392b245cb2SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 22402b245cb2SAnirudh Venkataramanan struct ice_ring *tx_ring; 22412b245cb2SAnirudh Venkataramanan 22422b245cb2SAnirudh Venkataramanan tx_ring = vsi->tx_rings[skb->queue_mapping]; 22432b245cb2SAnirudh Venkataramanan 22442b245cb2SAnirudh Venkataramanan /* hardware can't handle really short frames, hardware padding works 22452b245cb2SAnirudh Venkataramanan * beyond this point 22462b245cb2SAnirudh Venkataramanan */ 22472b245cb2SAnirudh Venkataramanan if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 22482b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 22492b245cb2SAnirudh Venkataramanan 22502b245cb2SAnirudh Venkataramanan return ice_xmit_frame_ring(skb, tx_ring); 22512b245cb2SAnirudh Venkataramanan } 2252148beb61SHenry Tieman 2253148beb61SHenry Tieman /** 2254148beb61SHenry Tieman * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2255148beb61SHenry Tieman * @tx_ring: tx_ring to clean 2256148beb61SHenry Tieman */ 2257148beb61SHenry Tieman void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring) 2258148beb61SHenry Tieman { 2259148beb61SHenry Tieman struct ice_vsi *vsi = tx_ring->vsi; 2260148beb61SHenry Tieman s16 i = tx_ring->next_to_clean; 2261148beb61SHenry Tieman int budget = ICE_DFLT_IRQ_WORK; 2262148beb61SHenry Tieman struct ice_tx_desc *tx_desc; 2263148beb61SHenry Tieman struct ice_tx_buf *tx_buf; 2264148beb61SHenry Tieman 2265148beb61SHenry Tieman tx_buf = &tx_ring->tx_buf[i]; 2266148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, i); 2267148beb61SHenry Tieman i -= tx_ring->count; 2268148beb61SHenry Tieman 2269148beb61SHenry Tieman do { 2270148beb61SHenry Tieman struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2271148beb61SHenry Tieman 2272148beb61SHenry Tieman /* if next_to_watch is not set then there is no pending work */ 2273148beb61SHenry Tieman if (!eop_desc) 2274148beb61SHenry Tieman break; 2275148beb61SHenry Tieman 
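/* each Flow Director programming request occupies two descriptors: a filter descriptor followed by a dummy data descriptor; next_to_watch marks the data descriptor, and both are cleaned below once it reports done */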
2276148beb61SHenry Tieman /* prevent any other reads prior to eop_desc */ 2277148beb61SHenry Tieman smp_rmb(); 2278148beb61SHenry Tieman 2279148beb61SHenry Tieman /* if the descriptor isn't done, no work to do */ 2280148beb61SHenry Tieman if (!(eop_desc->cmd_type_offset_bsz & 2281148beb61SHenry Tieman cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2282148beb61SHenry Tieman break; 2283148beb61SHenry Tieman 2284148beb61SHenry Tieman /* clear next_to_watch to prevent false hangs */ 2285148beb61SHenry Tieman tx_buf->next_to_watch = NULL; 2286148beb61SHenry Tieman tx_desc->buf_addr = 0; 2287148beb61SHenry Tieman tx_desc->cmd_type_offset_bsz = 0; 2288148beb61SHenry Tieman 2289148beb61SHenry Tieman /* move past filter desc */ 2290148beb61SHenry Tieman tx_buf++; 2291148beb61SHenry Tieman tx_desc++; 2292148beb61SHenry Tieman i++; 2293148beb61SHenry Tieman if (unlikely(!i)) { 2294148beb61SHenry Tieman i -= tx_ring->count; 2295148beb61SHenry Tieman tx_buf = tx_ring->tx_buf; 2296148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, 0); 2297148beb61SHenry Tieman } 2298148beb61SHenry Tieman 2299148beb61SHenry Tieman /* unmap the data header */ 2300148beb61SHenry Tieman if (dma_unmap_len(tx_buf, len)) 2301148beb61SHenry Tieman dma_unmap_single(tx_ring->dev, 2302148beb61SHenry Tieman dma_unmap_addr(tx_buf, dma), 2303148beb61SHenry Tieman dma_unmap_len(tx_buf, len), 2304148beb61SHenry Tieman DMA_TO_DEVICE); 2305148beb61SHenry Tieman if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2306148beb61SHenry Tieman devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2307148beb61SHenry Tieman 2308148beb61SHenry Tieman /* clear next_to_watch to prevent false hangs */ 2309148beb61SHenry Tieman tx_buf->raw_buf = NULL; 2310148beb61SHenry Tieman tx_buf->tx_flags = 0; 2311148beb61SHenry Tieman tx_buf->next_to_watch = NULL; 2312148beb61SHenry Tieman dma_unmap_len_set(tx_buf, len, 0); 2313148beb61SHenry Tieman tx_desc->buf_addr = 0; 2314148beb61SHenry Tieman tx_desc->cmd_type_offset_bsz = 0; 2315148beb61SHenry Tieman 2316148beb61SHenry Tieman /* move past eop_desc for start of next FD desc */ 2317148beb61SHenry Tieman tx_buf++; 2318148beb61SHenry Tieman tx_desc++; 2319148beb61SHenry Tieman i++; 2320148beb61SHenry Tieman if (unlikely(!i)) { 2321148beb61SHenry Tieman i -= tx_ring->count; 2322148beb61SHenry Tieman tx_buf = tx_ring->tx_buf; 2323148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, 0); 2324148beb61SHenry Tieman } 2325148beb61SHenry Tieman 2326148beb61SHenry Tieman budget--; 2327148beb61SHenry Tieman } while (likely(budget)); 2328148beb61SHenry Tieman 2329148beb61SHenry Tieman i += tx_ring->count; 2330148beb61SHenry Tieman tx_ring->next_to_clean = i; 2331148beb61SHenry Tieman 2332148beb61SHenry Tieman /* re-enable interrupt if needed */ 2333148beb61SHenry Tieman ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2334148beb61SHenry Tieman } 2335