// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

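/**
 * txring_txq - Find the netdev Tx queue paired with a given Tx ring
 * @ring: Tx ring whose netdev queue should be looked up
 */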
static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
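	/* i is kept as a negative offset from the end of the ring so the wrap
	 * point shows up as i reaching zero; it is converted back to a normal
	 * index before being written to next_to_clean below.
	 */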

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	if (rx_ring->xsk_pool)
		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
	else
		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
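	/* AF_XDP (zero-copy) rings track buffers in the xdp_buf array instead
	 * of rx_buf, so free whichever backing array this ring was using.
	 */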
	if (rx_ring->xsk_pool) {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
	} else {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (err == ICE_XDP_CONSUMED)
			goto out_failure;
		return err;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
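	/* Unknown and aborted actions, as well as XDP_TX/XDP_REDIRECT
	 * failures, all funnel through the exception tracepoint below and
	 * count the frame as consumed (dropped).
	 */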
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

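	/* When the XDP Tx rings are shared by more than one CPU (the locking
	 * static key is enabled), fold the CPU id onto the available rings
	 * and serialize access with the ring's Tx lock.
	 */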
	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
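	/* Take a large up-front page reference and track usage through
	 * pagecnt_bias so that reusing halves of the page does not require
	 * an atomic refcount update on every frame.
	 */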
Returning 693cb7db356SBrett Creeley * true signals to the caller that we didn't replace cleaned_count buffers and 694cb7db356SBrett Creeley * there is more work to do. 695cb7db356SBrett Creeley * 696cb7db356SBrett Creeley * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx 697cb7db356SBrett Creeley * buffers. Then bump tail at most one time. Grouping like this lets us avoid 698cb7db356SBrett Creeley * multiple tail writes per call. 699cdedef59SAnirudh Venkataramanan */ 700e72bba21SMaciej Fijalkowski bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count) 701cdedef59SAnirudh Venkataramanan { 702cdedef59SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 703cdedef59SAnirudh Venkataramanan u16 ntu = rx_ring->next_to_use; 704cdedef59SAnirudh Venkataramanan struct ice_rx_buf *bi; 705cdedef59SAnirudh Venkataramanan 706cdedef59SAnirudh Venkataramanan /* do nothing if no valid netdev defined */ 707148beb61SHenry Tieman if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || 708148beb61SHenry Tieman !cleaned_count) 709cdedef59SAnirudh Venkataramanan return false; 710cdedef59SAnirudh Venkataramanan 711f9867df6SAnirudh Venkataramanan /* get the Rx descriptor and buffer based on next_to_use */ 712cdedef59SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, ntu); 713cdedef59SAnirudh Venkataramanan bi = &rx_ring->rx_buf[ntu]; 714cdedef59SAnirudh Venkataramanan 715cdedef59SAnirudh Venkataramanan do { 716a1e99685SBrett Creeley /* if we fail here, we have work remaining */ 717cdedef59SAnirudh Venkataramanan if (!ice_alloc_mapped_page(rx_ring, bi)) 718a1e99685SBrett Creeley break; 719cdedef59SAnirudh Venkataramanan 720a65f71feSMaciej Fijalkowski /* sync the buffer for use by the device */ 721a65f71feSMaciej Fijalkowski dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 722a65f71feSMaciej Fijalkowski bi->page_offset, 7237237f5b0SMaciej Fijalkowski rx_ring->rx_buf_len, 724a65f71feSMaciej Fijalkowski DMA_FROM_DEVICE); 725a65f71feSMaciej Fijalkowski 726cdedef59SAnirudh Venkataramanan /* Refresh the desc even if buffer_addrs didn't change 727cdedef59SAnirudh Venkataramanan * because each write-back erases this info. 
728cdedef59SAnirudh Venkataramanan */ 729cdedef59SAnirudh Venkataramanan rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 730cdedef59SAnirudh Venkataramanan 731cdedef59SAnirudh Venkataramanan rx_desc++; 732cdedef59SAnirudh Venkataramanan bi++; 733cdedef59SAnirudh Venkataramanan ntu++; 734cdedef59SAnirudh Venkataramanan if (unlikely(ntu == rx_ring->count)) { 735cdedef59SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, 0); 736cdedef59SAnirudh Venkataramanan bi = rx_ring->rx_buf; 737cdedef59SAnirudh Venkataramanan ntu = 0; 738cdedef59SAnirudh Venkataramanan } 739cdedef59SAnirudh Venkataramanan 740cdedef59SAnirudh Venkataramanan /* clear the status bits for the next_to_use descriptor */ 741cdedef59SAnirudh Venkataramanan rx_desc->wb.status_error0 = 0; 742cdedef59SAnirudh Venkataramanan 743cdedef59SAnirudh Venkataramanan cleaned_count--; 744cdedef59SAnirudh Venkataramanan } while (cleaned_count); 745cdedef59SAnirudh Venkataramanan 746cdedef59SAnirudh Venkataramanan if (rx_ring->next_to_use != ntu) 747cdedef59SAnirudh Venkataramanan ice_release_rx_desc(rx_ring, ntu); 748cdedef59SAnirudh Venkataramanan 749a1e99685SBrett Creeley return !!cleaned_count; 750cdedef59SAnirudh Venkataramanan } 7512b245cb2SAnirudh Venkataramanan 7522b245cb2SAnirudh Venkataramanan /** 7531d032bc7SMaciej Fijalkowski * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse 7541d032bc7SMaciej Fijalkowski * @rx_buf: Rx buffer to adjust 7551d032bc7SMaciej Fijalkowski * @size: Size of adjustment 7562b245cb2SAnirudh Venkataramanan * 7571d032bc7SMaciej Fijalkowski * Update the offset within page so that Rx buf will be ready to be reused. 7581d032bc7SMaciej Fijalkowski * For systems with PAGE_SIZE < 8192 this function will flip the page offset 7591d032bc7SMaciej Fijalkowski * so the second half of page assigned to Rx buffer will be used, otherwise 7604ee656bbSTony Nguyen * the offset is moved by "size" bytes 7612b245cb2SAnirudh Venkataramanan */ 7621d032bc7SMaciej Fijalkowski static void 7631d032bc7SMaciej Fijalkowski ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) 7642b245cb2SAnirudh Venkataramanan { 7652b245cb2SAnirudh Venkataramanan #if (PAGE_SIZE < 8192) 7661d032bc7SMaciej Fijalkowski /* flip page offset to other buffer */ 7671d032bc7SMaciej Fijalkowski rx_buf->page_offset ^= size; 7682b245cb2SAnirudh Venkataramanan #else 7691d032bc7SMaciej Fijalkowski /* move offset up to the next cache line */ 7701d032bc7SMaciej Fijalkowski rx_buf->page_offset += size; 7711d032bc7SMaciej Fijalkowski #endif 7722b245cb2SAnirudh Venkataramanan } 7732b245cb2SAnirudh Venkataramanan 7741d032bc7SMaciej Fijalkowski /** 775bbb97808SMaciej Fijalkowski * ice_can_reuse_rx_page - Determine if page can be reused for another Rx 776bbb97808SMaciej Fijalkowski * @rx_buf: buffer containing the page 7771beb7830SBjörn Töpel * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call 778bbb97808SMaciej Fijalkowski * 779bbb97808SMaciej Fijalkowski * If page is reusable, we have a green light for calling ice_reuse_rx_page, 780bbb97808SMaciej Fijalkowski * which will assign the current buffer to the buffer that next_to_alloc is 781bbb97808SMaciej Fijalkowski * pointing to; otherwise, the DMA mapping needs to be destroyed and 782bbb97808SMaciej Fijalkowski * page freed 783bbb97808SMaciej Fijalkowski */ 7841beb7830SBjörn Töpel static bool 7851beb7830SBjörn Töpel ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) 786bbb97808SMaciej Fijalkowski { 78703c66a13SMaciej Fijalkowski unsigned int 
pagecnt_bias = rx_buf->pagecnt_bias; 788bbb97808SMaciej Fijalkowski struct page *page = rx_buf->page; 7892b245cb2SAnirudh Venkataramanan 790a79afa78SAlexander Lobakin /* avoid re-using remote and pfmemalloc pages */ 791a79afa78SAlexander Lobakin if (!dev_page_is_reusable(page)) 7922b245cb2SAnirudh Venkataramanan return false; 7932b245cb2SAnirudh Venkataramanan 7942b245cb2SAnirudh Venkataramanan #if (PAGE_SIZE < 8192) 7952b245cb2SAnirudh Venkataramanan /* if we are only owner of page we can reuse it */ 7961beb7830SBjörn Töpel if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) 7972b245cb2SAnirudh Venkataramanan return false; 7982b245cb2SAnirudh Venkataramanan #else 7997237f5b0SMaciej Fijalkowski #define ICE_LAST_OFFSET \ 8007237f5b0SMaciej Fijalkowski (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048) 8017237f5b0SMaciej Fijalkowski if (rx_buf->page_offset > ICE_LAST_OFFSET) 8022b245cb2SAnirudh Venkataramanan return false; 8032b245cb2SAnirudh Venkataramanan #endif /* PAGE_SIZE < 8192) */ 8042b245cb2SAnirudh Venkataramanan 80503c66a13SMaciej Fijalkowski /* If we have drained the page fragment pool we need to update 80603c66a13SMaciej Fijalkowski * the pagecnt_bias and page count so that we fully restock the 80703c66a13SMaciej Fijalkowski * number of references the driver holds. 8082b245cb2SAnirudh Venkataramanan */ 80903c66a13SMaciej Fijalkowski if (unlikely(pagecnt_bias == 1)) { 81003c66a13SMaciej Fijalkowski page_ref_add(page, USHRT_MAX - 1); 81103c66a13SMaciej Fijalkowski rx_buf->pagecnt_bias = USHRT_MAX; 81203c66a13SMaciej Fijalkowski } 8132b245cb2SAnirudh Venkataramanan 8142b245cb2SAnirudh Venkataramanan return true; 8152b245cb2SAnirudh Venkataramanan } 8162b245cb2SAnirudh Venkataramanan 8172b245cb2SAnirudh Venkataramanan /** 818712edbbbSMaciej Fijalkowski * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag 8197237f5b0SMaciej Fijalkowski * @rx_ring: Rx descriptor ring to transact packets on 8202b245cb2SAnirudh Venkataramanan * @rx_buf: buffer containing page to add 821712edbbbSMaciej Fijalkowski * @skb: sk_buff to place the data into 822712edbbbSMaciej Fijalkowski * @size: packet length from rx_desc 8232b245cb2SAnirudh Venkataramanan * 8242b245cb2SAnirudh Venkataramanan * This function will add the data contained in rx_buf->page to the skb. 825712edbbbSMaciej Fijalkowski * It will just attach the page as a frag to the skb. 826712edbbbSMaciej Fijalkowski * The function will then update the page offset. 
8272b245cb2SAnirudh Venkataramanan */ 8281d032bc7SMaciej Fijalkowski static void 829e72bba21SMaciej Fijalkowski ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 8307237f5b0SMaciej Fijalkowski struct sk_buff *skb, unsigned int size) 8312b245cb2SAnirudh Venkataramanan { 832712edbbbSMaciej Fijalkowski #if (PAGE_SIZE >= 8192) 833f1b1f409SMaciej Fijalkowski unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); 8342b245cb2SAnirudh Venkataramanan #else 8357237f5b0SMaciej Fijalkowski unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 836712edbbbSMaciej Fijalkowski #endif 8371857ca42SMaciej Fijalkowski 838ac6f733aSMitch Williams if (!size) 839ac6f733aSMitch Williams return; 840712edbbbSMaciej Fijalkowski skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, 841712edbbbSMaciej Fijalkowski rx_buf->page_offset, size, truesize); 8422b245cb2SAnirudh Venkataramanan 843712edbbbSMaciej Fijalkowski /* page is being used so we must update the page offset */ 8441d032bc7SMaciej Fijalkowski ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 8452b245cb2SAnirudh Venkataramanan } 8462b245cb2SAnirudh Venkataramanan 8472b245cb2SAnirudh Venkataramanan /** 8482b245cb2SAnirudh Venkataramanan * ice_reuse_rx_page - page flip buffer and store it back on the ring 849d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to store buffers on 8502b245cb2SAnirudh Venkataramanan * @old_buf: donor buffer to have page reused 8512b245cb2SAnirudh Venkataramanan * 8522b245cb2SAnirudh Venkataramanan * Synchronizes page for reuse by the adapter 8532b245cb2SAnirudh Venkataramanan */ 854c8b7abddSBruce Allan static void 855e72bba21SMaciej Fijalkowski ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) 8562b245cb2SAnirudh Venkataramanan { 8572b245cb2SAnirudh Venkataramanan u16 nta = rx_ring->next_to_alloc; 8582b245cb2SAnirudh Venkataramanan struct ice_rx_buf *new_buf; 8592b245cb2SAnirudh Venkataramanan 8602b245cb2SAnirudh Venkataramanan new_buf = &rx_ring->rx_buf[nta]; 8612b245cb2SAnirudh Venkataramanan 8622b245cb2SAnirudh Venkataramanan /* update, and store next to alloc */ 8632b245cb2SAnirudh Venkataramanan nta++; 8642b245cb2SAnirudh Venkataramanan rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 8652b245cb2SAnirudh Venkataramanan 866712edbbbSMaciej Fijalkowski /* Transfer page from old buffer to new buffer. 867712edbbbSMaciej Fijalkowski * Move each member individually to avoid possible store 868712edbbbSMaciej Fijalkowski * forwarding stalls and unnecessary copy of skb. 869712edbbbSMaciej Fijalkowski */ 870712edbbbSMaciej Fijalkowski new_buf->dma = old_buf->dma; 871712edbbbSMaciej Fijalkowski new_buf->page = old_buf->page; 872712edbbbSMaciej Fijalkowski new_buf->page_offset = old_buf->page_offset; 873712edbbbSMaciej Fijalkowski new_buf->pagecnt_bias = old_buf->pagecnt_bias; 8742b245cb2SAnirudh Venkataramanan } 8752b245cb2SAnirudh Venkataramanan 8762b245cb2SAnirudh Venkataramanan /** 8776c869cb7SMaciej Fijalkowski * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use 878d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 8796c869cb7SMaciej Fijalkowski * @size: size of buffer to add to skb 8801beb7830SBjörn Töpel * @rx_buf_pgcnt: rx_buf page refcount 8812b245cb2SAnirudh Venkataramanan * 8826c869cb7SMaciej Fijalkowski * This function will pull an Rx buffer from the ring and synchronize it 8836c869cb7SMaciej Fijalkowski * for use by the CPU. 
8842b245cb2SAnirudh Venkataramanan */ 8856c869cb7SMaciej Fijalkowski static struct ice_rx_buf * 886e72bba21SMaciej Fijalkowski ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, 88729b82f2aSMaciej Fijalkowski int *rx_buf_pgcnt) 8882b245cb2SAnirudh Venkataramanan { 8892b245cb2SAnirudh Venkataramanan struct ice_rx_buf *rx_buf; 8902b245cb2SAnirudh Venkataramanan 8912b245cb2SAnirudh Venkataramanan rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; 8921beb7830SBjörn Töpel *rx_buf_pgcnt = 8931beb7830SBjörn Töpel #if (PAGE_SIZE < 8192) 8941beb7830SBjörn Töpel page_count(rx_buf->page); 8951beb7830SBjörn Töpel #else 8961beb7830SBjörn Töpel 0; 8971beb7830SBjörn Töpel #endif 8986c869cb7SMaciej Fijalkowski prefetchw(rx_buf->page); 8992b245cb2SAnirudh Venkataramanan 900ac6f733aSMitch Williams if (!size) 901ac6f733aSMitch Williams return rx_buf; 9026c869cb7SMaciej Fijalkowski /* we are reusing so sync this buffer for CPU use */ 9036c869cb7SMaciej Fijalkowski dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 9046c869cb7SMaciej Fijalkowski rx_buf->page_offset, size, 9056c869cb7SMaciej Fijalkowski DMA_FROM_DEVICE); 9062b245cb2SAnirudh Venkataramanan 90703c66a13SMaciej Fijalkowski /* We have pulled a buffer for use, so decrement pagecnt_bias */ 90803c66a13SMaciej Fijalkowski rx_buf->pagecnt_bias--; 90903c66a13SMaciej Fijalkowski 9106c869cb7SMaciej Fijalkowski return rx_buf; 9116c869cb7SMaciej Fijalkowski } 9126c869cb7SMaciej Fijalkowski 9136c869cb7SMaciej Fijalkowski /** 914aaf27254SMaciej Fijalkowski * ice_build_skb - Build skb around an existing buffer 915aaf27254SMaciej Fijalkowski * @rx_ring: Rx descriptor ring to transact packets on 916aaf27254SMaciej Fijalkowski * @rx_buf: Rx buffer to pull data from 917aaf27254SMaciej Fijalkowski * @xdp: xdp_buff pointing to the data 918aaf27254SMaciej Fijalkowski * 919aaf27254SMaciej Fijalkowski * This function builds an skb around an existing Rx buffer, taking care 920aaf27254SMaciej Fijalkowski * to set up the skb correctly and avoid any memcpy overhead. 921aaf27254SMaciej Fijalkowski */ 922aaf27254SMaciej Fijalkowski static struct sk_buff * 923e72bba21SMaciej Fijalkowski ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 924aaf27254SMaciej Fijalkowski struct xdp_buff *xdp) 925aaf27254SMaciej Fijalkowski { 92688865fc4SKarol Kolacinski u8 metasize = xdp->data - xdp->data_meta; 927aaf27254SMaciej Fijalkowski #if (PAGE_SIZE < 8192) 928aaf27254SMaciej Fijalkowski unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 929aaf27254SMaciej Fijalkowski #else 930aaf27254SMaciej Fijalkowski unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 931aaf27254SMaciej Fijalkowski SKB_DATA_ALIGN(xdp->data_end - 932aaf27254SMaciej Fijalkowski xdp->data_hard_start); 933aaf27254SMaciej Fijalkowski #endif 934aaf27254SMaciej Fijalkowski struct sk_buff *skb; 935aaf27254SMaciej Fijalkowski 936aaf27254SMaciej Fijalkowski /* Prefetch first cache line of first page. If xdp->data_meta 937aaf27254SMaciej Fijalkowski * is unused, this points exactly as xdp->data, otherwise we 938aaf27254SMaciej Fijalkowski * likely have a consumer accessing first few bytes of meta 939aaf27254SMaciej Fijalkowski * data, and then actual data. 
940aaf27254SMaciej Fijalkowski */ 941f468f21bSTariq Toukan net_prefetch(xdp->data_meta); 942aaf27254SMaciej Fijalkowski /* build an skb around the page buffer */ 943aaf27254SMaciej Fijalkowski skb = build_skb(xdp->data_hard_start, truesize); 944aaf27254SMaciej Fijalkowski if (unlikely(!skb)) 945aaf27254SMaciej Fijalkowski return NULL; 946aaf27254SMaciej Fijalkowski 947aaf27254SMaciej Fijalkowski /* must to record Rx queue, otherwise OS features such as 948aaf27254SMaciej Fijalkowski * symmetric queue won't work 949aaf27254SMaciej Fijalkowski */ 950aaf27254SMaciej Fijalkowski skb_record_rx_queue(skb, rx_ring->q_index); 951aaf27254SMaciej Fijalkowski 952aaf27254SMaciej Fijalkowski /* update pointers within the skb to store the data */ 953aaf27254SMaciej Fijalkowski skb_reserve(skb, xdp->data - xdp->data_hard_start); 954aaf27254SMaciej Fijalkowski __skb_put(skb, xdp->data_end - xdp->data); 955aaf27254SMaciej Fijalkowski if (metasize) 956aaf27254SMaciej Fijalkowski skb_metadata_set(skb, metasize); 957aaf27254SMaciej Fijalkowski 958aaf27254SMaciej Fijalkowski /* buffer is used by skb, update page_offset */ 959aaf27254SMaciej Fijalkowski ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 960aaf27254SMaciej Fijalkowski 961aaf27254SMaciej Fijalkowski return skb; 962aaf27254SMaciej Fijalkowski } 963aaf27254SMaciej Fijalkowski 964aaf27254SMaciej Fijalkowski /** 965712edbbbSMaciej Fijalkowski * ice_construct_skb - Allocate skb and populate it 9662b245cb2SAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 9676c869cb7SMaciej Fijalkowski * @rx_buf: Rx buffer to pull data from 968efc2214bSMaciej Fijalkowski * @xdp: xdp_buff pointing to the data 9692b245cb2SAnirudh Venkataramanan * 970712edbbbSMaciej Fijalkowski * This function allocates an skb. It then populates it with the page 971712edbbbSMaciej Fijalkowski * data from the current receive descriptor, taking care to set up the 972712edbbbSMaciej Fijalkowski * skb correctly. 
9732b245cb2SAnirudh Venkataramanan */ 974c8b7abddSBruce Allan static struct sk_buff * 975e72bba21SMaciej Fijalkowski ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 976efc2214bSMaciej Fijalkowski struct xdp_buff *xdp) 9772b245cb2SAnirudh Venkataramanan { 978efc2214bSMaciej Fijalkowski unsigned int size = xdp->data_end - xdp->data; 979712edbbbSMaciej Fijalkowski unsigned int headlen; 980712edbbbSMaciej Fijalkowski struct sk_buff *skb; 9812b245cb2SAnirudh Venkataramanan 9822b245cb2SAnirudh Venkataramanan /* prefetch first cache line of first page */ 983f468f21bSTariq Toukan net_prefetch(xdp->data); 9842b245cb2SAnirudh Venkataramanan 9852b245cb2SAnirudh Venkataramanan /* allocate a skb to store the frags */ 986712edbbbSMaciej Fijalkowski skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, 9872b245cb2SAnirudh Venkataramanan GFP_ATOMIC | __GFP_NOWARN); 988712edbbbSMaciej Fijalkowski if (unlikely(!skb)) 9892b245cb2SAnirudh Venkataramanan return NULL; 9902b245cb2SAnirudh Venkataramanan 9912b245cb2SAnirudh Venkataramanan skb_record_rx_queue(skb, rx_ring->q_index); 992712edbbbSMaciej Fijalkowski /* Determine available headroom for copy */ 993712edbbbSMaciej Fijalkowski headlen = size; 994712edbbbSMaciej Fijalkowski if (headlen > ICE_RX_HDR_SIZE) 995efc2214bSMaciej Fijalkowski headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); 9962b245cb2SAnirudh Venkataramanan 997712edbbbSMaciej Fijalkowski /* align pull length to size of long to optimize memcpy performance */ 998efc2214bSMaciej Fijalkowski memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, 999efc2214bSMaciej Fijalkowski sizeof(long))); 1000712edbbbSMaciej Fijalkowski 1001712edbbbSMaciej Fijalkowski /* if we exhaust the linear part then add what is left as a frag */ 1002712edbbbSMaciej Fijalkowski size -= headlen; 1003712edbbbSMaciej Fijalkowski if (size) { 1004712edbbbSMaciej Fijalkowski #if (PAGE_SIZE >= 8192) 1005712edbbbSMaciej Fijalkowski unsigned int truesize = SKB_DATA_ALIGN(size); 1006712edbbbSMaciej Fijalkowski #else 10077237f5b0SMaciej Fijalkowski unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 1008712edbbbSMaciej Fijalkowski #endif 1009712edbbbSMaciej Fijalkowski skb_add_rx_frag(skb, 0, rx_buf->page, 1010712edbbbSMaciej Fijalkowski rx_buf->page_offset + headlen, size, truesize); 1011712edbbbSMaciej Fijalkowski /* buffer is used by skb, update page_offset */ 1012712edbbbSMaciej Fijalkowski ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 10132b245cb2SAnirudh Venkataramanan } else { 1014712edbbbSMaciej Fijalkowski /* buffer is unused, reset bias back to rx_buf; data was copied 1015712edbbbSMaciej Fijalkowski * onto skb's linear part so there's no need for adjusting 1016712edbbbSMaciej Fijalkowski * page offset and we can reuse this buffer as-is 1017712edbbbSMaciej Fijalkowski */ 1018712edbbbSMaciej Fijalkowski rx_buf->pagecnt_bias++; 10192b245cb2SAnirudh Venkataramanan } 10202b245cb2SAnirudh Venkataramanan 10212b245cb2SAnirudh Venkataramanan return skb; 10222b245cb2SAnirudh Venkataramanan } 10232b245cb2SAnirudh Venkataramanan 10242b245cb2SAnirudh Venkataramanan /** 10251d032bc7SMaciej Fijalkowski * ice_put_rx_buf - Clean up used buffer and either recycle or free 10261d032bc7SMaciej Fijalkowski * @rx_ring: Rx descriptor ring to transact packets on 10271d032bc7SMaciej Fijalkowski * @rx_buf: Rx buffer to pull data from 10281beb7830SBjörn Töpel * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect() 10292b245cb2SAnirudh Venkataramanan * 1030efc2214bSMaciej Fijalkowski * This 
function will update next_to_clean and then clean up the contents 1031efc2214bSMaciej Fijalkowski * of the rx_buf. It will either recycle the buffer or unmap it and free 1032efc2214bSMaciej Fijalkowski * the associated resources. 10332b245cb2SAnirudh Venkataramanan */ 10341beb7830SBjörn Töpel static void 1035e72bba21SMaciej Fijalkowski ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 10361beb7830SBjörn Töpel int rx_buf_pgcnt) 10372b245cb2SAnirudh Venkataramanan { 103888865fc4SKarol Kolacinski u16 ntc = rx_ring->next_to_clean + 1; 1039efc2214bSMaciej Fijalkowski 1040efc2214bSMaciej Fijalkowski /* fetch, update, and store next to clean */ 1041efc2214bSMaciej Fijalkowski ntc = (ntc < rx_ring->count) ? ntc : 0; 1042efc2214bSMaciej Fijalkowski rx_ring->next_to_clean = ntc; 1043efc2214bSMaciej Fijalkowski 1044ac6f733aSMitch Williams if (!rx_buf) 1045ac6f733aSMitch Williams return; 1046ac6f733aSMitch Williams 10471beb7830SBjörn Töpel if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { 1048ac6f733aSMitch Williams /* hand second half of page back to the ring */ 10492b245cb2SAnirudh Venkataramanan ice_reuse_rx_page(rx_ring, rx_buf); 10502b245cb2SAnirudh Venkataramanan } else { 10512b245cb2SAnirudh Venkataramanan /* we are not reusing the buffer so unmap it */ 10527237f5b0SMaciej Fijalkowski dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, 10537237f5b0SMaciej Fijalkowski ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 10547237f5b0SMaciej Fijalkowski ICE_RX_DMA_ATTR); 105503c66a13SMaciej Fijalkowski __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 10562b245cb2SAnirudh Venkataramanan } 10572b245cb2SAnirudh Venkataramanan 10582b245cb2SAnirudh Venkataramanan /* clear contents of buffer_info */ 10592b245cb2SAnirudh Venkataramanan rx_buf->page = NULL; 10602b245cb2SAnirudh Venkataramanan } 10612b245cb2SAnirudh Venkataramanan 10622b245cb2SAnirudh Venkataramanan /** 10632b245cb2SAnirudh Venkataramanan * ice_is_non_eop - process handling of non-EOP buffers 10642b245cb2SAnirudh Venkataramanan * @rx_ring: Rx ring being processed 10652b245cb2SAnirudh Venkataramanan * @rx_desc: Rx descriptor for current buffer 10662b245cb2SAnirudh Venkataramanan * 1067efc2214bSMaciej Fijalkowski * If the buffer is an EOP buffer, this function exits returning false, 1068efc2214bSMaciej Fijalkowski * otherwise return true indicating that this is in fact a non-EOP buffer. 
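 * (Illustrative: with 2048 byte Rx buffers, a 9000 byte jumbo frame spans five
 * descriptors; only the last one carries EOF, so this returns true for the
 * first four and the caller keeps adding those fragments to the same skb on
 * the following loop iterations.)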
10692b245cb2SAnirudh Venkataramanan */ 1070c8b7abddSBruce Allan static bool 1071e72bba21SMaciej Fijalkowski ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) 10722b245cb2SAnirudh Venkataramanan { 10732b245cb2SAnirudh Venkataramanan /* if we are the last buffer then there is nothing else to do */ 10742b245cb2SAnirudh Venkataramanan #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 10752b245cb2SAnirudh Venkataramanan if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) 10762b245cb2SAnirudh Venkataramanan return false; 10772b245cb2SAnirudh Venkataramanan 10782b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.non_eop_descs++; 10792b245cb2SAnirudh Venkataramanan 10802b245cb2SAnirudh Venkataramanan return true; 10812b245cb2SAnirudh Venkataramanan } 10822b245cb2SAnirudh Venkataramanan 10832b245cb2SAnirudh Venkataramanan /** 10842b245cb2SAnirudh Venkataramanan * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1085d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 10862b245cb2SAnirudh Venkataramanan * @budget: Total limit on number of packets to process 10872b245cb2SAnirudh Venkataramanan * 10882b245cb2SAnirudh Venkataramanan * This function provides a "bounce buffer" approach to Rx interrupt 10892b245cb2SAnirudh Venkataramanan * processing. The advantage to this is that on systems that have 10902b245cb2SAnirudh Venkataramanan * expensive overhead for IOMMU access this provides a means of avoiding 10912b245cb2SAnirudh Venkataramanan * it by maintaining the mapping of the page to the system. 10922b245cb2SAnirudh Venkataramanan * 10932b245cb2SAnirudh Venkataramanan * Returns amount of work completed 10942b245cb2SAnirudh Venkataramanan */ 1095e72bba21SMaciej Fijalkowski int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) 10962b245cb2SAnirudh Venkataramanan { 109743b5169dSLorenzo Bianconi unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; 10982b245cb2SAnirudh Venkataramanan u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 1099f1b1f409SMaciej Fijalkowski unsigned int offset = rx_ring->rx_offset; 1100eb087cd8SMaciej Fijalkowski struct ice_tx_ring *xdp_ring = NULL; 1101efc2214bSMaciej Fijalkowski unsigned int xdp_res, xdp_xmit = 0; 110229b82f2aSMaciej Fijalkowski struct sk_buff *skb = rx_ring->skb; 1103efc2214bSMaciej Fijalkowski struct bpf_prog *xdp_prog = NULL; 1104efc2214bSMaciej Fijalkowski struct xdp_buff xdp; 1105cb7db356SBrett Creeley bool failure; 11062b245cb2SAnirudh Venkataramanan 1107d4ecdbf7SJesper Dangaard Brouer /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 1108d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE < 8192) 110943b5169dSLorenzo Bianconi frame_sz = ice_rx_frame_truesize(rx_ring, 0); 1110d4ecdbf7SJesper Dangaard Brouer #endif 111143b5169dSLorenzo Bianconi xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 1112efc2214bSMaciej Fijalkowski 1113eb087cd8SMaciej Fijalkowski xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1114eb087cd8SMaciej Fijalkowski if (xdp_prog) 1115eb087cd8SMaciej Fijalkowski xdp_ring = rx_ring->xdp_ring; 1116eb087cd8SMaciej Fijalkowski 1117f9867df6SAnirudh Venkataramanan /* start the loop to process Rx packets bounded by 'budget' */ 11182b245cb2SAnirudh Venkataramanan while (likely(total_rx_pkts < (unsigned int)budget)) { 11192b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 11206c869cb7SMaciej Fijalkowski struct ice_rx_buf *rx_buf; 1121be9df4afSLorenzo Bianconi unsigned char *hard_start; 11226c869cb7SMaciej Fijalkowski unsigned 
int size; 11232b245cb2SAnirudh Venkataramanan u16 stat_err_bits; 11241beb7830SBjörn Töpel int rx_buf_pgcnt; 11252b245cb2SAnirudh Venkataramanan u16 vlan_tag = 0; 1126dda90cb9SJesse Brandeburg u16 rx_ptype; 11272b245cb2SAnirudh Venkataramanan 1128f9867df6SAnirudh Venkataramanan /* get the Rx desc from Rx ring based on 'next_to_clean' */ 11292b245cb2SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 11302b245cb2SAnirudh Venkataramanan 11312b245cb2SAnirudh Venkataramanan /* status_error_len will always be zero for unused descriptors 11322b245cb2SAnirudh Venkataramanan * because it's cleared in cleanup, and overlaps with hdr_addr 11332b245cb2SAnirudh Venkataramanan * which is always zero because packet split isn't used, if the 11342b245cb2SAnirudh Venkataramanan * hardware wrote DD then it will be non-zero 11352b245cb2SAnirudh Venkataramanan */ 11362b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 11372b245cb2SAnirudh Venkataramanan if (!ice_test_staterr(rx_desc, stat_err_bits)) 11382b245cb2SAnirudh Venkataramanan break; 11392b245cb2SAnirudh Venkataramanan 11402b245cb2SAnirudh Venkataramanan /* This memory barrier is needed to keep us from reading 11412b245cb2SAnirudh Venkataramanan * any other fields out of the rx_desc until we know the 11422b245cb2SAnirudh Venkataramanan * DD bit is set. 11432b245cb2SAnirudh Venkataramanan */ 11442b245cb2SAnirudh Venkataramanan dma_rmb(); 11452b245cb2SAnirudh Venkataramanan 11463089cf6dSJesse Brandeburg ice_trace(clean_rx_irq, rx_ring, rx_desc); 1147148beb61SHenry Tieman if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 1148d6218317SQi Zhang struct ice_vsi *ctrl_vsi = rx_ring->vsi; 1149d6218317SQi Zhang 1150d6218317SQi Zhang if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && 1151d6218317SQi Zhang ctrl_vsi->vf_id != ICE_INVAL_VFID) 1152d6218317SQi Zhang ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); 11531beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, NULL, 0); 1154148beb61SHenry Tieman cleaned_count++; 1155148beb61SHenry Tieman continue; 1156148beb61SHenry Tieman } 1157148beb61SHenry Tieman 11586c869cb7SMaciej Fijalkowski size = le16_to_cpu(rx_desc->wb.pkt_len) & 11596c869cb7SMaciej Fijalkowski ICE_RX_FLX_DESC_PKT_LEN_M; 11602b245cb2SAnirudh Venkataramanan 1161ac6f733aSMitch Williams /* retrieve a buffer from the ring */ 116229b82f2aSMaciej Fijalkowski rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); 1163ac6f733aSMitch Williams 1164efc2214bSMaciej Fijalkowski if (!size) { 1165efc2214bSMaciej Fijalkowski xdp.data = NULL; 1166efc2214bSMaciej Fijalkowski xdp.data_end = NULL; 1167aaf27254SMaciej Fijalkowski xdp.data_hard_start = NULL; 1168aaf27254SMaciej Fijalkowski xdp.data_meta = NULL; 1169efc2214bSMaciej Fijalkowski goto construct_skb; 1170efc2214bSMaciej Fijalkowski } 1171efc2214bSMaciej Fijalkowski 1172be9df4afSLorenzo Bianconi hard_start = page_address(rx_buf->page) + rx_buf->page_offset - 1173be9df4afSLorenzo Bianconi offset; 1174be9df4afSLorenzo Bianconi xdp_prepare_buff(&xdp, hard_start, offset, size, true); 1175d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE > 4096) 1176d4ecdbf7SJesper Dangaard Brouer /* At larger PAGE_SIZE, frame_sz depend on len size */ 1177d4ecdbf7SJesper Dangaard Brouer xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 1178d4ecdbf7SJesper Dangaard Brouer #endif 1179efc2214bSMaciej Fijalkowski 118049589b23SToke Høiland-Jørgensen if (!xdp_prog) 1181efc2214bSMaciej Fijalkowski goto construct_skb; 1182efc2214bSMaciej Fijalkowski 1183eb087cd8SMaciej Fijalkowski 
xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring); 118459bb0808SMaciej Fijalkowski if (!xdp_res) 118559bb0808SMaciej Fijalkowski goto construct_skb; 1186efc2214bSMaciej Fijalkowski if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1187efc2214bSMaciej Fijalkowski xdp_xmit |= xdp_res; 1188d4ecdbf7SJesper Dangaard Brouer ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 1189efc2214bSMaciej Fijalkowski } else { 1190efc2214bSMaciej Fijalkowski rx_buf->pagecnt_bias++; 1191efc2214bSMaciej Fijalkowski } 1192efc2214bSMaciej Fijalkowski total_rx_bytes += size; 1193efc2214bSMaciej Fijalkowski total_rx_pkts++; 1194efc2214bSMaciej Fijalkowski 1195efc2214bSMaciej Fijalkowski cleaned_count++; 11961beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1197efc2214bSMaciej Fijalkowski continue; 1198efc2214bSMaciej Fijalkowski construct_skb: 11991f45ebe0SMitch Williams if (skb) { 12007237f5b0SMaciej Fijalkowski ice_add_rx_frag(rx_ring, rx_buf, skb, size); 12011f45ebe0SMitch Williams } else if (likely(xdp.data)) { 12021f45ebe0SMitch Williams if (ice_ring_uses_build_skb(rx_ring)) 1203aaf27254SMaciej Fijalkowski skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1204712edbbbSMaciej Fijalkowski else 1205efc2214bSMaciej Fijalkowski skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 12061f45ebe0SMitch Williams } 1207712edbbbSMaciej Fijalkowski /* exit if we failed to retrieve a buffer */ 1208712edbbbSMaciej Fijalkowski if (!skb) { 1209712edbbbSMaciej Fijalkowski rx_ring->rx_stats.alloc_buf_failed++; 1210ac6f733aSMitch Williams if (rx_buf) 1211712edbbbSMaciej Fijalkowski rx_buf->pagecnt_bias++; 12122b245cb2SAnirudh Venkataramanan break; 1213712edbbbSMaciej Fijalkowski } 12142b245cb2SAnirudh Venkataramanan 12151beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 12162b245cb2SAnirudh Venkataramanan cleaned_count++; 12172b245cb2SAnirudh Venkataramanan 12182b245cb2SAnirudh Venkataramanan /* skip if it is NOP desc */ 121929b82f2aSMaciej Fijalkowski if (ice_is_non_eop(rx_ring, rx_desc)) 12202b245cb2SAnirudh Venkataramanan continue; 12212b245cb2SAnirudh Venkataramanan 12222b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 12232b245cb2SAnirudh Venkataramanan if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 12242b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 12252b245cb2SAnirudh Venkataramanan continue; 12262b245cb2SAnirudh Venkataramanan } 12272b245cb2SAnirudh Venkataramanan 12282b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 12292b245cb2SAnirudh Venkataramanan if (ice_test_staterr(rx_desc, stat_err_bits)) 12302b245cb2SAnirudh Venkataramanan vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 12312b245cb2SAnirudh Venkataramanan 1232133f4883SKrzysztof Kazimierczak /* pad the skb if needed, to make a valid ethernet frame */ 1233133f4883SKrzysztof Kazimierczak if (eth_skb_pad(skb)) { 12342b245cb2SAnirudh Venkataramanan skb = NULL; 12352b245cb2SAnirudh Venkataramanan continue; 12362b245cb2SAnirudh Venkataramanan } 12372b245cb2SAnirudh Venkataramanan 12382b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 12392b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 12402b245cb2SAnirudh Venkataramanan 1241d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 12426503b659SJesse Brandeburg rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 12436503b659SJesse Brandeburg ICE_RX_FLEX_DESC_PTYPE_M; 12446503b659SJesse Brandeburg 1245d76a60baSAnirudh Venkataramanan 
ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1246d76a60baSAnirudh Venkataramanan 12473089cf6dSJesse Brandeburg ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); 12482b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 12492b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 125029b82f2aSMaciej Fijalkowski skb = NULL; 12512b245cb2SAnirudh Venkataramanan 12522b245cb2SAnirudh Venkataramanan /* update budget accounting */ 12532b245cb2SAnirudh Venkataramanan total_rx_pkts++; 12542b245cb2SAnirudh Venkataramanan } 12552b245cb2SAnirudh Venkataramanan 1256cb7db356SBrett Creeley /* return up to cleaned_count buffers to hardware */ 1257cb7db356SBrett Creeley failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1258cb7db356SBrett Creeley 1259efc2214bSMaciej Fijalkowski if (xdp_prog) 1260eb087cd8SMaciej Fijalkowski ice_finalize_xdp_rx(xdp_ring, xdp_xmit); 126129b82f2aSMaciej Fijalkowski rx_ring->skb = skb; 1262efc2214bSMaciej Fijalkowski 12632d4238f5SKrzysztof Kazimierczak ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 12642b245cb2SAnirudh Venkataramanan 12652b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 12662b245cb2SAnirudh Venkataramanan return failure ? budget : (int)total_rx_pkts; 12672b245cb2SAnirudh Venkataramanan } 12682b245cb2SAnirudh Venkataramanan 1269d8eb7ad5SJesse Brandeburg static void __ice_update_sample(struct ice_q_vector *q_vector, 1270d8eb7ad5SJesse Brandeburg struct ice_ring_container *rc, 1271d8eb7ad5SJesse Brandeburg struct dim_sample *sample, 1272d8eb7ad5SJesse Brandeburg bool is_tx) 1273d8eb7ad5SJesse Brandeburg { 1274d8eb7ad5SJesse Brandeburg u64 packets = 0, bytes = 0; 1275d8eb7ad5SJesse Brandeburg 1276d8eb7ad5SJesse Brandeburg if (is_tx) { 1277d8eb7ad5SJesse Brandeburg struct ice_tx_ring *tx_ring; 1278d8eb7ad5SJesse Brandeburg 1279d8eb7ad5SJesse Brandeburg ice_for_each_tx_ring(tx_ring, *rc) { 1280d8eb7ad5SJesse Brandeburg packets += tx_ring->stats.pkts; 1281d8eb7ad5SJesse Brandeburg bytes += tx_ring->stats.bytes; 1282d8eb7ad5SJesse Brandeburg } 1283d8eb7ad5SJesse Brandeburg } else { 1284d8eb7ad5SJesse Brandeburg struct ice_rx_ring *rx_ring; 1285d8eb7ad5SJesse Brandeburg 1286d8eb7ad5SJesse Brandeburg ice_for_each_rx_ring(rx_ring, *rc) { 1287d8eb7ad5SJesse Brandeburg packets += rx_ring->stats.pkts; 1288d8eb7ad5SJesse Brandeburg bytes += rx_ring->stats.bytes; 1289d8eb7ad5SJesse Brandeburg } 1290d8eb7ad5SJesse Brandeburg } 1291d8eb7ad5SJesse Brandeburg 1292d8eb7ad5SJesse Brandeburg dim_update_sample(q_vector->total_events, packets, bytes, sample); 1293d8eb7ad5SJesse Brandeburg sample->comp_ctr = 0; 1294d8eb7ad5SJesse Brandeburg 1295d8eb7ad5SJesse Brandeburg /* if dim settings get stale, like when not updated for 1 1296d8eb7ad5SJesse Brandeburg * second or longer, force it to start again. This addresses the 1297d8eb7ad5SJesse Brandeburg * frequent case of an idle queue being switched to by the 1298d8eb7ad5SJesse Brandeburg * scheduler. The 1,000 here means 1,000 milliseconds. 
1299d8eb7ad5SJesse Brandeburg */ 1300d8eb7ad5SJesse Brandeburg if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) 1301d8eb7ad5SJesse Brandeburg rc->dim.state = DIM_START_MEASURE; 1302d8eb7ad5SJesse Brandeburg } 1303d8eb7ad5SJesse Brandeburg 13042b245cb2SAnirudh Venkataramanan /** 1305cdf1f1f1SJacob Keller * ice_net_dim - Update net DIM algorithm 1306cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt 1307711987bbSBrett Creeley * 1308cdf1f1f1SJacob Keller * Create a DIM sample and notify net_dim() so that it can possibly decide 1309cdf1f1f1SJacob Keller * a new ITR value based on incoming packets, bytes, and interrupts. 1310711987bbSBrett Creeley * 1311cdf1f1f1SJacob Keller * This function is a no-op if the ring is not configured to dynamic ITR. 1312711987bbSBrett Creeley */ 1313cdf1f1f1SJacob Keller static void ice_net_dim(struct ice_q_vector *q_vector) 131464a59d05SAnirudh Venkataramanan { 1315cdf1f1f1SJacob Keller struct ice_ring_container *tx = &q_vector->tx; 1316cdf1f1f1SJacob Keller struct ice_ring_container *rx = &q_vector->rx; 1317cdf1f1f1SJacob Keller 1318d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(tx)) { 1319d8eb7ad5SJesse Brandeburg struct dim_sample dim_sample; 1320cdf1f1f1SJacob Keller 1321d8eb7ad5SJesse Brandeburg __ice_update_sample(q_vector, tx, &dim_sample, true); 1322cdf1f1f1SJacob Keller net_dim(&tx->dim, dim_sample); 1323711987bbSBrett Creeley } 1324711987bbSBrett Creeley 1325d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(rx)) { 1326d8eb7ad5SJesse Brandeburg struct dim_sample dim_sample; 1327cdf1f1f1SJacob Keller 1328d8eb7ad5SJesse Brandeburg __ice_update_sample(q_vector, rx, &dim_sample, false); 1329cdf1f1f1SJacob Keller net_dim(&rx->dim, dim_sample); 133064a59d05SAnirudh Venkataramanan } 133164a59d05SAnirudh Venkataramanan } 133264a59d05SAnirudh Venkataramanan 13332b245cb2SAnirudh Venkataramanan /** 133463f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 133563f545edSBrett Creeley * @itr_idx: interrupt throttling index 133664a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 133763f545edSBrett Creeley */ 13388244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 133963f545edSBrett Creeley { 13402f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 134164a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 134264a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 134364a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 134464a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 134564a59d05SAnirudh Venkataramanan * prior to the ITR field. 
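 * (Worked example, illustrative: with the 2 microsecond register granularity
 * described above, a requested ITR of 50 usecs is programmed as 25 interval
 * units; the single shift below both converts usecs to register units and
 * places the value in the interval field, while masking with ICE_ITR_MASK
 * keeps the low bit from spilling into the field below the interval.)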
134664a59d05SAnirudh Venkataramanan */ 134764a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 134864a59d05SAnirudh Venkataramanan 134963f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 135063f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 135164a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 135263f545edSBrett Creeley } 135363f545edSBrett Creeley 135463f545edSBrett Creeley /** 1355d8eb7ad5SJesse Brandeburg * ice_enable_interrupt - re-enable MSI-X interrupt 1356cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt to enable 1357cdf1f1f1SJacob Keller * 1358d8eb7ad5SJesse Brandeburg * If the VSI is down, the interrupt will not be re-enabled. Also, 1359d8eb7ad5SJesse Brandeburg * when enabling the interrupt always reset the wb_on_itr to false 1360d8eb7ad5SJesse Brandeburg * and trigger a software interrupt to clean out internal state. 136163f545edSBrett Creeley */ 1362d8eb7ad5SJesse Brandeburg static void ice_enable_interrupt(struct ice_q_vector *q_vector) 136363f545edSBrett Creeley { 13642fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 1365b7306b42SJesse Brandeburg bool wb_en = q_vector->wb_on_itr; 136663f545edSBrett Creeley u32 itr_val; 136763f545edSBrett Creeley 1368cdf1f1f1SJacob Keller if (test_bit(ICE_DOWN, vsi->state)) 1369cdf1f1f1SJacob Keller return; 13702ab28bb0SBrett Creeley 137123be7075SJesse Brandeburg /* trigger an ITR delayed software interrupt when exiting busy poll, to 137223be7075SJesse Brandeburg * make sure to catch any pending cleanups that might have been missed 137323be7075SJesse Brandeburg * due to interrupt state transition. If busy poll or poll isn't 137423be7075SJesse Brandeburg * enabled, then don't update ITR, and just enable the interrupt. 1375cdf1f1f1SJacob Keller */ 137623be7075SJesse Brandeburg if (!wb_en) { 137723be7075SJesse Brandeburg itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 137823be7075SJesse Brandeburg } else { 1379cdf1f1f1SJacob Keller q_vector->wb_on_itr = false; 138064a59d05SAnirudh Venkataramanan 138123be7075SJesse Brandeburg /* do two things here with a single write. Set up the third ITR 138223be7075SJesse Brandeburg * index to be used for software interrupt moderation, and then 138323be7075SJesse Brandeburg * trigger a software interrupt with a rate limit of 20K on 138423be7075SJesse Brandeburg * software interrupts, this will help avoid high interrupt 138523be7075SJesse Brandeburg * loads due to frequently polling and exiting polling. 1386b7306b42SJesse Brandeburg */ 138723be7075SJesse Brandeburg itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K); 1388b7306b42SJesse Brandeburg itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | 138923be7075SJesse Brandeburg ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S | 1390b7306b42SJesse Brandeburg GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; 1391b7306b42SJesse Brandeburg } 13921d9f7ca3SJesse Brandeburg wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 139363f545edSBrett Creeley } 139463f545edSBrett Creeley 139563f545edSBrett Creeley /** 13962ab28bb0SBrett Creeley * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 13972ab28bb0SBrett Creeley * @q_vector: q_vector to set WB_ON_ITR on 13982ab28bb0SBrett Creeley * 13992ab28bb0SBrett Creeley * We need to tell hardware to write-back completed descriptors even when 14002ab28bb0SBrett Creeley * interrupts are disabled. 
Descriptors will be written back on cache line 14012ab28bb0SBrett Creeley * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 14021d9f7ca3SJesse Brandeburg * descriptors may not be written back if they don't fill a cache line until 14031d9f7ca3SJesse Brandeburg * the next interrupt. 14042ab28bb0SBrett Creeley * 14051d9f7ca3SJesse Brandeburg * This sets the write-back frequency to whatever was set previously for the 14061d9f7ca3SJesse Brandeburg * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we 14071d9f7ca3SJesse Brandeburg * aren't meddling with the INTENA_M bit. 14082ab28bb0SBrett Creeley */ 14092fb0821fSJesse Brandeburg static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 14102ab28bb0SBrett Creeley { 14112fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 14122fb0821fSJesse Brandeburg 14131d9f7ca3SJesse Brandeburg /* already in wb_on_itr mode no need to change it */ 1414cdf1f1f1SJacob Keller if (q_vector->wb_on_itr) 14152ab28bb0SBrett Creeley return; 14162ab28bb0SBrett Creeley 14171d9f7ca3SJesse Brandeburg /* use previously set ITR values for all of the ITR indices by 14181d9f7ca3SJesse Brandeburg * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and 14191d9f7ca3SJesse Brandeburg * be static in non-adaptive mode (user configured) 14201d9f7ca3SJesse Brandeburg */ 14212ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 14221d9f7ca3SJesse Brandeburg ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & 14231d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | 14241d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_WB_ON_ITR_M); 14252ab28bb0SBrett Creeley 1426cdf1f1f1SJacob Keller q_vector->wb_on_itr = true; 14272ab28bb0SBrett Creeley } 14282ab28bb0SBrett Creeley 14292ab28bb0SBrett Creeley /** 14302b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 14312b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 14322b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 14332b245cb2SAnirudh Venkataramanan * 14342b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 14352b245cb2SAnirudh Venkataramanan * 14362b245cb2SAnirudh Venkataramanan * Returns the amount of work done 14372b245cb2SAnirudh Venkataramanan */ 14382b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 14392b245cb2SAnirudh Venkataramanan { 14402b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 14412b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 1442e72bba21SMaciej Fijalkowski struct ice_tx_ring *tx_ring; 1443e72bba21SMaciej Fijalkowski struct ice_rx_ring *rx_ring; 14442b245cb2SAnirudh Venkataramanan bool clean_complete = true; 14459118fcd5SBrett Creeley int budget_per_ring; 14462b245cb2SAnirudh Venkataramanan int work_done = 0; 14472b245cb2SAnirudh Venkataramanan 14482b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 14492b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 
14502b245cb2SAnirudh Venkataramanan */ 1451e72bba21SMaciej Fijalkowski ice_for_each_tx_ring(tx_ring, q_vector->tx) { 14529610bd98SMaciej Fijalkowski bool wd; 14539610bd98SMaciej Fijalkowski 14549610bd98SMaciej Fijalkowski if (tx_ring->xsk_pool) 14559610bd98SMaciej Fijalkowski wd = ice_clean_tx_irq_zc(tx_ring, budget); 14569610bd98SMaciej Fijalkowski else if (ice_ring_is_xdp(tx_ring)) 14579610bd98SMaciej Fijalkowski wd = true; 14589610bd98SMaciej Fijalkowski else 14599610bd98SMaciej Fijalkowski wd = ice_clean_tx_irq(tx_ring, budget); 14602d4238f5SKrzysztof Kazimierczak 14612d4238f5SKrzysztof Kazimierczak if (!wd) 14622b245cb2SAnirudh Venkataramanan clean_complete = false; 14632d4238f5SKrzysztof Kazimierczak } 14642b245cb2SAnirudh Venkataramanan 14652b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 1466d27525ecSJesse Brandeburg if (unlikely(budget <= 0)) 14672b245cb2SAnirudh Venkataramanan return budget; 14682b245cb2SAnirudh Venkataramanan 14699118fcd5SBrett Creeley /* normally we have 1 Rx ring per q_vector */ 14709118fcd5SBrett Creeley if (unlikely(q_vector->num_ring_rx > 1)) 14719118fcd5SBrett Creeley /* We attempt to distribute budget to each Rx queue fairly, but 14729118fcd5SBrett Creeley * don't allow the budget to go below 1 because that would exit 14739118fcd5SBrett Creeley * polling early. 14742b245cb2SAnirudh Venkataramanan */ 147588865fc4SKarol Kolacinski budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 14769118fcd5SBrett Creeley else 14779118fcd5SBrett Creeley /* Max of 1 Rx ring in this q_vector so give it the budget */ 14789118fcd5SBrett Creeley budget_per_ring = budget; 14792b245cb2SAnirudh Venkataramanan 1480e72bba21SMaciej Fijalkowski ice_for_each_rx_ring(rx_ring, q_vector->rx) { 14812b245cb2SAnirudh Venkataramanan int cleaned; 14822b245cb2SAnirudh Venkataramanan 14832d4238f5SKrzysztof Kazimierczak /* A dedicated path for zero-copy allows making a single 14842d4238f5SKrzysztof Kazimierczak * comparison in the irq context instead of many inside the 14852d4238f5SKrzysztof Kazimierczak * ice_clean_rx_irq function and makes the codebase cleaner. 14862d4238f5SKrzysztof Kazimierczak */ 1487e72bba21SMaciej Fijalkowski cleaned = rx_ring->xsk_pool ? 1488e72bba21SMaciej Fijalkowski ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : 1489e72bba21SMaciej Fijalkowski ice_clean_rx_irq(rx_ring, budget_per_ring); 14902b245cb2SAnirudh Venkataramanan work_done += cleaned; 14912b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 14922b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 14932b245cb2SAnirudh Venkataramanan clean_complete = false; 14942b245cb2SAnirudh Venkataramanan } 14952b245cb2SAnirudh Venkataramanan 14962b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 14971d9f7ca3SJesse Brandeburg if (!clean_complete) { 14981d9f7ca3SJesse Brandeburg /* Set the writeback on ITR so partial completions of 14991d9f7ca3SJesse Brandeburg * cache-lines will still continue even if we're polling. 
15001d9f7ca3SJesse Brandeburg */ 15011d9f7ca3SJesse Brandeburg ice_set_wb_on_itr(q_vector); 15022b245cb2SAnirudh Venkataramanan return budget; 15031d9f7ca3SJesse Brandeburg } 15042b245cb2SAnirudh Venkataramanan 15050bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 15060bcd952fSJesse Brandeburg * poll us due to busy-polling 15070bcd952fSJesse Brandeburg */ 1508d8eb7ad5SJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) { 1509d8eb7ad5SJesse Brandeburg ice_net_dim(q_vector); 1510d8eb7ad5SJesse Brandeburg ice_enable_interrupt(q_vector); 1511d8eb7ad5SJesse Brandeburg } else { 15122fb0821fSJesse Brandeburg ice_set_wb_on_itr(q_vector); 1513d8eb7ad5SJesse Brandeburg } 1514e0c9fd9bSDave Ertman 151532a64994SBruce Allan return min_t(int, work_done, budget - 1); 15162b245cb2SAnirudh Venkataramanan } 15172b245cb2SAnirudh Venkataramanan 15182b245cb2SAnirudh Venkataramanan /** 1519d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 15202b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15212b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15222b245cb2SAnirudh Venkataramanan * 15232b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 15242b245cb2SAnirudh Venkataramanan */ 1525e72bba21SMaciej Fijalkowski static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 15262b245cb2SAnirudh Venkataramanan { 15272b245cb2SAnirudh Venkataramanan netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 15282b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 15292b245cb2SAnirudh Venkataramanan smp_mb(); 15302b245cb2SAnirudh Venkataramanan 15312b245cb2SAnirudh Venkataramanan /* Check again in case another CPU has just made room available. */ 15322b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 15332b245cb2SAnirudh Venkataramanan return -EBUSY; 15342b245cb2SAnirudh Venkataramanan 15352b245cb2SAnirudh Venkataramanan /* A reprieve! 
- use start_subqueue because it doesn't call schedule */ 15362b245cb2SAnirudh Venkataramanan netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 15372b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 15382b245cb2SAnirudh Venkataramanan return 0; 15392b245cb2SAnirudh Venkataramanan } 15402b245cb2SAnirudh Venkataramanan 15412b245cb2SAnirudh Venkataramanan /** 1542d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 15432b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15442b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15452b245cb2SAnirudh Venkataramanan * 15462b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 15472b245cb2SAnirudh Venkataramanan */ 1548e72bba21SMaciej Fijalkowski static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 15492b245cb2SAnirudh Venkataramanan { 15502b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 15512b245cb2SAnirudh Venkataramanan return 0; 1552d337f2afSAnirudh Venkataramanan 15532b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 15542b245cb2SAnirudh Venkataramanan } 15552b245cb2SAnirudh Venkataramanan 15562b245cb2SAnirudh Venkataramanan /** 15572b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 15582b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 15592b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1560d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 15612b245cb2SAnirudh Venkataramanan * 15622b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 15632b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 15642b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
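 * (Illustrative: an skb with a linear area and two page fragments consumes at
 * least three data descriptors; any chunk larger than ICE_MAX_DATA_PER_TXD is
 * further split across additional descriptors by the inner loop below.)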
15652b245cb2SAnirudh Venkataramanan */ 1566d76a60baSAnirudh Venkataramanan static void 1567e72bba21SMaciej Fijalkowski ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, 1568d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 15692b245cb2SAnirudh Venkataramanan { 1570d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 15712b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 15722b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 15732b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 15742b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 15752b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 15764ee656bbSTony Nguyen skb_frag_t *frag; 15772b245cb2SAnirudh Venkataramanan dma_addr_t dma; 15782b245cb2SAnirudh Venkataramanan 1579d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1580d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1581d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 15822b245cb2SAnirudh Venkataramanan skb = first->skb; 15832b245cb2SAnirudh Venkataramanan 15842b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 15852b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 15862b245cb2SAnirudh Venkataramanan 15872b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 15882b245cb2SAnirudh Venkataramanan 1589d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1590d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1591d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1592d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1593d76a60baSAnirudh Venkataramanan } 1594d76a60baSAnirudh Venkataramanan 15952b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 15962b245cb2SAnirudh Venkataramanan 15972b245cb2SAnirudh Venkataramanan tx_buf = first; 15982b245cb2SAnirudh Venkataramanan 15992b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 16002b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16012b245cb2SAnirudh Venkataramanan 16022b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 16032b245cb2SAnirudh Venkataramanan goto dma_error; 16042b245cb2SAnirudh Venkataramanan 16052b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 16062b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 16072b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 16082b245cb2SAnirudh Venkataramanan 16092b245cb2SAnirudh Venkataramanan /* align size to end of page */ 16102b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 16112b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16122b245cb2SAnirudh Venkataramanan 16132b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 16142b245cb2SAnirudh Venkataramanan * can handle 16152b245cb2SAnirudh Venkataramanan */ 16162b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 16172b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 16185757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, max_data, 16195757cc7cSTony Nguyen td_tag); 16202b245cb2SAnirudh Venkataramanan 16212b245cb2SAnirudh Venkataramanan tx_desc++; 16222b245cb2SAnirudh Venkataramanan i++; 16232b245cb2SAnirudh Venkataramanan 16242b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16252b245cb2SAnirudh Venkataramanan tx_desc = 
ICE_TX_DESC(tx_ring, 0); 16262b245cb2SAnirudh Venkataramanan i = 0; 16272b245cb2SAnirudh Venkataramanan } 16282b245cb2SAnirudh Venkataramanan 16292b245cb2SAnirudh Venkataramanan dma += max_data; 16302b245cb2SAnirudh Venkataramanan size -= max_data; 16312b245cb2SAnirudh Venkataramanan 16322b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16332b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16342b245cb2SAnirudh Venkataramanan } 16352b245cb2SAnirudh Venkataramanan 16362b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 16372b245cb2SAnirudh Venkataramanan break; 16382b245cb2SAnirudh Venkataramanan 16395757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 16402b245cb2SAnirudh Venkataramanan size, td_tag); 16412b245cb2SAnirudh Venkataramanan 16422b245cb2SAnirudh Venkataramanan tx_desc++; 16432b245cb2SAnirudh Venkataramanan i++; 16442b245cb2SAnirudh Venkataramanan 16452b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16462b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 16472b245cb2SAnirudh Venkataramanan i = 0; 16482b245cb2SAnirudh Venkataramanan } 16492b245cb2SAnirudh Venkataramanan 16502b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 16512b245cb2SAnirudh Venkataramanan data_len -= size; 16522b245cb2SAnirudh Venkataramanan 16532b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 16542b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 16552b245cb2SAnirudh Venkataramanan 16562b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 16572b245cb2SAnirudh Venkataramanan } 16582b245cb2SAnirudh Venkataramanan 16592b245cb2SAnirudh Venkataramanan /* record bytecount for BQL */ 16602b245cb2SAnirudh Venkataramanan netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 16612b245cb2SAnirudh Venkataramanan 16622b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 16632b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 16642b245cb2SAnirudh Venkataramanan 16652b245cb2SAnirudh Venkataramanan i++; 16662b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 16672b245cb2SAnirudh Venkataramanan i = 0; 16682b245cb2SAnirudh Venkataramanan 16692b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 1670efc2214bSMaciej Fijalkowski td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 16715757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = 16725757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, size, td_tag); 16732b245cb2SAnirudh Venkataramanan 16742b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 16752b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 16762b245cb2SAnirudh Venkataramanan * 16772b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 16782b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
16792b245cb2SAnirudh Venkataramanan */ 16802b245cb2SAnirudh Venkataramanan wmb(); 16812b245cb2SAnirudh Venkataramanan 16822b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 16832b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 16842b245cb2SAnirudh Venkataramanan 16852b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 16862b245cb2SAnirudh Venkataramanan 16872b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 16882b245cb2SAnirudh Venkataramanan 16892b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 16904ee656bbSTony Nguyen if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 16912b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 16922b245cb2SAnirudh Venkataramanan 16932b245cb2SAnirudh Venkataramanan return; 16942b245cb2SAnirudh Venkataramanan 16952b245cb2SAnirudh Venkataramanan dma_error: 16962f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 16972b245cb2SAnirudh Venkataramanan for (;;) { 16982b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 16992b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 17002b245cb2SAnirudh Venkataramanan if (tx_buf == first) 17012b245cb2SAnirudh Venkataramanan break; 17022b245cb2SAnirudh Venkataramanan if (i == 0) 17032b245cb2SAnirudh Venkataramanan i = tx_ring->count; 17042b245cb2SAnirudh Venkataramanan i--; 17052b245cb2SAnirudh Venkataramanan } 17062b245cb2SAnirudh Venkataramanan 17072b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 17082b245cb2SAnirudh Venkataramanan } 17092b245cb2SAnirudh Venkataramanan 17102b245cb2SAnirudh Venkataramanan /** 1711d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1712d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1713d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1714d76a60baSAnirudh Venkataramanan * 1715d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
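 * (Illustrative: for a plain IPv4/TCP frame this sets the IIPT and L4T_EOFT
 * command bits and encodes the MAC, IP and TCP header lengths into td_offset
 * in 2-byte, 4-byte and 4-byte units respectively; hardware then inserts the
 * L4 checksum, and also the IP checksum when TSO is in use.)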
1716d76a60baSAnirudh Venkataramanan */ 1717d76a60baSAnirudh Venkataramanan static 1718d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1719d76a60baSAnirudh Venkataramanan { 1720d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1721d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1722d76a60baSAnirudh Venkataramanan union { 1723d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1724d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1725d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1726d76a60baSAnirudh Venkataramanan } ip; 1727d76a60baSAnirudh Venkataramanan union { 1728d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1729d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1730d76a60baSAnirudh Venkataramanan } l4; 1731d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1732d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1733d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1734d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1735d76a60baSAnirudh Venkataramanan 1736d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1737d76a60baSAnirudh Venkataramanan return 0; 1738d76a60baSAnirudh Venkataramanan 1739d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1740d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1741d76a60baSAnirudh Venkataramanan 1742d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1743d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1744d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1745d76a60baSAnirudh Venkataramanan 1746a4e82a81STony Nguyen protocol = vlan_get_protocol(skb); 1747a4e82a81STony Nguyen 1748a4e82a81STony Nguyen if (protocol == htons(ETH_P_IP)) 1749a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1750a4e82a81STony Nguyen else if (protocol == htons(ETH_P_IPV6)) 1751a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1752a4e82a81STony Nguyen 1753a4e82a81STony Nguyen if (skb->encapsulation) { 1754a4e82a81STony Nguyen bool gso_ena = false; 1755a4e82a81STony Nguyen u32 tunnel = 0; 1756a4e82a81STony Nguyen 1757a4e82a81STony Nguyen /* define outer network header type */ 1758a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1759a4e82a81STony Nguyen tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
1760a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4 : 1761a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 1762a4e82a81STony Nguyen l4_proto = ip.v4->protocol; 1763a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 17641b0b0b58SNick Nunley int ret; 17651b0b0b58SNick Nunley 1766a4e82a81STony Nguyen tunnel |= ICE_TX_CTX_EIPT_IPV6; 1767a4e82a81STony Nguyen exthdr = ip.hdr + sizeof(*ip.v6); 1768a4e82a81STony Nguyen l4_proto = ip.v6->nexthdr; 17691b0b0b58SNick Nunley ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 1770a4e82a81STony Nguyen &l4_proto, &frag_off); 17711b0b0b58SNick Nunley if (ret < 0) 17721b0b0b58SNick Nunley return -1; 1773a4e82a81STony Nguyen } 1774a4e82a81STony Nguyen 1775a4e82a81STony Nguyen /* define outer transport */ 1776a4e82a81STony Nguyen switch (l4_proto) { 1777a4e82a81STony Nguyen case IPPROTO_UDP: 1778a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_UDP_TUNNELING; 1779a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1780a4e82a81STony Nguyen break; 1781a4e82a81STony Nguyen case IPPROTO_GRE: 1782a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_GRE_TUNNELING; 1783a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1784a4e82a81STony Nguyen break; 1785a4e82a81STony Nguyen case IPPROTO_IPIP: 1786a4e82a81STony Nguyen case IPPROTO_IPV6: 1787a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1788a4e82a81STony Nguyen l4.hdr = skb_inner_network_header(skb); 1789a4e82a81STony Nguyen break; 1790a4e82a81STony Nguyen default: 1791a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_TSO) 1792d76a60baSAnirudh Venkataramanan return -1; 1793d76a60baSAnirudh Venkataramanan 1794a4e82a81STony Nguyen skb_checksum_help(skb); 1795a4e82a81STony Nguyen return 0; 1796a4e82a81STony Nguyen } 1797a4e82a81STony Nguyen 1798a4e82a81STony Nguyen /* compute outer L3 header size */ 1799a4e82a81STony Nguyen tunnel |= ((l4.hdr - ip.hdr) / 4) << 1800a4e82a81STony Nguyen ICE_TXD_CTX_QW0_EIPLEN_S; 1801a4e82a81STony Nguyen 1802a4e82a81STony Nguyen /* switch IP header pointer from outer to inner header */ 1803a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1804a4e82a81STony Nguyen 1805a4e82a81STony Nguyen /* compute tunnel header size */ 1806a4e82a81STony Nguyen tunnel |= ((ip.hdr - l4.hdr) / 2) << 1807a4e82a81STony Nguyen ICE_TXD_CTX_QW0_NATLEN_S; 1808a4e82a81STony Nguyen 1809a4e82a81STony Nguyen gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; 1810a4e82a81STony Nguyen /* indicate if we need to offload outer UDP header */ 1811a4e82a81STony Nguyen if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && 1812a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 1813a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M; 1814a4e82a81STony Nguyen 1815a4e82a81STony Nguyen /* record tunnel offload values */ 1816a4e82a81STony Nguyen off->cd_tunnel_params |= tunnel; 1817a4e82a81STony Nguyen 1818a4e82a81STony Nguyen /* set DTYP=1 to indicate that it's an Tx context descriptor 1819a4e82a81STony Nguyen * in IPsec tunnel mode with Tx offloads in Quad word 1 1820a4e82a81STony Nguyen */ 1821a4e82a81STony Nguyen off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; 1822a4e82a81STony Nguyen 1823a4e82a81STony Nguyen /* switch L4 header pointer from outer to inner */ 1824a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1825a4e82a81STony Nguyen l4_proto = 0; 1826a4e82a81STony Nguyen 1827a4e82a81STony Nguyen /* reset type as we transition from outer to inner headers */ 1828a4e82a81STony Nguyen first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | 
ICE_TX_FLAGS_IPV6); 1829a4e82a81STony Nguyen if (ip.v4->version == 4) 1830a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1831a4e82a81STony Nguyen if (ip.v6->version == 6) 1832a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1833a4e82a81STony Nguyen } 1834a4e82a81STony Nguyen 1835d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1836a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1837d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1838d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already, the only time we 1839d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 1840d76a60baSAnirudh Venkataramanan */ 1841d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1842d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1843d76a60baSAnirudh Venkataramanan else 1844d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1845d76a60baSAnirudh Venkataramanan 1846a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1847d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1848d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1849d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1850d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1851d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1852d76a60baSAnirudh Venkataramanan &frag_off); 1853d76a60baSAnirudh Venkataramanan } else { 1854d76a60baSAnirudh Venkataramanan return -1; 1855d76a60baSAnirudh Venkataramanan } 1856d76a60baSAnirudh Venkataramanan 1857d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1858d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1859d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1860d76a60baSAnirudh Venkataramanan 1861d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1862d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1863d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 1864d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 1865d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1866d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 1867d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1868d76a60baSAnirudh Venkataramanan break; 1869d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 1870d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 1871d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1872d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 1873d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1874d76a60baSAnirudh Venkataramanan break; 1875d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 1876cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 1877cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1878cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 1879cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1880cf909e19SAnirudh Venkataramanan break; 1881cf909e19SAnirudh Venkataramanan 1882d76a60baSAnirudh Venkataramanan default: 1883d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1884d76a60baSAnirudh Venkataramanan return -1; 1885d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 1886d76a60baSAnirudh Venkataramanan return 0; 
1887d76a60baSAnirudh Venkataramanan } 1888d76a60baSAnirudh Venkataramanan 1889d76a60baSAnirudh Venkataramanan off->td_cmd |= cmd; 1890d76a60baSAnirudh Venkataramanan off->td_offset |= offset; 1891d76a60baSAnirudh Venkataramanan return 1; 1892d76a60baSAnirudh Venkataramanan } 1893d76a60baSAnirudh Venkataramanan 1894d76a60baSAnirudh Venkataramanan /** 1895f9867df6SAnirudh Venkataramanan * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 1896d76a60baSAnirudh Venkataramanan * @tx_ring: ring to send buffer on 1897d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1898d76a60baSAnirudh Venkataramanan * 1899d76a60baSAnirudh Venkataramanan * Checks the skb and sets up the corresponding generic transmit flags 1900d76a60baSAnirudh Venkataramanan * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 1901d76a60baSAnirudh Venkataramanan */ 19022bb19d6eSBrett Creeley static void 1903e72bba21SMaciej Fijalkowski ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) 1904d76a60baSAnirudh Venkataramanan { 1905d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1906d76a60baSAnirudh Venkataramanan 19072bb19d6eSBrett Creeley /* nothing left to do, software offloaded VLAN */ 19082bb19d6eSBrett Creeley if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) 19092bb19d6eSBrett Creeley return; 19102bb19d6eSBrett Creeley 19112bb19d6eSBrett Creeley /* currently, we always assume 802.1Q for VLAN insertion as VLAN 19122bb19d6eSBrett Creeley * insertion for 802.1AD is not supported 1913d76a60baSAnirudh Venkataramanan */ 1914d76a60baSAnirudh Venkataramanan if (skb_vlan_tag_present(skb)) { 1915d76a60baSAnirudh Venkataramanan first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 1916d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 1917d76a60baSAnirudh Venkataramanan } 1918d76a60baSAnirudh Venkataramanan 19192bb19d6eSBrett Creeley ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 1920d76a60baSAnirudh Venkataramanan } 1921d76a60baSAnirudh Venkataramanan 1922d76a60baSAnirudh Venkataramanan /** 1923d76a60baSAnirudh Venkataramanan * ice_tso - computes mss and TSO length to prepare for TSO 1924d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1925d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1926d76a60baSAnirudh Venkataramanan * 1927d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if TSO can't happen, 1 otherwise. 
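 * (Illustrative: for a GSO TCP send the payload length is removed from the
 * pseudo-header checksum, cd_tso_len and cd_mss are recorded for the context
 * descriptor, and bytecount grows by (gso_segs - 1) * header_len so BQL
 * accounting reflects the bytes that will actually hit the wire.)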
1928d76a60baSAnirudh Venkataramanan */ 1929d76a60baSAnirudh Venkataramanan static 1930d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1931d76a60baSAnirudh Venkataramanan { 1932d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1933d76a60baSAnirudh Venkataramanan union { 1934d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1935d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1936d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1937d76a60baSAnirudh Venkataramanan } ip; 1938d76a60baSAnirudh Venkataramanan union { 1939d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1940a54e3b8cSBrett Creeley struct udphdr *udp; 1941d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1942d76a60baSAnirudh Venkataramanan } l4; 1943d76a60baSAnirudh Venkataramanan u64 cd_mss, cd_tso_len; 194488865fc4SKarol Kolacinski u32 paylen; 194588865fc4SKarol Kolacinski u8 l4_start; 1946d76a60baSAnirudh Venkataramanan int err; 1947d76a60baSAnirudh Venkataramanan 1948d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1949d76a60baSAnirudh Venkataramanan return 0; 1950d76a60baSAnirudh Venkataramanan 1951d76a60baSAnirudh Venkataramanan if (!skb_is_gso(skb)) 1952d76a60baSAnirudh Venkataramanan return 0; 1953d76a60baSAnirudh Venkataramanan 1954d76a60baSAnirudh Venkataramanan err = skb_cow_head(skb, 0); 1955d76a60baSAnirudh Venkataramanan if (err < 0) 1956d76a60baSAnirudh Venkataramanan return err; 1957d76a60baSAnirudh Venkataramanan 1958c3a6825eSBruce Allan /* cppcheck-suppress unreadVariable */ 1959d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1960d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1961d76a60baSAnirudh Venkataramanan 1962d76a60baSAnirudh Venkataramanan /* initialize outer IP header fields */ 1963d76a60baSAnirudh Venkataramanan if (ip.v4->version == 4) { 1964d76a60baSAnirudh Venkataramanan ip.v4->tot_len = 0; 1965d76a60baSAnirudh Venkataramanan ip.v4->check = 0; 1966d76a60baSAnirudh Venkataramanan } else { 1967d76a60baSAnirudh Venkataramanan ip.v6->payload_len = 0; 1968d76a60baSAnirudh Venkataramanan } 1969d76a60baSAnirudh Venkataramanan 1970a4e82a81STony Nguyen if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 1971a4e82a81STony Nguyen SKB_GSO_GRE_CSUM | 1972a4e82a81STony Nguyen SKB_GSO_IPXIP4 | 1973a4e82a81STony Nguyen SKB_GSO_IPXIP6 | 1974a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL | 1975a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL_CSUM)) { 1976a4e82a81STony Nguyen if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 1977a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 1978a4e82a81STony Nguyen l4.udp->len = 0; 1979a4e82a81STony Nguyen 1980a4e82a81STony Nguyen /* determine offset of outer transport header */ 198188865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data); 1982a4e82a81STony Nguyen 1983a4e82a81STony Nguyen /* remove payload length from outer checksum */ 1984a4e82a81STony Nguyen paylen = skb->len - l4_start; 1985a4e82a81STony Nguyen csum_replace_by_diff(&l4.udp->check, 1986a4e82a81STony Nguyen (__force __wsum)htonl(paylen)); 1987a4e82a81STony Nguyen } 1988a4e82a81STony Nguyen 1989a4e82a81STony Nguyen /* reset pointers to inner headers */ 1990a4e82a81STony Nguyen 1991a4e82a81STony Nguyen /* cppcheck-suppress unreadVariable */ 1992a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1993a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1994a4e82a81STony Nguyen 1995a4e82a81STony Nguyen /* initialize inner IP header fields */ 
1996a4e82a81STony Nguyen 		if (ip.v4->version == 4) {
1997a4e82a81STony Nguyen 			ip.v4->tot_len = 0;
1998a4e82a81STony Nguyen 			ip.v4->check = 0;
1999a4e82a81STony Nguyen 		} else {
2000a4e82a81STony Nguyen 			ip.v6->payload_len = 0;
2001a4e82a81STony Nguyen 		}
2002a4e82a81STony Nguyen 	}
2003a4e82a81STony Nguyen 
2004d76a60baSAnirudh Venkataramanan 	/* determine offset of transport header */
200588865fc4SKarol Kolacinski 	l4_start = (u8)(l4.hdr - skb->data);
2006d76a60baSAnirudh Venkataramanan 
2007d76a60baSAnirudh Venkataramanan 	/* remove payload length from checksum */
2008d76a60baSAnirudh Venkataramanan 	paylen = skb->len - l4_start;
2009d76a60baSAnirudh Venkataramanan 
2010a54e3b8cSBrett Creeley 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2011a54e3b8cSBrett Creeley 		csum_replace_by_diff(&l4.udp->check,
2012a54e3b8cSBrett Creeley 				     (__force __wsum)htonl(paylen));
2013a54e3b8cSBrett Creeley 		/* compute length of UDP segmentation header */
201488865fc4SKarol Kolacinski 		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2015a54e3b8cSBrett Creeley 	} else {
2016a54e3b8cSBrett Creeley 		csum_replace_by_diff(&l4.tcp->check,
2017a54e3b8cSBrett Creeley 				     (__force __wsum)htonl(paylen));
2018a54e3b8cSBrett Creeley 		/* compute length of TCP segmentation header */
201988865fc4SKarol Kolacinski 		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2020a54e3b8cSBrett Creeley 	}
2021d76a60baSAnirudh Venkataramanan 
2022d76a60baSAnirudh Venkataramanan 	/* update gso_segs and bytecount */
2023d76a60baSAnirudh Venkataramanan 	first->gso_segs = skb_shinfo(skb)->gso_segs;
2024d944b469SBrett Creeley 	first->bytecount += (first->gso_segs - 1) * off->header_len;
2025d76a60baSAnirudh Venkataramanan 
2026d76a60baSAnirudh Venkataramanan 	cd_tso_len = skb->len - off->header_len;
2027d76a60baSAnirudh Venkataramanan 	cd_mss = skb_shinfo(skb)->gso_size;
2028d76a60baSAnirudh Venkataramanan 
2029d76a60baSAnirudh Venkataramanan 	/* record cdesc_qw1 with TSO parameters */
2030e65e9e15SBruce Allan 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2031d76a60baSAnirudh Venkataramanan 			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2032d76a60baSAnirudh Venkataramanan 			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2033e65e9e15SBruce Allan 			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2034d76a60baSAnirudh Venkataramanan 	first->tx_flags |= ICE_TX_FLAGS_TSO;
2035d76a60baSAnirudh Venkataramanan 	return 1;
2036d76a60baSAnirudh Venkataramanan }
2037d76a60baSAnirudh Venkataramanan 
2038d76a60baSAnirudh Venkataramanan /**
20392b245cb2SAnirudh Venkataramanan  * ice_txd_use_count - estimate the number of descriptors needed for Tx
20402b245cb2SAnirudh Venkataramanan  * @size: transmit request size in bytes
20412b245cb2SAnirudh Venkataramanan  *
20422b245cb2SAnirudh Venkataramanan  * Due to hardware alignment restrictions (4K alignment), we need to
20432b245cb2SAnirudh Venkataramanan  * assume that we can have no more than 12K of data per descriptor, even
20442b245cb2SAnirudh Venkataramanan  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
20452b245cb2SAnirudh Venkataramanan  * Thus, we need to divide by 12K. But division is slow! Instead,
20462b245cb2SAnirudh Venkataramanan  * we decompose the operation into shifts and one relatively cheap
20472b245cb2SAnirudh Venkataramanan  * multiply operation.
20482b245cb2SAnirudh Venkataramanan * 20492b245cb2SAnirudh Venkataramanan * To divide by 12K, we first divide by 4K, then divide by 3: 20502b245cb2SAnirudh Venkataramanan * To divide by 4K, shift right by 12 bits 20512b245cb2SAnirudh Venkataramanan * To divide by 3, multiply by 85, then divide by 256 20522b245cb2SAnirudh Venkataramanan * (Divide by 256 is done by shifting right by 8 bits) 20532b245cb2SAnirudh Venkataramanan * Finally, we add one to round up. Because 256 isn't an exact multiple of 20542b245cb2SAnirudh Venkataramanan * 3, we'll underestimate near each multiple of 12K. This is actually more 20552b245cb2SAnirudh Venkataramanan * accurate as we have 4K - 1 of wiggle room that we can fit into the last 20562b245cb2SAnirudh Venkataramanan * segment. For our purposes this is accurate out to 1M which is orders of 20572b245cb2SAnirudh Venkataramanan * magnitude greater than our largest possible GSO size. 20582b245cb2SAnirudh Venkataramanan * 20592b245cb2SAnirudh Venkataramanan * This would then be implemented as: 2060c585ea42SBrett Creeley * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 20612b245cb2SAnirudh Venkataramanan * 20622b245cb2SAnirudh Venkataramanan * Since multiplication and division are commutative, we can reorder 20632b245cb2SAnirudh Venkataramanan * operations into: 2064c585ea42SBrett Creeley * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 20652b245cb2SAnirudh Venkataramanan */ 20662b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size) 20672b245cb2SAnirudh Venkataramanan { 2068c585ea42SBrett Creeley return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 20692b245cb2SAnirudh Venkataramanan } 20702b245cb2SAnirudh Venkataramanan 20712b245cb2SAnirudh Venkataramanan /** 2072d337f2afSAnirudh Venkataramanan * ice_xmit_desc_count - calculate number of Tx descriptors needed 20732b245cb2SAnirudh Venkataramanan * @skb: send buffer 20742b245cb2SAnirudh Venkataramanan * 20752b245cb2SAnirudh Venkataramanan * Returns number of data descriptors needed for this skb. 
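 *
 * As a hedged worked example (assuming ICE_DESCS_FOR_SKB_DATA_PTR is 1, as
 * defined elsewhere in this driver): an skb with a 2000-byte linear head and
 * two 32768-byte fragments is counted as 1 + 3 + 3 = 7 descriptors, since
 * ((2000 * 85) >> 20) + 1 = 1 and ((32768 * 85) >> 20) + 1 = 3, the latter
 * matching ceil(32768 / 12288).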
20762b245cb2SAnirudh Venkataramanan */ 20772b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 20782b245cb2SAnirudh Venkataramanan { 2079d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 20802b245cb2SAnirudh Venkataramanan unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 20812b245cb2SAnirudh Venkataramanan unsigned int count = 0, size = skb_headlen(skb); 20822b245cb2SAnirudh Venkataramanan 20832b245cb2SAnirudh Venkataramanan for (;;) { 20842b245cb2SAnirudh Venkataramanan count += ice_txd_use_count(size); 20852b245cb2SAnirudh Venkataramanan 20862b245cb2SAnirudh Venkataramanan if (!nr_frags--) 20872b245cb2SAnirudh Venkataramanan break; 20882b245cb2SAnirudh Venkataramanan 20892b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag++); 20902b245cb2SAnirudh Venkataramanan } 20912b245cb2SAnirudh Venkataramanan 20922b245cb2SAnirudh Venkataramanan return count; 20932b245cb2SAnirudh Venkataramanan } 20942b245cb2SAnirudh Venkataramanan 20952b245cb2SAnirudh Venkataramanan /** 20962b245cb2SAnirudh Venkataramanan * __ice_chk_linearize - Check if there are more than 8 buffers per packet 20972b245cb2SAnirudh Venkataramanan * @skb: send buffer 20982b245cb2SAnirudh Venkataramanan * 20992b245cb2SAnirudh Venkataramanan * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 21002b245cb2SAnirudh Venkataramanan * and so we need to figure out the cases where we need to linearize the skb. 21012b245cb2SAnirudh Venkataramanan * 21022b245cb2SAnirudh Venkataramanan * For TSO we need to count the TSO header and segment payload separately. 21032b245cb2SAnirudh Venkataramanan * As such we need to check cases where we have 7 fragments or more as we 21042b245cb2SAnirudh Venkataramanan * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 21052b245cb2SAnirudh Venkataramanan * the segment payload in the first descriptor, and another 7 for the 21062b245cb2SAnirudh Venkataramanan * fragments. 21072b245cb2SAnirudh Venkataramanan */ 21082b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb) 21092b245cb2SAnirudh Venkataramanan { 2110d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag, *stale; 21112b245cb2SAnirudh Venkataramanan int nr_frags, sum; 21122b245cb2SAnirudh Venkataramanan 21132b245cb2SAnirudh Venkataramanan /* no need to check if number of frags is less than 7 */ 21142b245cb2SAnirudh Venkataramanan nr_frags = skb_shinfo(skb)->nr_frags; 21152b245cb2SAnirudh Venkataramanan if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 21162b245cb2SAnirudh Venkataramanan return false; 21172b245cb2SAnirudh Venkataramanan 21182b245cb2SAnirudh Venkataramanan /* We need to walk through the list and validate that each group 21192b245cb2SAnirudh Venkataramanan * of 6 fragments totals at least gso_size. 21202b245cb2SAnirudh Venkataramanan */ 21212b245cb2SAnirudh Venkataramanan nr_frags -= ICE_MAX_BUF_TXD - 2; 21222b245cb2SAnirudh Venkataramanan frag = &skb_shinfo(skb)->frags[0]; 21232b245cb2SAnirudh Venkataramanan 21242b245cb2SAnirudh Venkataramanan /* Initialize size to the negative value of gso_size minus 1. We 21254ee656bbSTony Nguyen * use this as the worst case scenario in which the frag ahead 21262b245cb2SAnirudh Venkataramanan * of us only provides one byte which is why we are limited to 6 21272b245cb2SAnirudh Venkataramanan * descriptors for a single transmit as the header and previous 21282b245cb2SAnirudh Venkataramanan * fragment are already consuming 2 descriptors. 
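 *
 * As an illustrative case (numbers assumed, not taken from a real trace):
 * with a gso_size of 1448 the sum starts at -1447; if fragments 0 through 4
 * are 100 bytes each the running sum is -947, and adding a sixth 100-byte
 * fragment still leaves it negative, so the skb is flagged for linearization
 * because no window of six fragments covers a full segment.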
21292b245cb2SAnirudh Venkataramanan */ 21302b245cb2SAnirudh Venkataramanan sum = 1 - skb_shinfo(skb)->gso_size; 21312b245cb2SAnirudh Venkataramanan 21322b245cb2SAnirudh Venkataramanan /* Add size of frags 0 through 4 to create our initial sum */ 21332b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21342b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21352b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21362b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21372b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21382b245cb2SAnirudh Venkataramanan 21392b245cb2SAnirudh Venkataramanan /* Walk through fragments adding latest fragment, testing it, and 21402b245cb2SAnirudh Venkataramanan * then removing stale fragments from the sum. 21412b245cb2SAnirudh Venkataramanan */ 21420a37abfaSKiran Patil for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 21430a37abfaSKiran Patil int stale_size = skb_frag_size(stale); 21440a37abfaSKiran Patil 21452b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21462b245cb2SAnirudh Venkataramanan 21470a37abfaSKiran Patil /* The stale fragment may present us with a smaller 21480a37abfaSKiran Patil * descriptor than the actual fragment size. To account 21490a37abfaSKiran Patil * for that we need to remove all the data on the front and 21500a37abfaSKiran Patil * figure out what the remainder would be in the last 21510a37abfaSKiran Patil * descriptor associated with the fragment. 21520a37abfaSKiran Patil */ 21530a37abfaSKiran Patil if (stale_size > ICE_MAX_DATA_PER_TXD) { 21540a37abfaSKiran Patil int align_pad = -(skb_frag_off(stale)) & 21550a37abfaSKiran Patil (ICE_MAX_READ_REQ_SIZE - 1); 21560a37abfaSKiran Patil 21570a37abfaSKiran Patil sum -= align_pad; 21580a37abfaSKiran Patil stale_size -= align_pad; 21590a37abfaSKiran Patil 21600a37abfaSKiran Patil do { 21610a37abfaSKiran Patil sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; 21620a37abfaSKiran Patil stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; 21630a37abfaSKiran Patil } while (stale_size > ICE_MAX_DATA_PER_TXD); 21640a37abfaSKiran Patil } 21650a37abfaSKiran Patil 21662b245cb2SAnirudh Venkataramanan /* if sum is negative we failed to make sufficient progress */ 21672b245cb2SAnirudh Venkataramanan if (sum < 0) 21682b245cb2SAnirudh Venkataramanan return true; 21692b245cb2SAnirudh Venkataramanan 21702b245cb2SAnirudh Venkataramanan if (!nr_frags--) 21712b245cb2SAnirudh Venkataramanan break; 21722b245cb2SAnirudh Venkataramanan 21730a37abfaSKiran Patil sum -= stale_size; 21742b245cb2SAnirudh Venkataramanan } 21752b245cb2SAnirudh Venkataramanan 21762b245cb2SAnirudh Venkataramanan return false; 21772b245cb2SAnirudh Venkataramanan } 21782b245cb2SAnirudh Venkataramanan 21792b245cb2SAnirudh Venkataramanan /** 21802b245cb2SAnirudh Venkataramanan * ice_chk_linearize - Check if there are more than 8 fragments per packet 21812b245cb2SAnirudh Venkataramanan * @skb: send buffer 21822b245cb2SAnirudh Venkataramanan * @count: number of buffers used 21832b245cb2SAnirudh Venkataramanan * 21842b245cb2SAnirudh Venkataramanan * Note: Our HW can't scatter-gather more than 8 fragments to build 21852b245cb2SAnirudh Venkataramanan * a packet on the wire and so we need to figure out the cases where we 21862b245cb2SAnirudh Venkataramanan * need to linearize the skb. 
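 *
 * For instance (illustrative): a non-GSO skb estimated at 8 buffers is sent
 * as-is and one estimated at 9 is linearized, while a GSO skb that needs 8
 * or more buffers is handed to __ice_chk_linearize() for the per-segment
 * check above.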
21872b245cb2SAnirudh Venkataramanan */ 21882b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 21892b245cb2SAnirudh Venkataramanan { 21902b245cb2SAnirudh Venkataramanan /* Both TSO and single send will work if count is less than 8 */ 21912b245cb2SAnirudh Venkataramanan if (likely(count < ICE_MAX_BUF_TXD)) 21922b245cb2SAnirudh Venkataramanan return false; 21932b245cb2SAnirudh Venkataramanan 21942b245cb2SAnirudh Venkataramanan if (skb_is_gso(skb)) 21952b245cb2SAnirudh Venkataramanan return __ice_chk_linearize(skb); 21962b245cb2SAnirudh Venkataramanan 21972b245cb2SAnirudh Venkataramanan /* we can support up to 8 data buffers for a single send */ 21982b245cb2SAnirudh Venkataramanan return count != ICE_MAX_BUF_TXD; 21992b245cb2SAnirudh Venkataramanan } 22002b245cb2SAnirudh Venkataramanan 22012b245cb2SAnirudh Venkataramanan /** 2202ea9b847cSJacob Keller * ice_tstamp - set up context descriptor for hardware timestamp 2203ea9b847cSJacob Keller * @tx_ring: pointer to the Tx ring to send buffer on 2204ea9b847cSJacob Keller * @skb: pointer to the SKB we're sending 2205ea9b847cSJacob Keller * @first: Tx buffer 2206ea9b847cSJacob Keller * @off: Tx offload parameters 2207ea9b847cSJacob Keller */ 2208ea9b847cSJacob Keller static void 2209e72bba21SMaciej Fijalkowski ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, 2210ea9b847cSJacob Keller struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2211ea9b847cSJacob Keller { 2212ea9b847cSJacob Keller s8 idx; 2213ea9b847cSJacob Keller 2214ea9b847cSJacob Keller /* only timestamp the outbound packet if the user has requested it */ 2215ea9b847cSJacob Keller if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 2216ea9b847cSJacob Keller return; 2217ea9b847cSJacob Keller 2218ea9b847cSJacob Keller if (!tx_ring->ptp_tx) 2219ea9b847cSJacob Keller return; 2220ea9b847cSJacob Keller 2221ea9b847cSJacob Keller /* Tx timestamps cannot be sampled when doing TSO */ 2222ea9b847cSJacob Keller if (first->tx_flags & ICE_TX_FLAGS_TSO) 2223ea9b847cSJacob Keller return; 2224ea9b847cSJacob Keller 2225ea9b847cSJacob Keller /* Grab an open timestamp slot */ 2226ea9b847cSJacob Keller idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); 2227ea9b847cSJacob Keller if (idx < 0) 2228ea9b847cSJacob Keller return; 2229ea9b847cSJacob Keller 2230ea9b847cSJacob Keller off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2231ea9b847cSJacob Keller (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) | 2232ea9b847cSJacob Keller ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S)); 2233ea9b847cSJacob Keller first->tx_flags |= ICE_TX_FLAGS_TSYN; 2234ea9b847cSJacob Keller } 2235ea9b847cSJacob Keller 2236ea9b847cSJacob Keller /** 22372b245cb2SAnirudh Venkataramanan * ice_xmit_frame_ring - Sends buffer on Tx ring 22382b245cb2SAnirudh Venkataramanan * @skb: send buffer 22392b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 22402b245cb2SAnirudh Venkataramanan * 22412b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 22422b245cb2SAnirudh Venkataramanan */ 22432b245cb2SAnirudh Venkataramanan static netdev_tx_t 2244e72bba21SMaciej Fijalkowski ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) 22452b245cb2SAnirudh Venkataramanan { 2246d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params offload = { 0 }; 22470c3a6101SDave Ertman struct ice_vsi *vsi = tx_ring->vsi; 22482b245cb2SAnirudh Venkataramanan struct ice_tx_buf *first; 2249f9f83202SDave Ertman struct ethhdr *eth; 22502b245cb2SAnirudh 
Venkataramanan unsigned int count; 2251d76a60baSAnirudh Venkataramanan int tso, csum; 22522b245cb2SAnirudh Venkataramanan 22533089cf6dSJesse Brandeburg ice_trace(xmit_frame_ring, tx_ring, skb); 22543089cf6dSJesse Brandeburg 22552b245cb2SAnirudh Venkataramanan count = ice_xmit_desc_count(skb); 22562b245cb2SAnirudh Venkataramanan if (ice_chk_linearize(skb, count)) { 22572b245cb2SAnirudh Venkataramanan if (__skb_linearize(skb)) 22582b245cb2SAnirudh Venkataramanan goto out_drop; 22592b245cb2SAnirudh Venkataramanan count = ice_txd_use_count(skb->len); 22602b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_linearize++; 22612b245cb2SAnirudh Venkataramanan } 22622b245cb2SAnirudh Venkataramanan 22632b245cb2SAnirudh Venkataramanan /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 22642b245cb2SAnirudh Venkataramanan * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 22652b245cb2SAnirudh Venkataramanan * + 4 desc gap to avoid the cache line where head is, 22662b245cb2SAnirudh Venkataramanan * + 1 desc for context descriptor, 22672b245cb2SAnirudh Venkataramanan * otherwise try next time 22682b245cb2SAnirudh Venkataramanan */ 2269c585ea42SBrett Creeley if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2270c585ea42SBrett Creeley ICE_DESCS_FOR_CTX_DESC)) { 22712b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_busy++; 22722b245cb2SAnirudh Venkataramanan return NETDEV_TX_BUSY; 22732b245cb2SAnirudh Venkataramanan } 22742b245cb2SAnirudh Venkataramanan 2275d76a60baSAnirudh Venkataramanan offload.tx_ring = tx_ring; 2276d76a60baSAnirudh Venkataramanan 22772b245cb2SAnirudh Venkataramanan /* record the location of the first descriptor for this packet */ 22782b245cb2SAnirudh Venkataramanan first = &tx_ring->tx_buf[tx_ring->next_to_use]; 22792b245cb2SAnirudh Venkataramanan first->skb = skb; 22802b245cb2SAnirudh Venkataramanan first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 22812b245cb2SAnirudh Venkataramanan first->gso_segs = 1; 2282d76a60baSAnirudh Venkataramanan first->tx_flags = 0; 22832b245cb2SAnirudh Venkataramanan 2284d76a60baSAnirudh Venkataramanan /* prepare the VLAN tagging flags for Tx */ 22852bb19d6eSBrett Creeley ice_tx_prepare_vlan_flags(tx_ring, first); 2286d76a60baSAnirudh Venkataramanan 2287d76a60baSAnirudh Venkataramanan /* set up TSO offload */ 2288d76a60baSAnirudh Venkataramanan tso = ice_tso(first, &offload); 2289d76a60baSAnirudh Venkataramanan if (tso < 0) 2290d76a60baSAnirudh Venkataramanan goto out_drop; 2291d76a60baSAnirudh Venkataramanan 2292d76a60baSAnirudh Venkataramanan /* always set up Tx checksum offload */ 2293d76a60baSAnirudh Venkataramanan csum = ice_tx_csum(first, &offload); 2294d76a60baSAnirudh Venkataramanan if (csum < 0) 2295d76a60baSAnirudh Venkataramanan goto out_drop; 2296d76a60baSAnirudh Venkataramanan 22970c3a6101SDave Ertman /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 2298f9f83202SDave Ertman eth = (struct ethhdr *)skb_mac_header(skb); 2299f9f83202SDave Ertman if (unlikely((skb->priority == TC_PRIO_CONTROL || 2300f9f83202SDave Ertman eth->h_proto == htons(ETH_P_LLDP)) && 23010c3a6101SDave Ertman vsi->type == ICE_VSI_PF && 2302fc2d1165SChinh T Cao vsi->port_info->qos_cfg.is_sw_lldp)) 23030c3a6101SDave Ertman offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 23040c3a6101SDave Ertman ICE_TX_CTX_DESC_SWTCH_UPLINK << 23050c3a6101SDave Ertman ICE_TXD_CTX_QW1_CMD_S); 23060c3a6101SDave Ertman 2307ea9b847cSJacob Keller ice_tstamp(tx_ring, skb, first, &offload); 2308f5396b8aSGrzegorz Nitka if 
(ice_is_switchdev_running(vsi->back)) 2309f5396b8aSGrzegorz Nitka ice_eswitch_set_target_vsi(skb, &offload); 2310ea9b847cSJacob Keller 23110c3a6101SDave Ertman if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2312d76a60baSAnirudh Venkataramanan struct ice_tx_ctx_desc *cdesc; 231388865fc4SKarol Kolacinski u16 i = tx_ring->next_to_use; 2314d76a60baSAnirudh Venkataramanan 2315d76a60baSAnirudh Venkataramanan /* grab the next descriptor */ 2316d76a60baSAnirudh Venkataramanan cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2317d76a60baSAnirudh Venkataramanan i++; 2318d76a60baSAnirudh Venkataramanan tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2319d76a60baSAnirudh Venkataramanan 2320d76a60baSAnirudh Venkataramanan /* setup context descriptor */ 2321d76a60baSAnirudh Venkataramanan cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2322d76a60baSAnirudh Venkataramanan cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2323d76a60baSAnirudh Venkataramanan cdesc->rsvd = cpu_to_le16(0); 2324d76a60baSAnirudh Venkataramanan cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2325d76a60baSAnirudh Venkataramanan } 2326d76a60baSAnirudh Venkataramanan 2327d76a60baSAnirudh Venkataramanan ice_tx_map(tx_ring, first, &offload); 23282b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23292b245cb2SAnirudh Venkataramanan 23302b245cb2SAnirudh Venkataramanan out_drop: 23313089cf6dSJesse Brandeburg ice_trace(xmit_frame_ring_drop, tx_ring, skb); 23322b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 23332b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23342b245cb2SAnirudh Venkataramanan } 23352b245cb2SAnirudh Venkataramanan 23362b245cb2SAnirudh Venkataramanan /** 23372b245cb2SAnirudh Venkataramanan * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 23382b245cb2SAnirudh Venkataramanan * @skb: send buffer 23392b245cb2SAnirudh Venkataramanan * @netdev: network interface device structure 23402b245cb2SAnirudh Venkataramanan * 23412b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 23422b245cb2SAnirudh Venkataramanan */ 23432b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 23442b245cb2SAnirudh Venkataramanan { 23452b245cb2SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 23462b245cb2SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 2347e72bba21SMaciej Fijalkowski struct ice_tx_ring *tx_ring; 23482b245cb2SAnirudh Venkataramanan 23492b245cb2SAnirudh Venkataramanan tx_ring = vsi->tx_rings[skb->queue_mapping]; 23502b245cb2SAnirudh Venkataramanan 23512b245cb2SAnirudh Venkataramanan /* hardware can't handle really short frames, hardware padding works 23522b245cb2SAnirudh Venkataramanan * beyond this point 23532b245cb2SAnirudh Venkataramanan */ 23542b245cb2SAnirudh Venkataramanan if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 23552b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23562b245cb2SAnirudh Venkataramanan 23572b245cb2SAnirudh Venkataramanan return ice_xmit_frame_ring(skb, tx_ring); 23582b245cb2SAnirudh Venkataramanan } 2359148beb61SHenry Tieman 2360148beb61SHenry Tieman /** 23612a87bd73SDave Ertman * ice_get_dscp_up - return the UP/TC value for a SKB 23622a87bd73SDave Ertman * @dcbcfg: DCB config that contains DSCP to UP/TC mapping 23632a87bd73SDave Ertman * @skb: SKB to query for info to determine UP/TC 23642a87bd73SDave Ertman * 23652a87bd73SDave Ertman * This function is to only be called when the PF is in L3 DSCP PFC mode 23662a87bd73SDave Ertman */ 23672a87bd73SDave Ertman static u8 
ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb) 23682a87bd73SDave Ertman { 23692a87bd73SDave Ertman u8 dscp = 0; 23702a87bd73SDave Ertman 23712a87bd73SDave Ertman if (skb->protocol == htons(ETH_P_IP)) 23722a87bd73SDave Ertman dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; 23732a87bd73SDave Ertman else if (skb->protocol == htons(ETH_P_IPV6)) 23742a87bd73SDave Ertman dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; 23752a87bd73SDave Ertman 23762a87bd73SDave Ertman return dcbcfg->dscp_map[dscp]; 23772a87bd73SDave Ertman } 23782a87bd73SDave Ertman 23792a87bd73SDave Ertman u16 23802a87bd73SDave Ertman ice_select_queue(struct net_device *netdev, struct sk_buff *skb, 23812a87bd73SDave Ertman struct net_device *sb_dev) 23822a87bd73SDave Ertman { 23832a87bd73SDave Ertman struct ice_pf *pf = ice_netdev_to_pf(netdev); 23842a87bd73SDave Ertman struct ice_dcbx_cfg *dcbcfg; 23852a87bd73SDave Ertman 23862a87bd73SDave Ertman dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 23872a87bd73SDave Ertman if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) 23882a87bd73SDave Ertman skb->priority = ice_get_dscp_up(dcbcfg, skb); 23892a87bd73SDave Ertman 23902a87bd73SDave Ertman return netdev_pick_tx(netdev, skb, sb_dev); 23912a87bd73SDave Ertman } 23922a87bd73SDave Ertman 23932a87bd73SDave Ertman /** 2394148beb61SHenry Tieman * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2395148beb61SHenry Tieman * @tx_ring: tx_ring to clean 2396148beb61SHenry Tieman */ 2397e72bba21SMaciej Fijalkowski void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring) 2398148beb61SHenry Tieman { 2399148beb61SHenry Tieman struct ice_vsi *vsi = tx_ring->vsi; 2400148beb61SHenry Tieman s16 i = tx_ring->next_to_clean; 2401148beb61SHenry Tieman int budget = ICE_DFLT_IRQ_WORK; 2402148beb61SHenry Tieman struct ice_tx_desc *tx_desc; 2403148beb61SHenry Tieman struct ice_tx_buf *tx_buf; 2404148beb61SHenry Tieman 2405148beb61SHenry Tieman tx_buf = &tx_ring->tx_buf[i]; 2406148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, i); 2407148beb61SHenry Tieman i -= tx_ring->count; 2408148beb61SHenry Tieman 2409148beb61SHenry Tieman do { 2410148beb61SHenry Tieman struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2411148beb61SHenry Tieman 2412148beb61SHenry Tieman /* if next_to_watch is not set then there is no pending work */ 2413148beb61SHenry Tieman if (!eop_desc) 2414148beb61SHenry Tieman break; 2415148beb61SHenry Tieman 2416148beb61SHenry Tieman /* prevent any other reads prior to eop_desc */ 2417148beb61SHenry Tieman smp_rmb(); 2418148beb61SHenry Tieman 2419148beb61SHenry Tieman /* if the descriptor isn't done, no work to do */ 2420148beb61SHenry Tieman if (!(eop_desc->cmd_type_offset_bsz & 2421148beb61SHenry Tieman cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2422148beb61SHenry Tieman break; 2423148beb61SHenry Tieman 2424148beb61SHenry Tieman /* clear next_to_watch to prevent false hangs */ 2425148beb61SHenry Tieman tx_buf->next_to_watch = NULL; 2426148beb61SHenry Tieman tx_desc->buf_addr = 0; 2427148beb61SHenry Tieman tx_desc->cmd_type_offset_bsz = 0; 2428148beb61SHenry Tieman 2429148beb61SHenry Tieman /* move past filter desc */ 2430148beb61SHenry Tieman tx_buf++; 2431148beb61SHenry Tieman tx_desc++; 2432148beb61SHenry Tieman i++; 2433148beb61SHenry Tieman if (unlikely(!i)) { 2434148beb61SHenry Tieman i -= tx_ring->count; 2435148beb61SHenry Tieman tx_buf = tx_ring->tx_buf; 2436148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, 0); 2437148beb61SHenry Tieman } 2438148beb61SHenry Tieman 2439148beb61SHenry Tieman /* unmap the 
data header */ 2440148beb61SHenry Tieman if (dma_unmap_len(tx_buf, len)) 2441148beb61SHenry Tieman dma_unmap_single(tx_ring->dev, 2442148beb61SHenry Tieman dma_unmap_addr(tx_buf, dma), 2443148beb61SHenry Tieman dma_unmap_len(tx_buf, len), 2444148beb61SHenry Tieman DMA_TO_DEVICE); 2445148beb61SHenry Tieman if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2446148beb61SHenry Tieman devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2447148beb61SHenry Tieman 2448148beb61SHenry Tieman /* clear next_to_watch to prevent false hangs */ 2449148beb61SHenry Tieman tx_buf->raw_buf = NULL; 2450148beb61SHenry Tieman tx_buf->tx_flags = 0; 2451148beb61SHenry Tieman tx_buf->next_to_watch = NULL; 2452148beb61SHenry Tieman dma_unmap_len_set(tx_buf, len, 0); 2453148beb61SHenry Tieman tx_desc->buf_addr = 0; 2454148beb61SHenry Tieman tx_desc->cmd_type_offset_bsz = 0; 2455148beb61SHenry Tieman 2456148beb61SHenry Tieman /* move past eop_desc for start of next FD desc */ 2457148beb61SHenry Tieman tx_buf++; 2458148beb61SHenry Tieman tx_desc++; 2459148beb61SHenry Tieman i++; 2460148beb61SHenry Tieman if (unlikely(!i)) { 2461148beb61SHenry Tieman i -= tx_ring->count; 2462148beb61SHenry Tieman tx_buf = tx_ring->tx_buf; 2463148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, 0); 2464148beb61SHenry Tieman } 2465148beb61SHenry Tieman 2466148beb61SHenry Tieman budget--; 2467148beb61SHenry Tieman } while (likely(budget)); 2468148beb61SHenry Tieman 2469148beb61SHenry Tieman i += tx_ring->count; 2470148beb61SHenry Tieman tx_ring->next_to_clean = i; 2471148beb61SHenry Tieman 2472148beb61SHenry Tieman /* re-enable interrupt if needed */ 2473148beb61SHenry Tieman ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2474148beb61SHenry Tieman } 2475
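/*
 * A minimal user-space sketch (not part of the driver; walk_ring, RING_COUNT
 * and the values in main() are illustrative assumptions) of the negative
 * index trick used by ice_clean_ctrl_tx_irq above: next_to_clean is biased
 * by -count so the wrap check is a cheap "!i" instead of a compare against
 * the ring size, and the bias is removed again before the index is written
 * back.
 */
#include <stdio.h>

#define RING_COUNT 8	/* assumed toy ring size */

static int walk_ring(int next_to_clean, int entries_to_clean)
{
	int i = next_to_clean;

	i -= RING_COUNT;	/* bias into the negative range, as the driver does */
	while (entries_to_clean--) {
		i++;
		if (!i)		/* wrapped past the last descriptor */
			i -= RING_COUNT;
	}
	return i + RING_COUNT;	/* undo the bias to recover the ring index */
}

int main(void)
{
	/* cleaning 5 entries starting at index 6 on an 8-entry ring wraps to 3 */
	printf("new next_to_clean = %d\n", walk_ring(6, 5));
	return 0;
}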