// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
i : 0; 72cac2a27cSHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, i); 73cac2a27cSHenry Tieman tx_buf = &tx_ring->tx_buf[i]; 74cac2a27cSHenry Tieman 75cac2a27cSHenry Tieman i++; 76cac2a27cSHenry Tieman tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 77cac2a27cSHenry Tieman 78cac2a27cSHenry Tieman memset(tx_buf, 0, sizeof(*tx_buf)); 79cac2a27cSHenry Tieman dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE); 80cac2a27cSHenry Tieman dma_unmap_addr_set(tx_buf, dma, dma); 81cac2a27cSHenry Tieman 82cac2a27cSHenry Tieman tx_desc->buf_addr = cpu_to_le64(dma); 83cac2a27cSHenry Tieman td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY | 84cac2a27cSHenry Tieman ICE_TX_DESC_CMD_RE; 85cac2a27cSHenry Tieman 86cac2a27cSHenry Tieman tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT; 87cac2a27cSHenry Tieman tx_buf->raw_buf = raw_packet; 88cac2a27cSHenry Tieman 89cac2a27cSHenry Tieman tx_desc->cmd_type_offset_bsz = 90cac2a27cSHenry Tieman ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0); 91cac2a27cSHenry Tieman 92cac2a27cSHenry Tieman /* Force memory write to complete before letting h/w know 93cac2a27cSHenry Tieman * there are new descriptors to fetch. 94cac2a27cSHenry Tieman */ 95cac2a27cSHenry Tieman wmb(); 96cac2a27cSHenry Tieman 97cac2a27cSHenry Tieman /* mark the data descriptor to be watched */ 98cac2a27cSHenry Tieman first->next_to_watch = tx_desc; 99cac2a27cSHenry Tieman 100cac2a27cSHenry Tieman writel(tx_ring->next_to_use, tx_ring->tail); 101cac2a27cSHenry Tieman 102cac2a27cSHenry Tieman return 0; 103cac2a27cSHenry Tieman } 104148beb61SHenry Tieman 105cdedef59SAnirudh Venkataramanan /** 106cdedef59SAnirudh Venkataramanan * ice_unmap_and_free_tx_buf - Release a Tx buffer 107cdedef59SAnirudh Venkataramanan * @ring: the ring that owns the buffer 108cdedef59SAnirudh Venkataramanan * @tx_buf: the buffer to free 109cdedef59SAnirudh Venkataramanan */ 110cdedef59SAnirudh Venkataramanan static void 111e72bba21SMaciej Fijalkowski ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) 112cdedef59SAnirudh Venkataramanan { 113cdedef59SAnirudh Venkataramanan if (tx_buf->skb) { 114148beb61SHenry Tieman if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 115148beb61SHenry Tieman devm_kfree(ring->dev, tx_buf->raw_buf); 116148beb61SHenry Tieman else if (ice_ring_is_xdp(ring)) 117efc2214bSMaciej Fijalkowski page_frag_free(tx_buf->raw_buf); 118efc2214bSMaciej Fijalkowski else 119cdedef59SAnirudh Venkataramanan dev_kfree_skb_any(tx_buf->skb); 120cdedef59SAnirudh Venkataramanan if (dma_unmap_len(tx_buf, len)) 121cdedef59SAnirudh Venkataramanan dma_unmap_single(ring->dev, 122cdedef59SAnirudh Venkataramanan dma_unmap_addr(tx_buf, dma), 123cdedef59SAnirudh Venkataramanan dma_unmap_len(tx_buf, len), 124cdedef59SAnirudh Venkataramanan DMA_TO_DEVICE); 125cdedef59SAnirudh Venkataramanan } else if (dma_unmap_len(tx_buf, len)) { 126cdedef59SAnirudh Venkataramanan dma_unmap_page(ring->dev, 127cdedef59SAnirudh Venkataramanan dma_unmap_addr(tx_buf, dma), 128cdedef59SAnirudh Venkataramanan dma_unmap_len(tx_buf, len), 129cdedef59SAnirudh Venkataramanan DMA_TO_DEVICE); 130cdedef59SAnirudh Venkataramanan } 131cdedef59SAnirudh Venkataramanan 132cdedef59SAnirudh Venkataramanan tx_buf->next_to_watch = NULL; 133cdedef59SAnirudh Venkataramanan tx_buf->skb = NULL; 134cdedef59SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, 0); 135cdedef59SAnirudh Venkataramanan /* tx_buf must be completely set up in the transmit path */ 136cdedef59SAnirudh Venkataramanan } 137cdedef59SAnirudh Venkataramanan 
static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
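	/* PF rings register xdp_rxq_info when the ring is set up;
	 * unregister it here before the ring memory is released
	 */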
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

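	/* error path: free the Rx buffer array; the descriptor memory was
	 * obtained with dmam_alloc_coherent() and is released by devres
	 */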
err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (err == ICE_XDP_CONSUMED)
			goto out_failure;
		return err;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be free'ed by XDP core.
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must to record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
will update next_to_clean and then clean up the contents 1024efc2214bSMaciej Fijalkowski * of the rx_buf. It will either recycle the buffer or unmap it and free 1025efc2214bSMaciej Fijalkowski * the associated resources. 10262b245cb2SAnirudh Venkataramanan */ 10271beb7830SBjörn Töpel static void 1028e72bba21SMaciej Fijalkowski ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 10291beb7830SBjörn Töpel int rx_buf_pgcnt) 10302b245cb2SAnirudh Venkataramanan { 103188865fc4SKarol Kolacinski u16 ntc = rx_ring->next_to_clean + 1; 1032efc2214bSMaciej Fijalkowski 1033efc2214bSMaciej Fijalkowski /* fetch, update, and store next to clean */ 1034efc2214bSMaciej Fijalkowski ntc = (ntc < rx_ring->count) ? ntc : 0; 1035efc2214bSMaciej Fijalkowski rx_ring->next_to_clean = ntc; 1036efc2214bSMaciej Fijalkowski 1037ac6f733aSMitch Williams if (!rx_buf) 1038ac6f733aSMitch Williams return; 1039ac6f733aSMitch Williams 10401beb7830SBjörn Töpel if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { 1041ac6f733aSMitch Williams /* hand second half of page back to the ring */ 10422b245cb2SAnirudh Venkataramanan ice_reuse_rx_page(rx_ring, rx_buf); 10432b245cb2SAnirudh Venkataramanan } else { 10442b245cb2SAnirudh Venkataramanan /* we are not reusing the buffer so unmap it */ 10457237f5b0SMaciej Fijalkowski dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, 10467237f5b0SMaciej Fijalkowski ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 10477237f5b0SMaciej Fijalkowski ICE_RX_DMA_ATTR); 104803c66a13SMaciej Fijalkowski __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 10492b245cb2SAnirudh Venkataramanan } 10502b245cb2SAnirudh Venkataramanan 10512b245cb2SAnirudh Venkataramanan /* clear contents of buffer_info */ 10522b245cb2SAnirudh Venkataramanan rx_buf->page = NULL; 10532b245cb2SAnirudh Venkataramanan } 10542b245cb2SAnirudh Venkataramanan 10552b245cb2SAnirudh Venkataramanan /** 10562b245cb2SAnirudh Venkataramanan * ice_is_non_eop - process handling of non-EOP buffers 10572b245cb2SAnirudh Venkataramanan * @rx_ring: Rx ring being processed 10582b245cb2SAnirudh Venkataramanan * @rx_desc: Rx descriptor for current buffer 10592b245cb2SAnirudh Venkataramanan * 1060efc2214bSMaciej Fijalkowski * If the buffer is an EOP buffer, this function exits returning false, 1061efc2214bSMaciej Fijalkowski * otherwise return true indicating that this is in fact a non-EOP buffer. 
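 *
 * For illustration (the buffer size here is an assumption, not read from the
 * ring configuration): a 9000 byte jumbo frame received into 2 KB Rx buffers
 * is spread by hardware across five descriptors, and only the fifth carries
 * the EOF status bit, so the caller keeps accumulating fragments into the
 * in-progress skb for as long as this helper keeps returning true.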
10622b245cb2SAnirudh Venkataramanan */ 1063c8b7abddSBruce Allan static bool 1064e72bba21SMaciej Fijalkowski ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) 10652b245cb2SAnirudh Venkataramanan { 10662b245cb2SAnirudh Venkataramanan /* if we are the last buffer then there is nothing else to do */ 10672b245cb2SAnirudh Venkataramanan #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 10682b245cb2SAnirudh Venkataramanan if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) 10692b245cb2SAnirudh Venkataramanan return false; 10702b245cb2SAnirudh Venkataramanan 10712b245cb2SAnirudh Venkataramanan rx_ring->rx_stats.non_eop_descs++; 10722b245cb2SAnirudh Venkataramanan 10732b245cb2SAnirudh Venkataramanan return true; 10742b245cb2SAnirudh Venkataramanan } 10752b245cb2SAnirudh Venkataramanan 10762b245cb2SAnirudh Venkataramanan /** 10772b245cb2SAnirudh Venkataramanan * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1078d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 10792b245cb2SAnirudh Venkataramanan * @budget: Total limit on number of packets to process 10802b245cb2SAnirudh Venkataramanan * 10812b245cb2SAnirudh Venkataramanan * This function provides a "bounce buffer" approach to Rx interrupt 10822b245cb2SAnirudh Venkataramanan * processing. The advantage to this is that on systems that have 10832b245cb2SAnirudh Venkataramanan * expensive overhead for IOMMU access this provides a means of avoiding 10842b245cb2SAnirudh Venkataramanan * it by maintaining the mapping of the page to the system. 10852b245cb2SAnirudh Venkataramanan * 10862b245cb2SAnirudh Venkataramanan * Returns amount of work completed 10872b245cb2SAnirudh Venkataramanan */ 1088e72bba21SMaciej Fijalkowski int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) 10892b245cb2SAnirudh Venkataramanan { 109043b5169dSLorenzo Bianconi unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; 10912b245cb2SAnirudh Venkataramanan u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 1092f1b1f409SMaciej Fijalkowski unsigned int offset = rx_ring->rx_offset; 1093eb087cd8SMaciej Fijalkowski struct ice_tx_ring *xdp_ring = NULL; 1094efc2214bSMaciej Fijalkowski unsigned int xdp_res, xdp_xmit = 0; 109529b82f2aSMaciej Fijalkowski struct sk_buff *skb = rx_ring->skb; 1096efc2214bSMaciej Fijalkowski struct bpf_prog *xdp_prog = NULL; 1097efc2214bSMaciej Fijalkowski struct xdp_buff xdp; 1098cb7db356SBrett Creeley bool failure; 10992b245cb2SAnirudh Venkataramanan 1100d4ecdbf7SJesper Dangaard Brouer /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 1101d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE < 8192) 110243b5169dSLorenzo Bianconi frame_sz = ice_rx_frame_truesize(rx_ring, 0); 1103d4ecdbf7SJesper Dangaard Brouer #endif 110443b5169dSLorenzo Bianconi xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 1105efc2214bSMaciej Fijalkowski 1106eb087cd8SMaciej Fijalkowski xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1107eb087cd8SMaciej Fijalkowski if (xdp_prog) 1108eb087cd8SMaciej Fijalkowski xdp_ring = rx_ring->xdp_ring; 1109eb087cd8SMaciej Fijalkowski 1110f9867df6SAnirudh Venkataramanan /* start the loop to process Rx packets bounded by 'budget' */ 11112b245cb2SAnirudh Venkataramanan while (likely(total_rx_pkts < (unsigned int)budget)) { 11122b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 11136c869cb7SMaciej Fijalkowski struct ice_rx_buf *rx_buf; 1114be9df4afSLorenzo Bianconi unsigned char *hard_start; 11156c869cb7SMaciej Fijalkowski unsigned 
int size; 11162b245cb2SAnirudh Venkataramanan u16 stat_err_bits; 11171beb7830SBjörn Töpel int rx_buf_pgcnt; 11182b245cb2SAnirudh Venkataramanan u16 vlan_tag = 0; 1119dda90cb9SJesse Brandeburg u16 rx_ptype; 11202b245cb2SAnirudh Venkataramanan 1121f9867df6SAnirudh Venkataramanan /* get the Rx desc from Rx ring based on 'next_to_clean' */ 11222b245cb2SAnirudh Venkataramanan rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 11232b245cb2SAnirudh Venkataramanan 11242b245cb2SAnirudh Venkataramanan /* status_error_len will always be zero for unused descriptors 11252b245cb2SAnirudh Venkataramanan * because it's cleared in cleanup, and overlaps with hdr_addr 11262b245cb2SAnirudh Venkataramanan * which is always zero because packet split isn't used, if the 11272b245cb2SAnirudh Venkataramanan * hardware wrote DD then it will be non-zero 11282b245cb2SAnirudh Venkataramanan */ 11292b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 11302b245cb2SAnirudh Venkataramanan if (!ice_test_staterr(rx_desc, stat_err_bits)) 11312b245cb2SAnirudh Venkataramanan break; 11322b245cb2SAnirudh Venkataramanan 11332b245cb2SAnirudh Venkataramanan /* This memory barrier is needed to keep us from reading 11342b245cb2SAnirudh Venkataramanan * any other fields out of the rx_desc until we know the 11352b245cb2SAnirudh Venkataramanan * DD bit is set. 11362b245cb2SAnirudh Venkataramanan */ 11372b245cb2SAnirudh Venkataramanan dma_rmb(); 11382b245cb2SAnirudh Venkataramanan 11393089cf6dSJesse Brandeburg ice_trace(clean_rx_irq, rx_ring, rx_desc); 1140148beb61SHenry Tieman if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 1141d6218317SQi Zhang struct ice_vsi *ctrl_vsi = rx_ring->vsi; 1142d6218317SQi Zhang 1143d6218317SQi Zhang if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && 1144d6218317SQi Zhang ctrl_vsi->vf_id != ICE_INVAL_VFID) 1145d6218317SQi Zhang ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); 11461beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, NULL, 0); 1147148beb61SHenry Tieman cleaned_count++; 1148148beb61SHenry Tieman continue; 1149148beb61SHenry Tieman } 1150148beb61SHenry Tieman 11516c869cb7SMaciej Fijalkowski size = le16_to_cpu(rx_desc->wb.pkt_len) & 11526c869cb7SMaciej Fijalkowski ICE_RX_FLX_DESC_PKT_LEN_M; 11532b245cb2SAnirudh Venkataramanan 1154ac6f733aSMitch Williams /* retrieve a buffer from the ring */ 115529b82f2aSMaciej Fijalkowski rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); 1156ac6f733aSMitch Williams 1157efc2214bSMaciej Fijalkowski if (!size) { 1158efc2214bSMaciej Fijalkowski xdp.data = NULL; 1159efc2214bSMaciej Fijalkowski xdp.data_end = NULL; 1160aaf27254SMaciej Fijalkowski xdp.data_hard_start = NULL; 1161aaf27254SMaciej Fijalkowski xdp.data_meta = NULL; 1162efc2214bSMaciej Fijalkowski goto construct_skb; 1163efc2214bSMaciej Fijalkowski } 1164efc2214bSMaciej Fijalkowski 1165be9df4afSLorenzo Bianconi hard_start = page_address(rx_buf->page) + rx_buf->page_offset - 1166be9df4afSLorenzo Bianconi offset; 1167be9df4afSLorenzo Bianconi xdp_prepare_buff(&xdp, hard_start, offset, size, true); 1168d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE > 4096) 1169d4ecdbf7SJesper Dangaard Brouer /* At larger PAGE_SIZE, frame_sz depend on len size */ 1170d4ecdbf7SJesper Dangaard Brouer xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 1171d4ecdbf7SJesper Dangaard Brouer #endif 1172efc2214bSMaciej Fijalkowski 117349589b23SToke Høiland-Jørgensen if (!xdp_prog) 1174efc2214bSMaciej Fijalkowski goto construct_skb; 1175efc2214bSMaciej Fijalkowski 1176eb087cd8SMaciej Fijalkowski 
xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring); 117759bb0808SMaciej Fijalkowski if (!xdp_res) 117859bb0808SMaciej Fijalkowski goto construct_skb; 1179efc2214bSMaciej Fijalkowski if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1180efc2214bSMaciej Fijalkowski xdp_xmit |= xdp_res; 1181d4ecdbf7SJesper Dangaard Brouer ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 1182efc2214bSMaciej Fijalkowski } else { 1183efc2214bSMaciej Fijalkowski rx_buf->pagecnt_bias++; 1184efc2214bSMaciej Fijalkowski } 1185efc2214bSMaciej Fijalkowski total_rx_bytes += size; 1186efc2214bSMaciej Fijalkowski total_rx_pkts++; 1187efc2214bSMaciej Fijalkowski 1188efc2214bSMaciej Fijalkowski cleaned_count++; 11891beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1190efc2214bSMaciej Fijalkowski continue; 1191efc2214bSMaciej Fijalkowski construct_skb: 11921f45ebe0SMitch Williams if (skb) { 11937237f5b0SMaciej Fijalkowski ice_add_rx_frag(rx_ring, rx_buf, skb, size); 11941f45ebe0SMitch Williams } else if (likely(xdp.data)) { 11951f45ebe0SMitch Williams if (ice_ring_uses_build_skb(rx_ring)) 1196aaf27254SMaciej Fijalkowski skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1197712edbbbSMaciej Fijalkowski else 1198efc2214bSMaciej Fijalkowski skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 11991f45ebe0SMitch Williams } 1200712edbbbSMaciej Fijalkowski /* exit if we failed to retrieve a buffer */ 1201712edbbbSMaciej Fijalkowski if (!skb) { 1202712edbbbSMaciej Fijalkowski rx_ring->rx_stats.alloc_buf_failed++; 1203ac6f733aSMitch Williams if (rx_buf) 1204712edbbbSMaciej Fijalkowski rx_buf->pagecnt_bias++; 12052b245cb2SAnirudh Venkataramanan break; 1206712edbbbSMaciej Fijalkowski } 12072b245cb2SAnirudh Venkataramanan 12081beb7830SBjörn Töpel ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 12092b245cb2SAnirudh Venkataramanan cleaned_count++; 12102b245cb2SAnirudh Venkataramanan 12112b245cb2SAnirudh Venkataramanan /* skip if it is NOP desc */ 121229b82f2aSMaciej Fijalkowski if (ice_is_non_eop(rx_ring, rx_desc)) 12132b245cb2SAnirudh Venkataramanan continue; 12142b245cb2SAnirudh Venkataramanan 12152b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 12162b245cb2SAnirudh Venkataramanan if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 12172b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 12182b245cb2SAnirudh Venkataramanan continue; 12192b245cb2SAnirudh Venkataramanan } 12202b245cb2SAnirudh Venkataramanan 12212b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 12222b245cb2SAnirudh Venkataramanan if (ice_test_staterr(rx_desc, stat_err_bits)) 12232b245cb2SAnirudh Venkataramanan vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 12242b245cb2SAnirudh Venkataramanan 1225133f4883SKrzysztof Kazimierczak /* pad the skb if needed, to make a valid ethernet frame */ 1226133f4883SKrzysztof Kazimierczak if (eth_skb_pad(skb)) { 12272b245cb2SAnirudh Venkataramanan skb = NULL; 12282b245cb2SAnirudh Venkataramanan continue; 12292b245cb2SAnirudh Venkataramanan } 12302b245cb2SAnirudh Venkataramanan 12312b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 12322b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 12332b245cb2SAnirudh Venkataramanan 1234d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 12356503b659SJesse Brandeburg rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 12366503b659SJesse Brandeburg ICE_RX_FLEX_DESC_PTYPE_M; 12376503b659SJesse Brandeburg 1238d76a60baSAnirudh Venkataramanan 
ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1239d76a60baSAnirudh Venkataramanan 12403089cf6dSJesse Brandeburg ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); 12412b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 12422b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 124329b82f2aSMaciej Fijalkowski skb = NULL; 12442b245cb2SAnirudh Venkataramanan 12452b245cb2SAnirudh Venkataramanan /* update budget accounting */ 12462b245cb2SAnirudh Venkataramanan total_rx_pkts++; 12472b245cb2SAnirudh Venkataramanan } 12482b245cb2SAnirudh Venkataramanan 1249cb7db356SBrett Creeley /* return up to cleaned_count buffers to hardware */ 1250cb7db356SBrett Creeley failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1251cb7db356SBrett Creeley 1252efc2214bSMaciej Fijalkowski if (xdp_prog) 1253eb087cd8SMaciej Fijalkowski ice_finalize_xdp_rx(xdp_ring, xdp_xmit); 125429b82f2aSMaciej Fijalkowski rx_ring->skb = skb; 1255efc2214bSMaciej Fijalkowski 12562d4238f5SKrzysztof Kazimierczak ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 12572b245cb2SAnirudh Venkataramanan 12582b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 12592b245cb2SAnirudh Venkataramanan return failure ? budget : (int)total_rx_pkts; 12602b245cb2SAnirudh Venkataramanan } 12612b245cb2SAnirudh Venkataramanan 1262*d8eb7ad5SJesse Brandeburg static void __ice_update_sample(struct ice_q_vector *q_vector, 1263*d8eb7ad5SJesse Brandeburg struct ice_ring_container *rc, 1264*d8eb7ad5SJesse Brandeburg struct dim_sample *sample, 1265*d8eb7ad5SJesse Brandeburg bool is_tx) 1266*d8eb7ad5SJesse Brandeburg { 1267*d8eb7ad5SJesse Brandeburg u64 packets = 0, bytes = 0; 1268*d8eb7ad5SJesse Brandeburg 1269*d8eb7ad5SJesse Brandeburg if (is_tx) { 1270*d8eb7ad5SJesse Brandeburg struct ice_tx_ring *tx_ring; 1271*d8eb7ad5SJesse Brandeburg 1272*d8eb7ad5SJesse Brandeburg ice_for_each_tx_ring(tx_ring, *rc) { 1273*d8eb7ad5SJesse Brandeburg packets += tx_ring->stats.pkts; 1274*d8eb7ad5SJesse Brandeburg bytes += tx_ring->stats.bytes; 1275*d8eb7ad5SJesse Brandeburg } 1276*d8eb7ad5SJesse Brandeburg } else { 1277*d8eb7ad5SJesse Brandeburg struct ice_rx_ring *rx_ring; 1278*d8eb7ad5SJesse Brandeburg 1279*d8eb7ad5SJesse Brandeburg ice_for_each_rx_ring(rx_ring, *rc) { 1280*d8eb7ad5SJesse Brandeburg packets += rx_ring->stats.pkts; 1281*d8eb7ad5SJesse Brandeburg bytes += rx_ring->stats.bytes; 1282*d8eb7ad5SJesse Brandeburg } 1283*d8eb7ad5SJesse Brandeburg } 1284*d8eb7ad5SJesse Brandeburg 1285*d8eb7ad5SJesse Brandeburg dim_update_sample(q_vector->total_events, packets, bytes, sample); 1286*d8eb7ad5SJesse Brandeburg sample->comp_ctr = 0; 1287*d8eb7ad5SJesse Brandeburg 1288*d8eb7ad5SJesse Brandeburg /* if dim settings get stale, like when not updated for 1 1289*d8eb7ad5SJesse Brandeburg * second or longer, force it to start again. This addresses the 1290*d8eb7ad5SJesse Brandeburg * frequent case of an idle queue being switched to by the 1291*d8eb7ad5SJesse Brandeburg * scheduler. The 1,000 here means 1,000 milliseconds. 
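 *
 * For example, if this queue vector sat idle for two seconds before the
 * scheduler moved a busy flow onto it, the previous start sample is two
 * seconds old and any rate derived from it would be meaningless, so the
 * measurement is restarted rather than letting dim act on stale data.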
1292*d8eb7ad5SJesse Brandeburg */ 1293*d8eb7ad5SJesse Brandeburg if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) 1294*d8eb7ad5SJesse Brandeburg rc->dim.state = DIM_START_MEASURE; 1295*d8eb7ad5SJesse Brandeburg } 1296*d8eb7ad5SJesse Brandeburg 12972b245cb2SAnirudh Venkataramanan /** 1298cdf1f1f1SJacob Keller * ice_net_dim - Update net DIM algorithm 1299cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt 1300711987bbSBrett Creeley * 1301cdf1f1f1SJacob Keller * Create a DIM sample and notify net_dim() so that it can possibly decide 1302cdf1f1f1SJacob Keller * a new ITR value based on incoming packets, bytes, and interrupts. 1303711987bbSBrett Creeley * 1304cdf1f1f1SJacob Keller * This function is a no-op if the ring is not configured to dynamic ITR. 1305711987bbSBrett Creeley */ 1306cdf1f1f1SJacob Keller static void ice_net_dim(struct ice_q_vector *q_vector) 130764a59d05SAnirudh Venkataramanan { 1308cdf1f1f1SJacob Keller struct ice_ring_container *tx = &q_vector->tx; 1309cdf1f1f1SJacob Keller struct ice_ring_container *rx = &q_vector->rx; 1310cdf1f1f1SJacob Keller 1311d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(tx)) { 1312*d8eb7ad5SJesse Brandeburg struct dim_sample dim_sample; 1313cdf1f1f1SJacob Keller 1314*d8eb7ad5SJesse Brandeburg __ice_update_sample(q_vector, tx, &dim_sample, true); 1315cdf1f1f1SJacob Keller net_dim(&tx->dim, dim_sample); 1316711987bbSBrett Creeley } 1317711987bbSBrett Creeley 1318d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(rx)) { 1319*d8eb7ad5SJesse Brandeburg struct dim_sample dim_sample; 1320cdf1f1f1SJacob Keller 1321*d8eb7ad5SJesse Brandeburg __ice_update_sample(q_vector, rx, &dim_sample, false); 1322cdf1f1f1SJacob Keller net_dim(&rx->dim, dim_sample); 132364a59d05SAnirudh Venkataramanan } 132464a59d05SAnirudh Venkataramanan } 132564a59d05SAnirudh Venkataramanan 13262b245cb2SAnirudh Venkataramanan /** 132763f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 132863f545edSBrett Creeley * @itr_idx: interrupt throttling index 132964a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 133063f545edSBrett Creeley */ 13318244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 133263f545edSBrett Creeley { 13332f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 133464a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 133564a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 133664a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 133764a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 133864a59d05SAnirudh Venkataramanan * prior to the ITR field. 
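 *
 * Worked example (following the 2 microsecond granularity described above):
 * a requested ITR of 50 usecs is already even, so the mask leaves it alone
 * and the combined shift places 25 (50 / 2) register units into the interval
 * field. An odd request such as 51 usecs would, without the mask, leak its
 * low bit into the register bits just below the interval field.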
133964a59d05SAnirudh Venkataramanan */ 134064a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 134164a59d05SAnirudh Venkataramanan 134263f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 134363f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 134464a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 134563f545edSBrett Creeley } 134663f545edSBrett Creeley 134763f545edSBrett Creeley /** 1348*d8eb7ad5SJesse Brandeburg * ice_enable_interrupt - re-enable MSI-X interrupt 1349cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt to enable 1350cdf1f1f1SJacob Keller * 1351*d8eb7ad5SJesse Brandeburg * If the VSI is down, the interrupt will not be re-enabled. Also, 1352*d8eb7ad5SJesse Brandeburg * when enabling the interrupt always reset the wb_on_itr to false 1353*d8eb7ad5SJesse Brandeburg * and trigger a software interrupt to clean out internal state. 135463f545edSBrett Creeley */ 1355*d8eb7ad5SJesse Brandeburg static void ice_enable_interrupt(struct ice_q_vector *q_vector) 135663f545edSBrett Creeley { 13572fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 1358b7306b42SJesse Brandeburg bool wb_en = q_vector->wb_on_itr; 135963f545edSBrett Creeley u32 itr_val; 136063f545edSBrett Creeley 1361cdf1f1f1SJacob Keller if (test_bit(ICE_DOWN, vsi->state)) 1362cdf1f1f1SJacob Keller return; 13632ab28bb0SBrett Creeley 1364cdf1f1f1SJacob Keller /* When exiting WB_ON_ITR, let ITR resume its normal 1365cdf1f1f1SJacob Keller * interrupts-enabled path. 1366cdf1f1f1SJacob Keller */ 1367b7306b42SJesse Brandeburg if (wb_en) 1368cdf1f1f1SJacob Keller q_vector->wb_on_itr = false; 136964a59d05SAnirudh Venkataramanan 137063f545edSBrett Creeley itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 1371b7306b42SJesse Brandeburg /* trigger an immediate software interrupt when exiting 1372b7306b42SJesse Brandeburg * busy poll, to make sure to catch any pending cleanups 1373b7306b42SJesse Brandeburg * that might have been missed due to interrupt state 1374b7306b42SJesse Brandeburg * transition. 1375b7306b42SJesse Brandeburg */ 1376b7306b42SJesse Brandeburg if (wb_en) { 1377b7306b42SJesse Brandeburg itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | 1378b7306b42SJesse Brandeburg GLINT_DYN_CTL_SW_ITR_INDX_M | 1379b7306b42SJesse Brandeburg GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; 1380b7306b42SJesse Brandeburg } 13811d9f7ca3SJesse Brandeburg wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 138263f545edSBrett Creeley } 138363f545edSBrett Creeley 138463f545edSBrett Creeley /** 13852ab28bb0SBrett Creeley * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 13862ab28bb0SBrett Creeley * @q_vector: q_vector to set WB_ON_ITR on 13872ab28bb0SBrett Creeley * 13882ab28bb0SBrett Creeley * We need to tell hardware to write-back completed descriptors even when 13892ab28bb0SBrett Creeley * interrupts are disabled. Descriptors will be written back on cache line 13902ab28bb0SBrett Creeley * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 13911d9f7ca3SJesse Brandeburg * descriptors may not be written back if they don't fill a cache line until 13921d9f7ca3SJesse Brandeburg * the next interrupt. 13932ab28bb0SBrett Creeley * 13941d9f7ca3SJesse Brandeburg * This sets the write-back frequency to whatever was set previously for the 13951d9f7ca3SJesse Brandeburg * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we 13961d9f7ca3SJesse Brandeburg * aren't meddling with the INTENA_M bit. 
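 *
 * Illustrative scenario (not an exhaustive description of the hardware):
 * while the stack is busy-polling, interrupts stay masked, so a few completed
 * Tx descriptors that do not fill a whole cache line could otherwise linger
 * unwritten in hardware; WB_ON_ITR asks hardware to flush them out on the
 * ITR interval even though no interrupt will fire.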
13972ab28bb0SBrett Creeley */ 13982fb0821fSJesse Brandeburg static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 13992ab28bb0SBrett Creeley { 14002fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 14012fb0821fSJesse Brandeburg 14021d9f7ca3SJesse Brandeburg /* already in wb_on_itr mode no need to change it */ 1403cdf1f1f1SJacob Keller if (q_vector->wb_on_itr) 14042ab28bb0SBrett Creeley return; 14052ab28bb0SBrett Creeley 14061d9f7ca3SJesse Brandeburg /* use previously set ITR values for all of the ITR indices by 14071d9f7ca3SJesse Brandeburg * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and 14081d9f7ca3SJesse Brandeburg * be static in non-adaptive mode (user configured) 14091d9f7ca3SJesse Brandeburg */ 14102ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 14111d9f7ca3SJesse Brandeburg ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & 14121d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | 14131d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_WB_ON_ITR_M); 14142ab28bb0SBrett Creeley 1415cdf1f1f1SJacob Keller q_vector->wb_on_itr = true; 14162ab28bb0SBrett Creeley } 14172ab28bb0SBrett Creeley 14182ab28bb0SBrett Creeley /** 14192b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 14202b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 14212b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 14222b245cb2SAnirudh Venkataramanan * 14232b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 14242b245cb2SAnirudh Venkataramanan * 14252b245cb2SAnirudh Venkataramanan * Returns the amount of work done 14262b245cb2SAnirudh Venkataramanan */ 14272b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 14282b245cb2SAnirudh Venkataramanan { 14292b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 14302b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 1431e72bba21SMaciej Fijalkowski struct ice_tx_ring *tx_ring; 1432e72bba21SMaciej Fijalkowski struct ice_rx_ring *rx_ring; 14332b245cb2SAnirudh Venkataramanan bool clean_complete = true; 14349118fcd5SBrett Creeley int budget_per_ring; 14352b245cb2SAnirudh Venkataramanan int work_done = 0; 14362b245cb2SAnirudh Venkataramanan 14372b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 14382b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 
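 *
 * The Rx budget, by contrast, is split across this vector's Rx rings further
 * down. For example (numbers assumed for illustration), with a NAPI budget of
 * 64 and two Rx rings on the vector each ring is cleaned with
 *	budget_per_ring = max_t(int, 64 / 2, 1) = 32
 * and the split never drops below one packet per ring.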
14392b245cb2SAnirudh Venkataramanan */ 1440e72bba21SMaciej Fijalkowski ice_for_each_tx_ring(tx_ring, q_vector->tx) { 14419610bd98SMaciej Fijalkowski bool wd; 14429610bd98SMaciej Fijalkowski 14439610bd98SMaciej Fijalkowski if (tx_ring->xsk_pool) 14449610bd98SMaciej Fijalkowski wd = ice_clean_tx_irq_zc(tx_ring, budget); 14459610bd98SMaciej Fijalkowski else if (ice_ring_is_xdp(tx_ring)) 14469610bd98SMaciej Fijalkowski wd = true; 14479610bd98SMaciej Fijalkowski else 14489610bd98SMaciej Fijalkowski wd = ice_clean_tx_irq(tx_ring, budget); 14492d4238f5SKrzysztof Kazimierczak 14502d4238f5SKrzysztof Kazimierczak if (!wd) 14512b245cb2SAnirudh Venkataramanan clean_complete = false; 14522d4238f5SKrzysztof Kazimierczak } 14532b245cb2SAnirudh Venkataramanan 14542b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 1455d27525ecSJesse Brandeburg if (unlikely(budget <= 0)) 14562b245cb2SAnirudh Venkataramanan return budget; 14572b245cb2SAnirudh Venkataramanan 14589118fcd5SBrett Creeley /* normally we have 1 Rx ring per q_vector */ 14599118fcd5SBrett Creeley if (unlikely(q_vector->num_ring_rx > 1)) 14609118fcd5SBrett Creeley /* We attempt to distribute budget to each Rx queue fairly, but 14619118fcd5SBrett Creeley * don't allow the budget to go below 1 because that would exit 14629118fcd5SBrett Creeley * polling early. 14632b245cb2SAnirudh Venkataramanan */ 146488865fc4SKarol Kolacinski budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 14659118fcd5SBrett Creeley else 14669118fcd5SBrett Creeley /* Max of 1 Rx ring in this q_vector so give it the budget */ 14679118fcd5SBrett Creeley budget_per_ring = budget; 14682b245cb2SAnirudh Venkataramanan 1469e72bba21SMaciej Fijalkowski ice_for_each_rx_ring(rx_ring, q_vector->rx) { 14702b245cb2SAnirudh Venkataramanan int cleaned; 14712b245cb2SAnirudh Venkataramanan 14722d4238f5SKrzysztof Kazimierczak /* A dedicated path for zero-copy allows making a single 14732d4238f5SKrzysztof Kazimierczak * comparison in the irq context instead of many inside the 14742d4238f5SKrzysztof Kazimierczak * ice_clean_rx_irq function and makes the codebase cleaner. 14752d4238f5SKrzysztof Kazimierczak */ 1476e72bba21SMaciej Fijalkowski cleaned = rx_ring->xsk_pool ? 1477e72bba21SMaciej Fijalkowski ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : 1478e72bba21SMaciej Fijalkowski ice_clean_rx_irq(rx_ring, budget_per_ring); 14792b245cb2SAnirudh Venkataramanan work_done += cleaned; 14802b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 14812b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 14822b245cb2SAnirudh Venkataramanan clean_complete = false; 14832b245cb2SAnirudh Venkataramanan } 14842b245cb2SAnirudh Venkataramanan 14852b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 14861d9f7ca3SJesse Brandeburg if (!clean_complete) { 14871d9f7ca3SJesse Brandeburg /* Set the writeback on ITR so partial completions of 14881d9f7ca3SJesse Brandeburg * cache-lines will still continue even if we're polling. 
14891d9f7ca3SJesse Brandeburg */ 14901d9f7ca3SJesse Brandeburg ice_set_wb_on_itr(q_vector); 14912b245cb2SAnirudh Venkataramanan return budget; 14921d9f7ca3SJesse Brandeburg } 14932b245cb2SAnirudh Venkataramanan 14940bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 14950bcd952fSJesse Brandeburg * poll us due to busy-polling 14960bcd952fSJesse Brandeburg */ 1497*d8eb7ad5SJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) { 1498*d8eb7ad5SJesse Brandeburg ice_net_dim(q_vector); 1499*d8eb7ad5SJesse Brandeburg ice_enable_interrupt(q_vector); 1500*d8eb7ad5SJesse Brandeburg } else { 15012fb0821fSJesse Brandeburg ice_set_wb_on_itr(q_vector); 1502*d8eb7ad5SJesse Brandeburg } 1503e0c9fd9bSDave Ertman 150432a64994SBruce Allan return min_t(int, work_done, budget - 1); 15052b245cb2SAnirudh Venkataramanan } 15062b245cb2SAnirudh Venkataramanan 15072b245cb2SAnirudh Venkataramanan /** 1508d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 15092b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15102b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15112b245cb2SAnirudh Venkataramanan * 15122b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 15132b245cb2SAnirudh Venkataramanan */ 1514e72bba21SMaciej Fijalkowski static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 15152b245cb2SAnirudh Venkataramanan { 15162b245cb2SAnirudh Venkataramanan netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 15172b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 15182b245cb2SAnirudh Venkataramanan smp_mb(); 15192b245cb2SAnirudh Venkataramanan 15202b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. */ 15212b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 15222b245cb2SAnirudh Venkataramanan return -EBUSY; 15232b245cb2SAnirudh Venkataramanan 15242b245cb2SAnirudh Venkataramanan /* A reprieve! 
- use start_subqueue because it doesn't call schedule */ 15252b245cb2SAnirudh Venkataramanan netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 15262b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 15272b245cb2SAnirudh Venkataramanan return 0; 15282b245cb2SAnirudh Venkataramanan } 15292b245cb2SAnirudh Venkataramanan 15302b245cb2SAnirudh Venkataramanan /** 1531d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 15322b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15332b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15342b245cb2SAnirudh Venkataramanan * 15352b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 15362b245cb2SAnirudh Venkataramanan */ 1537e72bba21SMaciej Fijalkowski static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 15382b245cb2SAnirudh Venkataramanan { 15392b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 15402b245cb2SAnirudh Venkataramanan return 0; 1541d337f2afSAnirudh Venkataramanan 15422b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 15432b245cb2SAnirudh Venkataramanan } 15442b245cb2SAnirudh Venkataramanan 15452b245cb2SAnirudh Venkataramanan /** 15462b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 15472b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 15482b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1549d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 15502b245cb2SAnirudh Venkataramanan * 15512b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 15522b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 15532b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
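 *
 * Rough sketch of the layout this produces (sizes assumed for illustration):
 * an skb with a 256 byte linear area and a single 32 KB fragment ends up as
 * one descriptor for the linear data followed by several descriptors for the
 * fragment, because any chunk larger than the per-descriptor limit is carved
 * into ICE_MAX_DATA_PER_TXD_ALIGNED sized pieces in the "data chunks larger
 * than the hardware can handle" loop below.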
15542b245cb2SAnirudh Venkataramanan */ 1555d76a60baSAnirudh Venkataramanan static void 1556e72bba21SMaciej Fijalkowski ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, 1557d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 15582b245cb2SAnirudh Venkataramanan { 1559d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 15602b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 15612b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 15622b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 15632b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 15642b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 15654ee656bbSTony Nguyen skb_frag_t *frag; 15662b245cb2SAnirudh Venkataramanan dma_addr_t dma; 15672b245cb2SAnirudh Venkataramanan 1568d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1569d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1570d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 15712b245cb2SAnirudh Venkataramanan skb = first->skb; 15722b245cb2SAnirudh Venkataramanan 15732b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 15742b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 15752b245cb2SAnirudh Venkataramanan 15762b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 15772b245cb2SAnirudh Venkataramanan 1578d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1579d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1580d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1581d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1582d76a60baSAnirudh Venkataramanan } 1583d76a60baSAnirudh Venkataramanan 15842b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 15852b245cb2SAnirudh Venkataramanan 15862b245cb2SAnirudh Venkataramanan tx_buf = first; 15872b245cb2SAnirudh Venkataramanan 15882b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 15892b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 15902b245cb2SAnirudh Venkataramanan 15912b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 15922b245cb2SAnirudh Venkataramanan goto dma_error; 15932b245cb2SAnirudh Venkataramanan 15942b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 15952b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 15962b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 15972b245cb2SAnirudh Venkataramanan 15982b245cb2SAnirudh Venkataramanan /* align size to end of page */ 15992b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 16002b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16012b245cb2SAnirudh Venkataramanan 16022b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 16032b245cb2SAnirudh Venkataramanan * can handle 16042b245cb2SAnirudh Venkataramanan */ 16052b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 16062b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 16075757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, max_data, 16085757cc7cSTony Nguyen td_tag); 16092b245cb2SAnirudh Venkataramanan 16102b245cb2SAnirudh Venkataramanan tx_desc++; 16112b245cb2SAnirudh Venkataramanan i++; 16122b245cb2SAnirudh Venkataramanan 16132b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16142b245cb2SAnirudh Venkataramanan tx_desc = 
ICE_TX_DESC(tx_ring, 0); 16152b245cb2SAnirudh Venkataramanan i = 0; 16162b245cb2SAnirudh Venkataramanan } 16172b245cb2SAnirudh Venkataramanan 16182b245cb2SAnirudh Venkataramanan dma += max_data; 16192b245cb2SAnirudh Venkataramanan size -= max_data; 16202b245cb2SAnirudh Venkataramanan 16212b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16222b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16232b245cb2SAnirudh Venkataramanan } 16242b245cb2SAnirudh Venkataramanan 16252b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 16262b245cb2SAnirudh Venkataramanan break; 16272b245cb2SAnirudh Venkataramanan 16285757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 16292b245cb2SAnirudh Venkataramanan size, td_tag); 16302b245cb2SAnirudh Venkataramanan 16312b245cb2SAnirudh Venkataramanan tx_desc++; 16322b245cb2SAnirudh Venkataramanan i++; 16332b245cb2SAnirudh Venkataramanan 16342b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16352b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 16362b245cb2SAnirudh Venkataramanan i = 0; 16372b245cb2SAnirudh Venkataramanan } 16382b245cb2SAnirudh Venkataramanan 16392b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 16402b245cb2SAnirudh Venkataramanan data_len -= size; 16412b245cb2SAnirudh Venkataramanan 16422b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 16432b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 16442b245cb2SAnirudh Venkataramanan 16452b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 16462b245cb2SAnirudh Venkataramanan } 16472b245cb2SAnirudh Venkataramanan 16482b245cb2SAnirudh Venkataramanan /* record bytecount for BQL */ 16492b245cb2SAnirudh Venkataramanan netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 16502b245cb2SAnirudh Venkataramanan 16512b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 16522b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 16532b245cb2SAnirudh Venkataramanan 16542b245cb2SAnirudh Venkataramanan i++; 16552b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 16562b245cb2SAnirudh Venkataramanan i = 0; 16572b245cb2SAnirudh Venkataramanan 16582b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 1659efc2214bSMaciej Fijalkowski td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 16605757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = 16615757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, size, td_tag); 16622b245cb2SAnirudh Venkataramanan 16632b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 16642b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 16652b245cb2SAnirudh Venkataramanan * 16662b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 16672b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
16682b245cb2SAnirudh Venkataramanan */ 16692b245cb2SAnirudh Venkataramanan wmb(); 16702b245cb2SAnirudh Venkataramanan 16712b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 16722b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 16732b245cb2SAnirudh Venkataramanan 16742b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 16752b245cb2SAnirudh Venkataramanan 16762b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 16772b245cb2SAnirudh Venkataramanan 16782b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 16794ee656bbSTony Nguyen if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 16802b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 16812b245cb2SAnirudh Venkataramanan 16822b245cb2SAnirudh Venkataramanan return; 16832b245cb2SAnirudh Venkataramanan 16842b245cb2SAnirudh Venkataramanan dma_error: 16852f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 16862b245cb2SAnirudh Venkataramanan for (;;) { 16872b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 16882b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 16892b245cb2SAnirudh Venkataramanan if (tx_buf == first) 16902b245cb2SAnirudh Venkataramanan break; 16912b245cb2SAnirudh Venkataramanan if (i == 0) 16922b245cb2SAnirudh Venkataramanan i = tx_ring->count; 16932b245cb2SAnirudh Venkataramanan i--; 16942b245cb2SAnirudh Venkataramanan } 16952b245cb2SAnirudh Venkataramanan 16962b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 16972b245cb2SAnirudh Venkataramanan } 16982b245cb2SAnirudh Venkataramanan 16992b245cb2SAnirudh Venkataramanan /** 1700d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1701d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1702d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1703d76a60baSAnirudh Venkataramanan * 1704d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
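 *
 * Worked example of the offset encoding for a plain, non-tunneled IPv4/TCP
 * frame with no VLAN tag and no IP or TCP options: the 14 byte Ethernet
 * header gives MACLEN = 14 / 2 = 7, the 20 byte IPv4 header gives
 * IPLEN = 20 / 4 = 5, and a TCP header with doff = 5 gives L4_LEN = 5, all
 * merged into off->td_offset by the ICE_TX_DESC_LEN_* shifts below.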
1705d76a60baSAnirudh Venkataramanan */ 1706d76a60baSAnirudh Venkataramanan static 1707d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1708d76a60baSAnirudh Venkataramanan { 1709d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1710d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1711d76a60baSAnirudh Venkataramanan union { 1712d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1713d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1714d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1715d76a60baSAnirudh Venkataramanan } ip; 1716d76a60baSAnirudh Venkataramanan union { 1717d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1718d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1719d76a60baSAnirudh Venkataramanan } l4; 1720d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1721d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1722d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1723d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1724d76a60baSAnirudh Venkataramanan 1725d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1726d76a60baSAnirudh Venkataramanan return 0; 1727d76a60baSAnirudh Venkataramanan 1728d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1729d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1730d76a60baSAnirudh Venkataramanan 1731d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1732d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1733d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1734d76a60baSAnirudh Venkataramanan 1735a4e82a81STony Nguyen protocol = vlan_get_protocol(skb); 1736a4e82a81STony Nguyen 1737a4e82a81STony Nguyen if (protocol == htons(ETH_P_IP)) 1738a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1739a4e82a81STony Nguyen else if (protocol == htons(ETH_P_IPV6)) 1740a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1741a4e82a81STony Nguyen 1742a4e82a81STony Nguyen if (skb->encapsulation) { 1743a4e82a81STony Nguyen bool gso_ena = false; 1744a4e82a81STony Nguyen u32 tunnel = 0; 1745a4e82a81STony Nguyen 1746a4e82a81STony Nguyen /* define outer network header type */ 1747a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1748a4e82a81STony Nguyen tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
1749a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4 : 1750a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 1751a4e82a81STony Nguyen l4_proto = ip.v4->protocol; 1752a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 17531b0b0b58SNick Nunley int ret; 17541b0b0b58SNick Nunley 1755a4e82a81STony Nguyen tunnel |= ICE_TX_CTX_EIPT_IPV6; 1756a4e82a81STony Nguyen exthdr = ip.hdr + sizeof(*ip.v6); 1757a4e82a81STony Nguyen l4_proto = ip.v6->nexthdr; 17581b0b0b58SNick Nunley ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 1759a4e82a81STony Nguyen &l4_proto, &frag_off); 17601b0b0b58SNick Nunley if (ret < 0) 17611b0b0b58SNick Nunley return -1; 1762a4e82a81STony Nguyen } 1763a4e82a81STony Nguyen 1764a4e82a81STony Nguyen /* define outer transport */ 1765a4e82a81STony Nguyen switch (l4_proto) { 1766a4e82a81STony Nguyen case IPPROTO_UDP: 1767a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_UDP_TUNNELING; 1768a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1769a4e82a81STony Nguyen break; 1770a4e82a81STony Nguyen case IPPROTO_GRE: 1771a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_GRE_TUNNELING; 1772a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1773a4e82a81STony Nguyen break; 1774a4e82a81STony Nguyen case IPPROTO_IPIP: 1775a4e82a81STony Nguyen case IPPROTO_IPV6: 1776a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1777a4e82a81STony Nguyen l4.hdr = skb_inner_network_header(skb); 1778a4e82a81STony Nguyen break; 1779a4e82a81STony Nguyen default: 1780a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_TSO) 1781d76a60baSAnirudh Venkataramanan return -1; 1782d76a60baSAnirudh Venkataramanan 1783a4e82a81STony Nguyen skb_checksum_help(skb); 1784a4e82a81STony Nguyen return 0; 1785a4e82a81STony Nguyen } 1786a4e82a81STony Nguyen 1787a4e82a81STony Nguyen /* compute outer L3 header size */ 1788a4e82a81STony Nguyen tunnel |= ((l4.hdr - ip.hdr) / 4) << 1789a4e82a81STony Nguyen ICE_TXD_CTX_QW0_EIPLEN_S; 1790a4e82a81STony Nguyen 1791a4e82a81STony Nguyen /* switch IP header pointer from outer to inner header */ 1792a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1793a4e82a81STony Nguyen 1794a4e82a81STony Nguyen /* compute tunnel header size */ 1795a4e82a81STony Nguyen tunnel |= ((ip.hdr - l4.hdr) / 2) << 1796a4e82a81STony Nguyen ICE_TXD_CTX_QW0_NATLEN_S; 1797a4e82a81STony Nguyen 1798a4e82a81STony Nguyen gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; 1799a4e82a81STony Nguyen /* indicate if we need to offload outer UDP header */ 1800a4e82a81STony Nguyen if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && 1801a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 1802a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M; 1803a4e82a81STony Nguyen 1804a4e82a81STony Nguyen /* record tunnel offload values */ 1805a4e82a81STony Nguyen off->cd_tunnel_params |= tunnel; 1806a4e82a81STony Nguyen 1807a4e82a81STony Nguyen /* set DTYP=1 to indicate that it's an Tx context descriptor 1808a4e82a81STony Nguyen * in IPsec tunnel mode with Tx offloads in Quad word 1 1809a4e82a81STony Nguyen */ 1810a4e82a81STony Nguyen off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; 1811a4e82a81STony Nguyen 1812a4e82a81STony Nguyen /* switch L4 header pointer from outer to inner */ 1813a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1814a4e82a81STony Nguyen l4_proto = 0; 1815a4e82a81STony Nguyen 1816a4e82a81STony Nguyen /* reset type as we transition from outer to inner headers */ 1817a4e82a81STony Nguyen first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | 
ICE_TX_FLAGS_IPV6); 1818a4e82a81STony Nguyen if (ip.v4->version == 4) 1819a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1820a4e82a81STony Nguyen if (ip.v6->version == 6) 1821a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1822a4e82a81STony Nguyen } 1823a4e82a81STony Nguyen 1824d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1825a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1826d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1827d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already, the only time we 1828d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 1829d76a60baSAnirudh Venkataramanan */ 1830d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1831d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1832d76a60baSAnirudh Venkataramanan else 1833d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1834d76a60baSAnirudh Venkataramanan 1835a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1836d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1837d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1838d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1839d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1840d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1841d76a60baSAnirudh Venkataramanan &frag_off); 1842d76a60baSAnirudh Venkataramanan } else { 1843d76a60baSAnirudh Venkataramanan return -1; 1844d76a60baSAnirudh Venkataramanan } 1845d76a60baSAnirudh Venkataramanan 1846d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1847d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1848d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1849d76a60baSAnirudh Venkataramanan 1850d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1851d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1852d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 1853d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 1854d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1855d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 1856d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1857d76a60baSAnirudh Venkataramanan break; 1858d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 1859d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 1860d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1861d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 1862d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1863d76a60baSAnirudh Venkataramanan break; 1864d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 1865cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 1866cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1867cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 1868cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1869cf909e19SAnirudh Venkataramanan break; 1870cf909e19SAnirudh Venkataramanan 1871d76a60baSAnirudh Venkataramanan default: 1872d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1873d76a60baSAnirudh Venkataramanan return -1; 1874d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 1875d76a60baSAnirudh Venkataramanan return 0; 
1876d76a60baSAnirudh Venkataramanan } 1877d76a60baSAnirudh Venkataramanan 1878d76a60baSAnirudh Venkataramanan off->td_cmd |= cmd; 1879d76a60baSAnirudh Venkataramanan off->td_offset |= offset; 1880d76a60baSAnirudh Venkataramanan return 1; 1881d76a60baSAnirudh Venkataramanan } 1882d76a60baSAnirudh Venkataramanan 1883d76a60baSAnirudh Venkataramanan /** 1884f9867df6SAnirudh Venkataramanan * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 1885d76a60baSAnirudh Venkataramanan * @tx_ring: ring to send buffer on 1886d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1887d76a60baSAnirudh Venkataramanan * 1888d76a60baSAnirudh Venkataramanan * Checks the skb and set up correspondingly several generic transmit flags 1889d76a60baSAnirudh Venkataramanan * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 1890d76a60baSAnirudh Venkataramanan */ 18912bb19d6eSBrett Creeley static void 1892e72bba21SMaciej Fijalkowski ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) 1893d76a60baSAnirudh Venkataramanan { 1894d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1895d76a60baSAnirudh Venkataramanan 18962bb19d6eSBrett Creeley /* nothing left to do, software offloaded VLAN */ 18972bb19d6eSBrett Creeley if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) 18982bb19d6eSBrett Creeley return; 18992bb19d6eSBrett Creeley 19002bb19d6eSBrett Creeley /* currently, we always assume 802.1Q for VLAN insertion as VLAN 19012bb19d6eSBrett Creeley * insertion for 802.1AD is not supported 1902d76a60baSAnirudh Venkataramanan */ 1903d76a60baSAnirudh Venkataramanan if (skb_vlan_tag_present(skb)) { 1904d76a60baSAnirudh Venkataramanan first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 1905d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 1906d76a60baSAnirudh Venkataramanan } 1907d76a60baSAnirudh Venkataramanan 19082bb19d6eSBrett Creeley ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 1909d76a60baSAnirudh Venkataramanan } 1910d76a60baSAnirudh Venkataramanan 1911d76a60baSAnirudh Venkataramanan /** 1912d76a60baSAnirudh Venkataramanan * ice_tso - computes mss and TSO length to prepare for TSO 1913d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1914d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1915d76a60baSAnirudh Venkataramanan * 1916d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if TSO can't happen, 1 otherwise. 
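 *
 * Worked example (numbers chosen for illustration): a TCP skb with a 54 byte
 * header (14 + 20 + 20), 8192 bytes of payload and gso_size = 1448 reports
 * gso_segs = 6, so first->bytecount grows by (6 - 1) * 54 for the replicated
 * headers, cd_tso_len becomes 8192 (skb->len minus the header length) and
 * cd_mss becomes 1448 in the context descriptor built from off->cd_qw1.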
1917d76a60baSAnirudh Venkataramanan */ 1918d76a60baSAnirudh Venkataramanan static 1919d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1920d76a60baSAnirudh Venkataramanan { 1921d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1922d76a60baSAnirudh Venkataramanan union { 1923d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1924d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1925d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1926d76a60baSAnirudh Venkataramanan } ip; 1927d76a60baSAnirudh Venkataramanan union { 1928d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1929a54e3b8cSBrett Creeley struct udphdr *udp; 1930d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1931d76a60baSAnirudh Venkataramanan } l4; 1932d76a60baSAnirudh Venkataramanan u64 cd_mss, cd_tso_len; 193388865fc4SKarol Kolacinski u32 paylen; 193488865fc4SKarol Kolacinski u8 l4_start; 1935d76a60baSAnirudh Venkataramanan int err; 1936d76a60baSAnirudh Venkataramanan 1937d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1938d76a60baSAnirudh Venkataramanan return 0; 1939d76a60baSAnirudh Venkataramanan 1940d76a60baSAnirudh Venkataramanan if (!skb_is_gso(skb)) 1941d76a60baSAnirudh Venkataramanan return 0; 1942d76a60baSAnirudh Venkataramanan 1943d76a60baSAnirudh Venkataramanan err = skb_cow_head(skb, 0); 1944d76a60baSAnirudh Venkataramanan if (err < 0) 1945d76a60baSAnirudh Venkataramanan return err; 1946d76a60baSAnirudh Venkataramanan 1947c3a6825eSBruce Allan /* cppcheck-suppress unreadVariable */ 1948d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1949d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1950d76a60baSAnirudh Venkataramanan 1951d76a60baSAnirudh Venkataramanan /* initialize outer IP header fields */ 1952d76a60baSAnirudh Venkataramanan if (ip.v4->version == 4) { 1953d76a60baSAnirudh Venkataramanan ip.v4->tot_len = 0; 1954d76a60baSAnirudh Venkataramanan ip.v4->check = 0; 1955d76a60baSAnirudh Venkataramanan } else { 1956d76a60baSAnirudh Venkataramanan ip.v6->payload_len = 0; 1957d76a60baSAnirudh Venkataramanan } 1958d76a60baSAnirudh Venkataramanan 1959a4e82a81STony Nguyen if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 1960a4e82a81STony Nguyen SKB_GSO_GRE_CSUM | 1961a4e82a81STony Nguyen SKB_GSO_IPXIP4 | 1962a4e82a81STony Nguyen SKB_GSO_IPXIP6 | 1963a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL | 1964a4e82a81STony Nguyen SKB_GSO_UDP_TUNNEL_CSUM)) { 1965a4e82a81STony Nguyen if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 1966a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 1967a4e82a81STony Nguyen l4.udp->len = 0; 1968a4e82a81STony Nguyen 1969a4e82a81STony Nguyen /* determine offset of outer transport header */ 197088865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data); 1971a4e82a81STony Nguyen 1972a4e82a81STony Nguyen /* remove payload length from outer checksum */ 1973a4e82a81STony Nguyen paylen = skb->len - l4_start; 1974a4e82a81STony Nguyen csum_replace_by_diff(&l4.udp->check, 1975a4e82a81STony Nguyen (__force __wsum)htonl(paylen)); 1976a4e82a81STony Nguyen } 1977a4e82a81STony Nguyen 1978a4e82a81STony Nguyen /* reset pointers to inner headers */ 1979a4e82a81STony Nguyen 1980a4e82a81STony Nguyen /* cppcheck-suppress unreadVariable */ 1981a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1982a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1983a4e82a81STony Nguyen 1984a4e82a81STony Nguyen /* initialize inner IP header fields */ 
1985a4e82a81STony Nguyen if (ip.v4->version == 4) { 1986a4e82a81STony Nguyen ip.v4->tot_len = 0; 1987a4e82a81STony Nguyen ip.v4->check = 0; 1988a4e82a81STony Nguyen } else { 1989a4e82a81STony Nguyen ip.v6->payload_len = 0; 1990a4e82a81STony Nguyen } 1991a4e82a81STony Nguyen } 1992a4e82a81STony Nguyen 1993d76a60baSAnirudh Venkataramanan /* determine offset of transport header */ 199488865fc4SKarol Kolacinski l4_start = (u8)(l4.hdr - skb->data); 1995d76a60baSAnirudh Venkataramanan 1996d76a60baSAnirudh Venkataramanan /* remove payload length from checksum */ 1997d76a60baSAnirudh Venkataramanan paylen = skb->len - l4_start; 1998d76a60baSAnirudh Venkataramanan 1999a54e3b8cSBrett Creeley if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 2000a54e3b8cSBrett Creeley csum_replace_by_diff(&l4.udp->check, 2001a54e3b8cSBrett Creeley (__force __wsum)htonl(paylen)); 2002a54e3b8cSBrett Creeley /* compute length of UDP segmentation header */ 200388865fc4SKarol Kolacinski off->header_len = (u8)sizeof(l4.udp) + l4_start; 2004a54e3b8cSBrett Creeley } else { 2005a54e3b8cSBrett Creeley csum_replace_by_diff(&l4.tcp->check, 2006a54e3b8cSBrett Creeley (__force __wsum)htonl(paylen)); 2007a54e3b8cSBrett Creeley /* compute length of TCP segmentation header */ 200888865fc4SKarol Kolacinski off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); 2009a54e3b8cSBrett Creeley } 2010d76a60baSAnirudh Venkataramanan 2011d76a60baSAnirudh Venkataramanan /* update gso_segs and bytecount */ 2012d76a60baSAnirudh Venkataramanan first->gso_segs = skb_shinfo(skb)->gso_segs; 2013d944b469SBrett Creeley first->bytecount += (first->gso_segs - 1) * off->header_len; 2014d76a60baSAnirudh Venkataramanan 2015d76a60baSAnirudh Venkataramanan cd_tso_len = skb->len - off->header_len; 2016d76a60baSAnirudh Venkataramanan cd_mss = skb_shinfo(skb)->gso_size; 2017d76a60baSAnirudh Venkataramanan 2018d76a60baSAnirudh Venkataramanan /* record cdesc_qw1 with TSO parameters */ 2019e65e9e15SBruce Allan off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2020d76a60baSAnirudh Venkataramanan (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | 2021d76a60baSAnirudh Venkataramanan (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 2022e65e9e15SBruce Allan (cd_mss << ICE_TXD_CTX_QW1_MSS_S)); 2023d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_TSO; 2024d76a60baSAnirudh Venkataramanan return 1; 2025d76a60baSAnirudh Venkataramanan } 2026d76a60baSAnirudh Venkataramanan 2027d76a60baSAnirudh Venkataramanan /** 20282b245cb2SAnirudh Venkataramanan * ice_txd_use_count - estimate the number of descriptors needed for Tx 20292b245cb2SAnirudh Venkataramanan * @size: transmit request size in bytes 20302b245cb2SAnirudh Venkataramanan * 20312b245cb2SAnirudh Venkataramanan * Due to hardware alignment restrictions (4K alignment), we need to 20322b245cb2SAnirudh Venkataramanan * assume that we can have no more than 12K of data per descriptor, even 20332b245cb2SAnirudh Venkataramanan * though each descriptor can take up to 16K - 1 bytes of aligned memory. 20342b245cb2SAnirudh Venkataramanan * Thus, we need to divide by 12K. But division is slow! Instead, 20352b245cb2SAnirudh Venkataramanan * we decompose the operation into shifts and one relatively cheap 20362b245cb2SAnirudh Venkataramanan * multiply operation. 
20372b245cb2SAnirudh Venkataramanan * 20382b245cb2SAnirudh Venkataramanan * To divide by 12K, we first divide by 4K, then divide by 3: 20392b245cb2SAnirudh Venkataramanan * To divide by 4K, shift right by 12 bits 20402b245cb2SAnirudh Venkataramanan * To divide by 3, multiply by 85, then divide by 256 20412b245cb2SAnirudh Venkataramanan * (Divide by 256 is done by shifting right by 8 bits) 20422b245cb2SAnirudh Venkataramanan * Finally, we add one to round up. Because 256 isn't an exact multiple of 20432b245cb2SAnirudh Venkataramanan * 3, we'll underestimate near each multiple of 12K. This is actually more 20442b245cb2SAnirudh Venkataramanan * accurate as we have 4K - 1 of wiggle room that we can fit into the last 20452b245cb2SAnirudh Venkataramanan * segment. For our purposes this is accurate out to 1M which is orders of 20462b245cb2SAnirudh Venkataramanan * magnitude greater than our largest possible GSO size. 20472b245cb2SAnirudh Venkataramanan * 20482b245cb2SAnirudh Venkataramanan * This would then be implemented as: 2049c585ea42SBrett Creeley * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 20502b245cb2SAnirudh Venkataramanan * 20512b245cb2SAnirudh Venkataramanan * Since multiplication and division are commutative, we can reorder 20522b245cb2SAnirudh Venkataramanan * operations into: 2053c585ea42SBrett Creeley * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 20542b245cb2SAnirudh Venkataramanan */ 20552b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size) 20562b245cb2SAnirudh Venkataramanan { 2057c585ea42SBrett Creeley return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 20582b245cb2SAnirudh Venkataramanan } 20592b245cb2SAnirudh Venkataramanan 20602b245cb2SAnirudh Venkataramanan /** 2061d337f2afSAnirudh Venkataramanan * ice_xmit_desc_count - calculate number of Tx descriptors needed 20622b245cb2SAnirudh Venkataramanan * @skb: send buffer 20632b245cb2SAnirudh Venkataramanan * 20642b245cb2SAnirudh Venkataramanan * Returns number of data descriptors needed for this skb. 
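 *
 * For example, assuming ICE_DESCS_FOR_SKB_DATA_PTR is 1: a TSO skb with
 * a 256 byte linear area and three 16 KB page fragments needs
 * ((256 * 85) >> 20) + 1 = 1 descriptor for the head and
 * ((16384 * 85) >> 20) + 1 = 2 for each fragment, so this returns
 * 1 + 3 * 2 = 7 data descriptors.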
20652b245cb2SAnirudh Venkataramanan */ 20662b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 20672b245cb2SAnirudh Venkataramanan { 2068d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 20692b245cb2SAnirudh Venkataramanan unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 20702b245cb2SAnirudh Venkataramanan unsigned int count = 0, size = skb_headlen(skb); 20712b245cb2SAnirudh Venkataramanan 20722b245cb2SAnirudh Venkataramanan for (;;) { 20732b245cb2SAnirudh Venkataramanan count += ice_txd_use_count(size); 20742b245cb2SAnirudh Venkataramanan 20752b245cb2SAnirudh Venkataramanan if (!nr_frags--) 20762b245cb2SAnirudh Venkataramanan break; 20772b245cb2SAnirudh Venkataramanan 20782b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag++); 20792b245cb2SAnirudh Venkataramanan } 20802b245cb2SAnirudh Venkataramanan 20812b245cb2SAnirudh Venkataramanan return count; 20822b245cb2SAnirudh Venkataramanan } 20832b245cb2SAnirudh Venkataramanan 20842b245cb2SAnirudh Venkataramanan /** 20852b245cb2SAnirudh Venkataramanan * __ice_chk_linearize - Check if there are more than 8 buffers per packet 20862b245cb2SAnirudh Venkataramanan * @skb: send buffer 20872b245cb2SAnirudh Venkataramanan * 20882b245cb2SAnirudh Venkataramanan * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 20892b245cb2SAnirudh Venkataramanan * and so we need to figure out the cases where we need to linearize the skb. 20902b245cb2SAnirudh Venkataramanan * 20912b245cb2SAnirudh Venkataramanan * For TSO we need to count the TSO header and segment payload separately. 20922b245cb2SAnirudh Venkataramanan * As such we need to check cases where we have 7 fragments or more as we 20932b245cb2SAnirudh Venkataramanan * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 20942b245cb2SAnirudh Venkataramanan * the segment payload in the first descriptor, and another 7 for the 20952b245cb2SAnirudh Venkataramanan * fragments. 20962b245cb2SAnirudh Venkataramanan */ 20972b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb) 20982b245cb2SAnirudh Venkataramanan { 2099d7840976SMatthew Wilcox (Oracle) const skb_frag_t *frag, *stale; 21002b245cb2SAnirudh Venkataramanan int nr_frags, sum; 21012b245cb2SAnirudh Venkataramanan 21022b245cb2SAnirudh Venkataramanan /* no need to check if number of frags is less than 7 */ 21032b245cb2SAnirudh Venkataramanan nr_frags = skb_shinfo(skb)->nr_frags; 21042b245cb2SAnirudh Venkataramanan if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 21052b245cb2SAnirudh Venkataramanan return false; 21062b245cb2SAnirudh Venkataramanan 21072b245cb2SAnirudh Venkataramanan /* We need to walk through the list and validate that each group 21082b245cb2SAnirudh Venkataramanan * of 6 fragments totals at least gso_size. 21092b245cb2SAnirudh Venkataramanan */ 21102b245cb2SAnirudh Venkataramanan nr_frags -= ICE_MAX_BUF_TXD - 2; 21112b245cb2SAnirudh Venkataramanan frag = &skb_shinfo(skb)->frags[0]; 21122b245cb2SAnirudh Venkataramanan 21132b245cb2SAnirudh Venkataramanan /* Initialize size to the negative value of gso_size minus 1. We 21144ee656bbSTony Nguyen * use this as the worst case scenario in which the frag ahead 21152b245cb2SAnirudh Venkataramanan * of us only provides one byte which is why we are limited to 6 21162b245cb2SAnirudh Venkataramanan * descriptors for a single transmit as the header and previous 21172b245cb2SAnirudh Venkataramanan * fragment are already consuming 2 descriptors. 
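 *
 * In other words, the loop below slides a window of 6 fragments along
 * the skb; if any such window fails to cover roughly one gso_size of
 * payload, the running sum goes negative and the skb has to be
 * linearized.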
21182b245cb2SAnirudh Venkataramanan */ 21192b245cb2SAnirudh Venkataramanan sum = 1 - skb_shinfo(skb)->gso_size; 21202b245cb2SAnirudh Venkataramanan 21212b245cb2SAnirudh Venkataramanan /* Add size of frags 0 through 4 to create our initial sum */ 21222b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21232b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21242b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21252b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21262b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21272b245cb2SAnirudh Venkataramanan 21282b245cb2SAnirudh Venkataramanan /* Walk through fragments adding latest fragment, testing it, and 21292b245cb2SAnirudh Venkataramanan * then removing stale fragments from the sum. 21302b245cb2SAnirudh Venkataramanan */ 21310a37abfaSKiran Patil for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 21320a37abfaSKiran Patil int stale_size = skb_frag_size(stale); 21330a37abfaSKiran Patil 21342b245cb2SAnirudh Venkataramanan sum += skb_frag_size(frag++); 21352b245cb2SAnirudh Venkataramanan 21360a37abfaSKiran Patil /* The stale fragment may present us with a smaller 21370a37abfaSKiran Patil * descriptor than the actual fragment size. To account 21380a37abfaSKiran Patil * for that we need to remove all the data on the front and 21390a37abfaSKiran Patil * figure out what the remainder would be in the last 21400a37abfaSKiran Patil * descriptor associated with the fragment. 21410a37abfaSKiran Patil */ 21420a37abfaSKiran Patil if (stale_size > ICE_MAX_DATA_PER_TXD) { 21430a37abfaSKiran Patil int align_pad = -(skb_frag_off(stale)) & 21440a37abfaSKiran Patil (ICE_MAX_READ_REQ_SIZE - 1); 21450a37abfaSKiran Patil 21460a37abfaSKiran Patil sum -= align_pad; 21470a37abfaSKiran Patil stale_size -= align_pad; 21480a37abfaSKiran Patil 21490a37abfaSKiran Patil do { 21500a37abfaSKiran Patil sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; 21510a37abfaSKiran Patil stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; 21520a37abfaSKiran Patil } while (stale_size > ICE_MAX_DATA_PER_TXD); 21530a37abfaSKiran Patil } 21540a37abfaSKiran Patil 21552b245cb2SAnirudh Venkataramanan /* if sum is negative we failed to make sufficient progress */ 21562b245cb2SAnirudh Venkataramanan if (sum < 0) 21572b245cb2SAnirudh Venkataramanan return true; 21582b245cb2SAnirudh Venkataramanan 21592b245cb2SAnirudh Venkataramanan if (!nr_frags--) 21602b245cb2SAnirudh Venkataramanan break; 21612b245cb2SAnirudh Venkataramanan 21620a37abfaSKiran Patil sum -= stale_size; 21632b245cb2SAnirudh Venkataramanan } 21642b245cb2SAnirudh Venkataramanan 21652b245cb2SAnirudh Venkataramanan return false; 21662b245cb2SAnirudh Venkataramanan } 21672b245cb2SAnirudh Venkataramanan 21682b245cb2SAnirudh Venkataramanan /** 21692b245cb2SAnirudh Venkataramanan * ice_chk_linearize - Check if there are more than 8 fragments per packet 21702b245cb2SAnirudh Venkataramanan * @skb: send buffer 21712b245cb2SAnirudh Venkataramanan * @count: number of buffers used 21722b245cb2SAnirudh Venkataramanan * 21732b245cb2SAnirudh Venkataramanan * Note: Our HW can't scatter-gather more than 8 fragments to build 21742b245cb2SAnirudh Venkataramanan * a packet on the wire and so we need to figure out the cases where we 21752b245cb2SAnirudh Venkataramanan * need to linearize the skb. 
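 *
 * Returns true if the skb needs to be software linearized before
 * transmit, false if it can be sent as-is.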
21762b245cb2SAnirudh Venkataramanan */ 21772b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 21782b245cb2SAnirudh Venkataramanan { 21792b245cb2SAnirudh Venkataramanan /* Both TSO and single send will work if count is less than 8 */ 21802b245cb2SAnirudh Venkataramanan if (likely(count < ICE_MAX_BUF_TXD)) 21812b245cb2SAnirudh Venkataramanan return false; 21822b245cb2SAnirudh Venkataramanan 21832b245cb2SAnirudh Venkataramanan if (skb_is_gso(skb)) 21842b245cb2SAnirudh Venkataramanan return __ice_chk_linearize(skb); 21852b245cb2SAnirudh Venkataramanan 21862b245cb2SAnirudh Venkataramanan /* we can support up to 8 data buffers for a single send */ 21872b245cb2SAnirudh Venkataramanan return count != ICE_MAX_BUF_TXD; 21882b245cb2SAnirudh Venkataramanan } 21892b245cb2SAnirudh Venkataramanan 21902b245cb2SAnirudh Venkataramanan /** 2191ea9b847cSJacob Keller * ice_tstamp - set up context descriptor for hardware timestamp 2192ea9b847cSJacob Keller * @tx_ring: pointer to the Tx ring to send buffer on 2193ea9b847cSJacob Keller * @skb: pointer to the SKB we're sending 2194ea9b847cSJacob Keller * @first: Tx buffer 2195ea9b847cSJacob Keller * @off: Tx offload parameters 2196ea9b847cSJacob Keller */ 2197ea9b847cSJacob Keller static void 2198e72bba21SMaciej Fijalkowski ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, 2199ea9b847cSJacob Keller struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2200ea9b847cSJacob Keller { 2201ea9b847cSJacob Keller s8 idx; 2202ea9b847cSJacob Keller 2203ea9b847cSJacob Keller /* only timestamp the outbound packet if the user has requested it */ 2204ea9b847cSJacob Keller if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 2205ea9b847cSJacob Keller return; 2206ea9b847cSJacob Keller 2207ea9b847cSJacob Keller if (!tx_ring->ptp_tx) 2208ea9b847cSJacob Keller return; 2209ea9b847cSJacob Keller 2210ea9b847cSJacob Keller /* Tx timestamps cannot be sampled when doing TSO */ 2211ea9b847cSJacob Keller if (first->tx_flags & ICE_TX_FLAGS_TSO) 2212ea9b847cSJacob Keller return; 2213ea9b847cSJacob Keller 2214ea9b847cSJacob Keller /* Grab an open timestamp slot */ 2215ea9b847cSJacob Keller idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); 2216ea9b847cSJacob Keller if (idx < 0) 2217ea9b847cSJacob Keller return; 2218ea9b847cSJacob Keller 2219ea9b847cSJacob Keller off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2220ea9b847cSJacob Keller (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) | 2221ea9b847cSJacob Keller ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S)); 2222ea9b847cSJacob Keller first->tx_flags |= ICE_TX_FLAGS_TSYN; 2223ea9b847cSJacob Keller } 2224ea9b847cSJacob Keller 2225ea9b847cSJacob Keller /** 22262b245cb2SAnirudh Venkataramanan * ice_xmit_frame_ring - Sends buffer on Tx ring 22272b245cb2SAnirudh Venkataramanan * @skb: send buffer 22282b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 22292b245cb2SAnirudh Venkataramanan * 22302b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 22312b245cb2SAnirudh Venkataramanan */ 22322b245cb2SAnirudh Venkataramanan static netdev_tx_t 2233e72bba21SMaciej Fijalkowski ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) 22342b245cb2SAnirudh Venkataramanan { 2235d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params offload = { 0 }; 22360c3a6101SDave Ertman struct ice_vsi *vsi = tx_ring->vsi; 22372b245cb2SAnirudh Venkataramanan struct ice_tx_buf *first; 2238f9f83202SDave Ertman struct ethhdr *eth; 22392b245cb2SAnirudh 
Venkataramanan unsigned int count; 2240d76a60baSAnirudh Venkataramanan int tso, csum; 22412b245cb2SAnirudh Venkataramanan 22423089cf6dSJesse Brandeburg ice_trace(xmit_frame_ring, tx_ring, skb); 22433089cf6dSJesse Brandeburg 22442b245cb2SAnirudh Venkataramanan count = ice_xmit_desc_count(skb); 22452b245cb2SAnirudh Venkataramanan if (ice_chk_linearize(skb, count)) { 22462b245cb2SAnirudh Venkataramanan if (__skb_linearize(skb)) 22472b245cb2SAnirudh Venkataramanan goto out_drop; 22482b245cb2SAnirudh Venkataramanan count = ice_txd_use_count(skb->len); 22492b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_linearize++; 22502b245cb2SAnirudh Venkataramanan } 22512b245cb2SAnirudh Venkataramanan 22522b245cb2SAnirudh Venkataramanan /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 22532b245cb2SAnirudh Venkataramanan * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 22542b245cb2SAnirudh Venkataramanan * + 4 desc gap to avoid the cache line where head is, 22552b245cb2SAnirudh Venkataramanan * + 1 desc for context descriptor, 22562b245cb2SAnirudh Venkataramanan * otherwise try next time 22572b245cb2SAnirudh Venkataramanan */ 2258c585ea42SBrett Creeley if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2259c585ea42SBrett Creeley ICE_DESCS_FOR_CTX_DESC)) { 22602b245cb2SAnirudh Venkataramanan tx_ring->tx_stats.tx_busy++; 22612b245cb2SAnirudh Venkataramanan return NETDEV_TX_BUSY; 22622b245cb2SAnirudh Venkataramanan } 22632b245cb2SAnirudh Venkataramanan 2264d76a60baSAnirudh Venkataramanan offload.tx_ring = tx_ring; 2265d76a60baSAnirudh Venkataramanan 22662b245cb2SAnirudh Venkataramanan /* record the location of the first descriptor for this packet */ 22672b245cb2SAnirudh Venkataramanan first = &tx_ring->tx_buf[tx_ring->next_to_use]; 22682b245cb2SAnirudh Venkataramanan first->skb = skb; 22692b245cb2SAnirudh Venkataramanan first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 22702b245cb2SAnirudh Venkataramanan first->gso_segs = 1; 2271d76a60baSAnirudh Venkataramanan first->tx_flags = 0; 22722b245cb2SAnirudh Venkataramanan 2273d76a60baSAnirudh Venkataramanan /* prepare the VLAN tagging flags for Tx */ 22742bb19d6eSBrett Creeley ice_tx_prepare_vlan_flags(tx_ring, first); 2275d76a60baSAnirudh Venkataramanan 2276d76a60baSAnirudh Venkataramanan /* set up TSO offload */ 2277d76a60baSAnirudh Venkataramanan tso = ice_tso(first, &offload); 2278d76a60baSAnirudh Venkataramanan if (tso < 0) 2279d76a60baSAnirudh Venkataramanan goto out_drop; 2280d76a60baSAnirudh Venkataramanan 2281d76a60baSAnirudh Venkataramanan /* always set up Tx checksum offload */ 2282d76a60baSAnirudh Venkataramanan csum = ice_tx_csum(first, &offload); 2283d76a60baSAnirudh Venkataramanan if (csum < 0) 2284d76a60baSAnirudh Venkataramanan goto out_drop; 2285d76a60baSAnirudh Venkataramanan 22860c3a6101SDave Ertman /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 2287f9f83202SDave Ertman eth = (struct ethhdr *)skb_mac_header(skb); 2288f9f83202SDave Ertman if (unlikely((skb->priority == TC_PRIO_CONTROL || 2289f9f83202SDave Ertman eth->h_proto == htons(ETH_P_LLDP)) && 22900c3a6101SDave Ertman vsi->type == ICE_VSI_PF && 2291fc2d1165SChinh T Cao vsi->port_info->qos_cfg.is_sw_lldp)) 22920c3a6101SDave Ertman offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 22930c3a6101SDave Ertman ICE_TX_CTX_DESC_SWTCH_UPLINK << 22940c3a6101SDave Ertman ICE_TXD_CTX_QW1_CMD_S); 22950c3a6101SDave Ertman 2296ea9b847cSJacob Keller ice_tstamp(tx_ring, skb, first, &offload); 2297f5396b8aSGrzegorz Nitka if 
(ice_is_switchdev_running(vsi->back)) 2298f5396b8aSGrzegorz Nitka ice_eswitch_set_target_vsi(skb, &offload); 2299ea9b847cSJacob Keller 23000c3a6101SDave Ertman if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2301d76a60baSAnirudh Venkataramanan struct ice_tx_ctx_desc *cdesc; 230288865fc4SKarol Kolacinski u16 i = tx_ring->next_to_use; 2303d76a60baSAnirudh Venkataramanan 2304d76a60baSAnirudh Venkataramanan /* grab the next descriptor */ 2305d76a60baSAnirudh Venkataramanan cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2306d76a60baSAnirudh Venkataramanan i++; 2307d76a60baSAnirudh Venkataramanan tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2308d76a60baSAnirudh Venkataramanan 2309d76a60baSAnirudh Venkataramanan /* setup context descriptor */ 2310d76a60baSAnirudh Venkataramanan cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2311d76a60baSAnirudh Venkataramanan cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2312d76a60baSAnirudh Venkataramanan cdesc->rsvd = cpu_to_le16(0); 2313d76a60baSAnirudh Venkataramanan cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2314d76a60baSAnirudh Venkataramanan } 2315d76a60baSAnirudh Venkataramanan 2316d76a60baSAnirudh Venkataramanan ice_tx_map(tx_ring, first, &offload); 23172b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23182b245cb2SAnirudh Venkataramanan 23192b245cb2SAnirudh Venkataramanan out_drop: 23203089cf6dSJesse Brandeburg ice_trace(xmit_frame_ring_drop, tx_ring, skb); 23212b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 23222b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23232b245cb2SAnirudh Venkataramanan } 23242b245cb2SAnirudh Venkataramanan 23252b245cb2SAnirudh Venkataramanan /** 23262b245cb2SAnirudh Venkataramanan * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 23272b245cb2SAnirudh Venkataramanan * @skb: send buffer 23282b245cb2SAnirudh Venkataramanan * @netdev: network interface device structure 23292b245cb2SAnirudh Venkataramanan * 23302b245cb2SAnirudh Venkataramanan * Returns NETDEV_TX_OK if sent, else an error code 23312b245cb2SAnirudh Venkataramanan */ 23322b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 23332b245cb2SAnirudh Venkataramanan { 23342b245cb2SAnirudh Venkataramanan struct ice_netdev_priv *np = netdev_priv(netdev); 23352b245cb2SAnirudh Venkataramanan struct ice_vsi *vsi = np->vsi; 2336e72bba21SMaciej Fijalkowski struct ice_tx_ring *tx_ring; 23372b245cb2SAnirudh Venkataramanan 23382b245cb2SAnirudh Venkataramanan tx_ring = vsi->tx_rings[skb->queue_mapping]; 23392b245cb2SAnirudh Venkataramanan 23402b245cb2SAnirudh Venkataramanan /* hardware can't handle really short frames, hardware padding works 23412b245cb2SAnirudh Venkataramanan * beyond this point 23422b245cb2SAnirudh Venkataramanan */ 23432b245cb2SAnirudh Venkataramanan if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 23442b245cb2SAnirudh Venkataramanan return NETDEV_TX_OK; 23452b245cb2SAnirudh Venkataramanan 23462b245cb2SAnirudh Venkataramanan return ice_xmit_frame_ring(skb, tx_ring); 23472b245cb2SAnirudh Venkataramanan } 2348148beb61SHenry Tieman 2349148beb61SHenry Tieman /** 23502a87bd73SDave Ertman * ice_get_dscp_up - return the UP/TC value for a SKB 23512a87bd73SDave Ertman * @dcbcfg: DCB config that contains DSCP to UP/TC mapping 23522a87bd73SDave Ertman * @skb: SKB to query for info to determine UP/TC 23532a87bd73SDave Ertman * 23542a87bd73SDave Ertman * This function is to only be called when the PF is in L3 DSCP PFC mode 23552a87bd73SDave Ertman */ 23562a87bd73SDave Ertman static u8 
ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb) 23572a87bd73SDave Ertman { 23582a87bd73SDave Ertman u8 dscp = 0; 23592a87bd73SDave Ertman 23602a87bd73SDave Ertman if (skb->protocol == htons(ETH_P_IP)) 23612a87bd73SDave Ertman dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; 23622a87bd73SDave Ertman else if (skb->protocol == htons(ETH_P_IPV6)) 23632a87bd73SDave Ertman dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; 23642a87bd73SDave Ertman 23652a87bd73SDave Ertman return dcbcfg->dscp_map[dscp]; 23662a87bd73SDave Ertman } 23672a87bd73SDave Ertman 23682a87bd73SDave Ertman u16 23692a87bd73SDave Ertman ice_select_queue(struct net_device *netdev, struct sk_buff *skb, 23702a87bd73SDave Ertman struct net_device *sb_dev) 23712a87bd73SDave Ertman { 23722a87bd73SDave Ertman struct ice_pf *pf = ice_netdev_to_pf(netdev); 23732a87bd73SDave Ertman struct ice_dcbx_cfg *dcbcfg; 23742a87bd73SDave Ertman 23752a87bd73SDave Ertman dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 23762a87bd73SDave Ertman if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) 23772a87bd73SDave Ertman skb->priority = ice_get_dscp_up(dcbcfg, skb); 23782a87bd73SDave Ertman 23792a87bd73SDave Ertman return netdev_pick_tx(netdev, skb, sb_dev); 23802a87bd73SDave Ertman } 23812a87bd73SDave Ertman 23822a87bd73SDave Ertman /** 2383148beb61SHenry Tieman * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2384148beb61SHenry Tieman * @tx_ring: tx_ring to clean 2385148beb61SHenry Tieman */ 2386e72bba21SMaciej Fijalkowski void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring) 2387148beb61SHenry Tieman { 2388148beb61SHenry Tieman struct ice_vsi *vsi = tx_ring->vsi; 2389148beb61SHenry Tieman s16 i = tx_ring->next_to_clean; 2390148beb61SHenry Tieman int budget = ICE_DFLT_IRQ_WORK; 2391148beb61SHenry Tieman struct ice_tx_desc *tx_desc; 2392148beb61SHenry Tieman struct ice_tx_buf *tx_buf; 2393148beb61SHenry Tieman 2394148beb61SHenry Tieman tx_buf = &tx_ring->tx_buf[i]; 2395148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, i); 2396148beb61SHenry Tieman i -= tx_ring->count; 2397148beb61SHenry Tieman 2398148beb61SHenry Tieman do { 2399148beb61SHenry Tieman struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2400148beb61SHenry Tieman 2401148beb61SHenry Tieman /* if next_to_watch is not set then there is no pending work */ 2402148beb61SHenry Tieman if (!eop_desc) 2403148beb61SHenry Tieman break; 2404148beb61SHenry Tieman 2405148beb61SHenry Tieman /* prevent any other reads prior to eop_desc */ 2406148beb61SHenry Tieman smp_rmb(); 2407148beb61SHenry Tieman 2408148beb61SHenry Tieman /* if the descriptor isn't done, no work to do */ 2409148beb61SHenry Tieman if (!(eop_desc->cmd_type_offset_bsz & 2410148beb61SHenry Tieman cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2411148beb61SHenry Tieman break; 2412148beb61SHenry Tieman 2413148beb61SHenry Tieman /* clear next_to_watch to prevent false hangs */ 2414148beb61SHenry Tieman tx_buf->next_to_watch = NULL; 2415148beb61SHenry Tieman tx_desc->buf_addr = 0; 2416148beb61SHenry Tieman tx_desc->cmd_type_offset_bsz = 0; 2417148beb61SHenry Tieman 2418148beb61SHenry Tieman /* move past filter desc */ 2419148beb61SHenry Tieman tx_buf++; 2420148beb61SHenry Tieman tx_desc++; 2421148beb61SHenry Tieman i++; 2422148beb61SHenry Tieman if (unlikely(!i)) { 2423148beb61SHenry Tieman i -= tx_ring->count; 2424148beb61SHenry Tieman tx_buf = tx_ring->tx_buf; 2425148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, 0); 2426148beb61SHenry Tieman } 2427148beb61SHenry Tieman 2428148beb61SHenry Tieman /* unmap the 
data header */ 2429148beb61SHenry Tieman if (dma_unmap_len(tx_buf, len)) 2430148beb61SHenry Tieman dma_unmap_single(tx_ring->dev, 2431148beb61SHenry Tieman dma_unmap_addr(tx_buf, dma), 2432148beb61SHenry Tieman dma_unmap_len(tx_buf, len), 2433148beb61SHenry Tieman DMA_TO_DEVICE); 2434148beb61SHenry Tieman if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2435148beb61SHenry Tieman devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2436148beb61SHenry Tieman 2437148beb61SHenry Tieman /* clear next_to_watch to prevent false hangs */ 2438148beb61SHenry Tieman tx_buf->raw_buf = NULL; 2439148beb61SHenry Tieman tx_buf->tx_flags = 0; 2440148beb61SHenry Tieman tx_buf->next_to_watch = NULL; 2441148beb61SHenry Tieman dma_unmap_len_set(tx_buf, len, 0); 2442148beb61SHenry Tieman tx_desc->buf_addr = 0; 2443148beb61SHenry Tieman tx_desc->cmd_type_offset_bsz = 0; 2444148beb61SHenry Tieman 2445148beb61SHenry Tieman /* move past eop_desc for start of next FD desc */ 2446148beb61SHenry Tieman tx_buf++; 2447148beb61SHenry Tieman tx_desc++; 2448148beb61SHenry Tieman i++; 2449148beb61SHenry Tieman if (unlikely(!i)) { 2450148beb61SHenry Tieman i -= tx_ring->count; 2451148beb61SHenry Tieman tx_buf = tx_ring->tx_buf; 2452148beb61SHenry Tieman tx_desc = ICE_TX_DESC(tx_ring, 0); 2453148beb61SHenry Tieman } 2454148beb61SHenry Tieman 2455148beb61SHenry Tieman budget--; 2456148beb61SHenry Tieman } while (likely(budget)); 2457148beb61SHenry Tieman 2458148beb61SHenry Tieman i += tx_ring->count; 2459148beb61SHenry Tieman tx_ring->next_to_clean = i; 2460148beb61SHenry Tieman 2461148beb61SHenry Tieman /* re-enable interrupt if needed */ 2462148beb61SHenry Tieman ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2463148beb61SHenry Tieman } 2464
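
A minimal user-space sketch of the DSCP lookup that ice_select_queue() applies via ice_get_dscp_up(): the DS field is the IPv4 TOS / IPv6 traffic-class byte with the two ECN bits shifted out, and the six-bit result indexes a 64-entry DSCP-to-UP table. The table contents and the dscp_to_up() helper below are made-up illustrations under those assumptions, not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical 64-entry DSCP -> user priority table standing in for the
 * dscp_map carried in the DCB configuration; only DSCP 46 (EF) is given
 * a non-zero priority here, purely for the example.
 */
static const uint8_t dscp_map[64] = { [46] = 5 };

/* Same extraction ice_get_dscp_up() performs: drop the two ECN bits of
 * the DS field, then use the six-bit DSCP value as the table index.
 */
static uint8_t dscp_to_up(uint8_t dsfield)
{
	return dscp_map[dsfield >> 2];
}

int main(void)
{
	/* TOS 0xB8 is DSCP 46 (Expedited Forwarding) with ECN bits clear */
	printf("TOS 0xB8 -> DSCP %u -> UP %u\n",
	       (unsigned int)(0xB8 >> 2),
	       (unsigned int)dscp_to_up(0xB8));
	return 0;
}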