// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI used to send the dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer holding the dummy packet
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));
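
	/* A filter program occupies two descriptors: the filter context
	 * descriptor copied above and a data descriptor pointing at the
	 * dummy packet. Advance the ring index by hand for each one,
	 * wrapping back to 0 at tx_ring->count.
	 */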
	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->type = ICE_TX_BUF_DUMMY;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (dma_unmap_len(tx_buf, len))
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);

	switch (tx_buf->type) {
	case ICE_TX_BUF_DUMMY:
		devm_kfree(ring->dev, tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_SKB:
		dev_kfree_skb_any(tx_buf->skb);
		break;
	case ICE_TX_BUF_XDP_TX:
		page_frag_free(tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_XDP_XMIT:
		xdp_return_frame(tx_buf->xdpf);
		break;
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->type = ICE_TX_BUF_EMPTY;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}
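
/**
 * txring_txq - Find the netdev Tx queue that maps to a Tx ring
 * @ring: Tx ring to look up
 */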
static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
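
	/* The descriptor area was allocated with dmam_alloc_coherent(), so
	 * devres would release it at device teardown anyway; freeing it
	 * explicitly here returns the page-aligned DMA memory as soon as the
	 * ring itself goes away, e.g. across a queue-count change.
	 */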
	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	/* get the bql data ready */
	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
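
	/* Bookkeeping note: "i" is biased by -count above, so it runs from
	 * -count up to 0 and the wrap tests below are the cheaper "!i"
	 * instead of a compare against tx_ring->count. E.g. with count == 512
	 * and next_to_clean == 5, i starts at -507 and hits 0 exactly when
	 * the ring wraps.
	 */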

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* follow the guidelines of other drivers */
		prefetchw(&tx_buf->skb->users);

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->type = ICE_TX_BUF_EMPTY;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
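
	/* Only restart a stopped queue once at least TX_WAKE_THRESHOLD
	 * (2 * DESC_NEEDED) descriptors are free again, so the queue does
	 * not bounce between stopped and started on every completion.
	 */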
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_tx_wake_queue(txring_txq(tx_ring));
			++tx_ring->ring_stats->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->ring_stats->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct xdp_buff *xdp = &rx_ring->xdp;
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	if (xdp->data) {
		xdp_return_buff(xdp);
		xdp->data = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	if (rx_ring->xsk_pool)
		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
	else
		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->first_desc = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	if (rx_ring->xsk_pool) {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
	} else {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->first_desc = 0;
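
	/* Publish the program pointer with WRITE_ONCE(); the hot path is
	 * expected to load it with READ_ONCE(), so the store must not tear.
	 */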
	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_frame_truesize - Calculate the truesize of a frame
 * @rx_ring: ptr to Rx ring
 * @size: size of the frame
 *
 * Calculate the truesize while taking the PAGE_SIZE of the underlying
 * arch into account.
 */
static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @rx_buf: Rx buffer to store the XDP action
 *
 * Stores the resulting action, any of ICE_XDP_{PASS, CONSUMED, TX, REDIR},
 * in rx_buf->act.
 */
static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
	    struct ice_rx_buf *rx_buf)
{
	unsigned int ret = ICE_XDP_PASS;
	u32 act;

	if (!xdp_prog)
		goto exit;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
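	/* ice_xdp_locking_key is enabled by the driver when it has fewer XDP
	 * Tx rings than CPUs; a shared ring must then be protected against
	 * concurrent XDP_TX from other CPUs.
	 */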
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (ret == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
			goto out_failure;
		ret = ICE_XDP_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		ret = ICE_XDP_CONSUMED;
	}
exit:
	rx_buf->act = ret;
	if (unlikely(xdp_buff_has_frags(xdp)))
		ice_set_rx_bufs_act(xdp, rx_ring, ret);
}

/**
 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
 * @xdpf: XDP frame that will be converted to XDP buff
 * @xdp_ring: XDP ring for transmission
 */
static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
			     struct ice_tx_ring *xdp_ring)
{
	struct xdp_buff xdp;

	xdp.data_hard_start = (void *)xdpf;
	xdp.data = xdpf->data;
	xdp.data_end = xdp.data + xdpf->len;
	xdp.frame_sz = xdpf->frame_sz;
	xdp.flags = xdpf->flags;

	return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
}
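
/* The on-stack xdp_buff above only aliases the xdp_frame's memory; the last
 * argument of __ice_xmit_xdp_ring() is true here so the buffer is handled as
 * an already-built frame rather than being converted again.
 */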

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns the number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	struct ice_tx_buf *tx_buf;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		/* Generally, should not happen */
		if (unlikely(queue_index >= vsi->num_xdp_txq))
			return -ENXIO;
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
	for (i = 0; i < n; i++) {
		const struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}
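
/* Rx buffer page handling below follows the usual split-page scheme: each
 * page is DMA-mapped once, handed to hardware in (half-)page chunks, and
 * recycled by flipping page_offset, while pagecnt_bias tracks how many
 * references the driver still owns.
 */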

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
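
/* Worked example of the bias scheme above: a fresh page starts with
 * page_count() == USHRT_MAX (1 from allocation plus USHRT_MAX - 1 added)
 * and pagecnt_bias == USHRT_MAX. Every buffer handed up the stack
 * decrements pagecnt_bias instead of touching the atomic page refcount;
 * the page keeps being recycled as long as page_count() - pagecnt_bias
 * == 1, i.e. the driver is the only remaining owner.
 */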

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}
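
/* Typical (hypothetical) refill from a poll routine: replenish exactly what
 * was just consumed and remember whether the ring is still short:
 *
 *	failure |= ice_alloc_rx_bufs(rx_ring, cleaned_count);
 */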

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp buff to place the data into
 * @rx_buf: buffer containing page to add
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the xdp buf.
 * It will just attach the page as a frag.
 */
static int
ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
		 struct ice_rx_buf *rx_buf, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!size)
		return 0;

	if (!xdp_buff_has_frags(xdp)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(xdp);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		if (unlikely(xdp_buff_has_frags(xdp)))
			ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
				   rx_buf->page_offset, size);
	sinfo->xdp_frags_size += size;

	if (page_is_pfmemalloc(rx_buf->page))
		xdp_buff_set_frag_pfmemalloc(xdp);

	return 0;
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @ntc: index of next to clean element
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       const unsigned int ntc)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[ntc];
	rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}
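
/* Two skb paths follow: ice_build_skb() wraps an skb around the existing
 * buffer with no copy of the payload, while ice_construct_skb() allocates a
 * linear area and copies the headers (and, for small frames, the whole
 * packet) out of the page.
 */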

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing XDP buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead. Driver has
 * already combined frags (if any) to skb_shared_info.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	unsigned int nr_frags;
	struct sk_buff *skb;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	if (unlikely(xdp_buff_has_frags(xdp)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdp->frame_sz,
					   xdp_buff_is_frag_pfmemalloc(xdp));

	return skb;
}
10352b245cb2SAnirudh Venkataramanan */ 1036c8b7abddSBruce Allan static struct sk_buff * 10372fba7dc5SMaciej Fijalkowski ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) 10382b245cb2SAnirudh Venkataramanan { 1039efc2214bSMaciej Fijalkowski unsigned int size = xdp->data_end - xdp->data; 10402fba7dc5SMaciej Fijalkowski struct skb_shared_info *sinfo = NULL; 10412fba7dc5SMaciej Fijalkowski struct ice_rx_buf *rx_buf; 10422fba7dc5SMaciej Fijalkowski unsigned int nr_frags = 0; 1043712edbbbSMaciej Fijalkowski unsigned int headlen; 1044712edbbbSMaciej Fijalkowski struct sk_buff *skb; 10452b245cb2SAnirudh Venkataramanan 10462b245cb2SAnirudh Venkataramanan /* prefetch first cache line of first page */ 1047c61bcebdSMaciej Fijalkowski net_prefetch(xdp->data); 10482b245cb2SAnirudh Venkataramanan 10492fba7dc5SMaciej Fijalkowski if (unlikely(xdp_buff_has_frags(xdp))) { 10502fba7dc5SMaciej Fijalkowski sinfo = xdp_get_shared_info_from_buff(xdp); 10512fba7dc5SMaciej Fijalkowski nr_frags = sinfo->nr_frags; 10522fba7dc5SMaciej Fijalkowski } 10532fba7dc5SMaciej Fijalkowski 10542b245cb2SAnirudh Venkataramanan /* allocate a skb to store the frags */ 1055c61bcebdSMaciej Fijalkowski skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, 10562b245cb2SAnirudh Venkataramanan GFP_ATOMIC | __GFP_NOWARN); 1057712edbbbSMaciej Fijalkowski if (unlikely(!skb)) 10582b245cb2SAnirudh Venkataramanan return NULL; 10592b245cb2SAnirudh Venkataramanan 10602fba7dc5SMaciej Fijalkowski rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; 10612b245cb2SAnirudh Venkataramanan skb_record_rx_queue(skb, rx_ring->q_index); 1062712edbbbSMaciej Fijalkowski /* Determine available headroom for copy */ 1063712edbbbSMaciej Fijalkowski headlen = size; 1064712edbbbSMaciej Fijalkowski if (headlen > ICE_RX_HDR_SIZE) 1065efc2214bSMaciej Fijalkowski headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); 10662b245cb2SAnirudh Venkataramanan 1067712edbbbSMaciej Fijalkowski /* align pull length to size of long to optimize memcpy performance */ 1068c61bcebdSMaciej Fijalkowski memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, 1069c61bcebdSMaciej Fijalkowski sizeof(long))); 1070712edbbbSMaciej Fijalkowski 1071712edbbbSMaciej Fijalkowski /* if we exhaust the linear part then add what is left as a frag */ 1072712edbbbSMaciej Fijalkowski size -= headlen; 1073712edbbbSMaciej Fijalkowski if (size) { 10742fba7dc5SMaciej Fijalkowski /* besides adding here a partial frag, we are going to add 10752fba7dc5SMaciej Fijalkowski * frags from xdp_buff, make sure there is enough space for 10762fba7dc5SMaciej Fijalkowski * them 10772fba7dc5SMaciej Fijalkowski */ 10782fba7dc5SMaciej Fijalkowski if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { 10792fba7dc5SMaciej Fijalkowski dev_kfree_skb(skb); 10802fba7dc5SMaciej Fijalkowski return NULL; 10812fba7dc5SMaciej Fijalkowski } 1082712edbbbSMaciej Fijalkowski skb_add_rx_frag(skb, 0, rx_buf->page, 10838a11b334SMaciej Fijalkowski rx_buf->page_offset + headlen, size, 10848a11b334SMaciej Fijalkowski xdp->frame_sz); 10852b245cb2SAnirudh Venkataramanan } else { 10861dc1a7e7SMaciej Fijalkowski /* buffer is unused, change the act that should be taken later 10871dc1a7e7SMaciej Fijalkowski * on; data was copied onto skb's linear part so there's no 10881dc1a7e7SMaciej Fijalkowski * need for adjusting page offset and we can reuse this buffer 10891dc1a7e7SMaciej Fijalkowski * as-is 1090712edbbbSMaciej Fijalkowski */ 10912fba7dc5SMaciej Fijalkowski rx_buf->act = ICE_SKB_CONSUMED; 10922fba7dc5SMaciej 
Fijalkowski } 10932fba7dc5SMaciej Fijalkowski 10942fba7dc5SMaciej Fijalkowski if (unlikely(xdp_buff_has_frags(xdp))) { 10952fba7dc5SMaciej Fijalkowski struct skb_shared_info *skinfo = skb_shinfo(skb); 10962fba7dc5SMaciej Fijalkowski 10972fba7dc5SMaciej Fijalkowski memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], 10982fba7dc5SMaciej Fijalkowski sizeof(skb_frag_t) * nr_frags); 10992fba7dc5SMaciej Fijalkowski 11002fba7dc5SMaciej Fijalkowski xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, 11012fba7dc5SMaciej Fijalkowski sinfo->xdp_frags_size, 11022fba7dc5SMaciej Fijalkowski nr_frags * xdp->frame_sz, 11032fba7dc5SMaciej Fijalkowski xdp_buff_is_frag_pfmemalloc(xdp)); 11042b245cb2SAnirudh Venkataramanan } 11052b245cb2SAnirudh Venkataramanan 11062b245cb2SAnirudh Venkataramanan return skb; 11072b245cb2SAnirudh Venkataramanan } 11082b245cb2SAnirudh Venkataramanan 11092b245cb2SAnirudh Venkataramanan /** 11101d032bc7SMaciej Fijalkowski * ice_put_rx_buf - Clean up used buffer and either recycle or free 11111d032bc7SMaciej Fijalkowski * @rx_ring: Rx descriptor ring to transact packets on 11121d032bc7SMaciej Fijalkowski * @rx_buf: Rx buffer to pull data from 11132b245cb2SAnirudh Venkataramanan * 1114d7956d81SMaciej Fijalkowski * This function will clean up the contents of the rx_buf. It will either 1115d7956d81SMaciej Fijalkowski * recycle the buffer or unmap it and free the associated resources. 11162b245cb2SAnirudh Venkataramanan */ 11171beb7830SBjörn Töpel static void 1118ac075339SMaciej Fijalkowski ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) 11192b245cb2SAnirudh Venkataramanan { 1120ac6f733aSMitch Williams if (!rx_buf) 1121ac6f733aSMitch Williams return; 1122ac6f733aSMitch Williams 1123ac075339SMaciej Fijalkowski if (ice_can_reuse_rx_page(rx_buf)) { 1124ac6f733aSMitch Williams /* hand second half of page back to the ring */ 11252b245cb2SAnirudh Venkataramanan ice_reuse_rx_page(rx_ring, rx_buf); 11262b245cb2SAnirudh Venkataramanan } else { 11272b245cb2SAnirudh Venkataramanan /* we are not reusing the buffer so unmap it */ 11287237f5b0SMaciej Fijalkowski dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, 11297237f5b0SMaciej Fijalkowski ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 11307237f5b0SMaciej Fijalkowski ICE_RX_DMA_ATTR); 113103c66a13SMaciej Fijalkowski __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 11322b245cb2SAnirudh Venkataramanan } 11332b245cb2SAnirudh Venkataramanan 11342b245cb2SAnirudh Venkataramanan /* clear contents of buffer_info */ 11352b245cb2SAnirudh Venkataramanan rx_buf->page = NULL; 11362b245cb2SAnirudh Venkataramanan } 11372b245cb2SAnirudh Venkataramanan 11382b245cb2SAnirudh Venkataramanan /** 11392b245cb2SAnirudh Venkataramanan * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1140d337f2afSAnirudh Venkataramanan * @rx_ring: Rx descriptor ring to transact packets on 11412b245cb2SAnirudh Venkataramanan * @budget: Total limit on number of packets to process 11422b245cb2SAnirudh Venkataramanan * 11432b245cb2SAnirudh Venkataramanan * This function provides a "bounce buffer" approach to Rx interrupt 11442b245cb2SAnirudh Venkataramanan * processing. The advantage to this is that on systems that have 11452b245cb2SAnirudh Venkataramanan * expensive overhead for IOMMU access this provides a means of avoiding 11462b245cb2SAnirudh Venkataramanan * it by maintaining the mapping of the page to the system. 
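 *
 * The recycling decision itself (made via ice_can_reuse_rx_page() in
 * ice_put_rx_buf() above) boils down to roughly this, simplified:
 *
 *	if (!dev_page_is_reusable(page))		// pfmemalloc or remote node
 *		return false;				// unmap and free instead
 *	if (page_count(page) - pagecnt_bias > 1)	// someone else holds a ref
 *		return false;
 *	return true;					// flip to the other half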
11472b245cb2SAnirudh Venkataramanan * 11482b245cb2SAnirudh Venkataramanan * Returns amount of work completed 11492b245cb2SAnirudh Venkataramanan */ 1150e72bba21SMaciej Fijalkowski int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) 11512b245cb2SAnirudh Venkataramanan { 1152cb0473e0SMaciej Fijalkowski unsigned int total_rx_bytes = 0, total_rx_pkts = 0; 1153f1b1f409SMaciej Fijalkowski unsigned int offset = rx_ring->rx_offset; 1154cb0473e0SMaciej Fijalkowski struct xdp_buff *xdp = &rx_ring->xdp; 1155eb087cd8SMaciej Fijalkowski struct ice_tx_ring *xdp_ring = NULL; 1156efc2214bSMaciej Fijalkowski struct bpf_prog *xdp_prog = NULL; 1157d7956d81SMaciej Fijalkowski u32 ntc = rx_ring->next_to_clean; 1158d7956d81SMaciej Fijalkowski u32 cnt = rx_ring->count; 11591dc1a7e7SMaciej Fijalkowski u32 cached_ntc = ntc; 11601dc1a7e7SMaciej Fijalkowski u32 xdp_xmit = 0; 11613246a107SMaciej Fijalkowski u32 cached_ntu; 1162cb7db356SBrett Creeley bool failure; 11632fba7dc5SMaciej Fijalkowski u32 first; 11642b245cb2SAnirudh Venkataramanan 1165d4ecdbf7SJesper Dangaard Brouer /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */ 1166d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE < 8192) 1167cb0473e0SMaciej Fijalkowski xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0); 1168d4ecdbf7SJesper Dangaard Brouer #endif 1169efc2214bSMaciej Fijalkowski 1170eb087cd8SMaciej Fijalkowski xdp_prog = READ_ONCE(rx_ring->xdp_prog); 11713246a107SMaciej Fijalkowski if (xdp_prog) { 1172eb087cd8SMaciej Fijalkowski xdp_ring = rx_ring->xdp_ring; 11733246a107SMaciej Fijalkowski cached_ntu = xdp_ring->next_to_use; 11743246a107SMaciej Fijalkowski } 1175eb087cd8SMaciej Fijalkowski 1176f9867df6SAnirudh Venkataramanan /* start the loop to process Rx packets bounded by 'budget' */ 11772b245cb2SAnirudh Venkataramanan while (likely(total_rx_pkts < (unsigned int)budget)) { 11782b245cb2SAnirudh Venkataramanan union ice_32b_rx_flex_desc *rx_desc; 11796c869cb7SMaciej Fijalkowski struct ice_rx_buf *rx_buf; 11802fba7dc5SMaciej Fijalkowski struct sk_buff *skb; 11816c869cb7SMaciej Fijalkowski unsigned int size; 11822b245cb2SAnirudh Venkataramanan u16 stat_err_bits; 11832b245cb2SAnirudh Venkataramanan u16 vlan_tag = 0; 1184dda90cb9SJesse Brandeburg u16 rx_ptype; 11852b245cb2SAnirudh Venkataramanan 1186f9867df6SAnirudh Venkataramanan /* get the Rx desc from Rx ring based on 'next_to_clean' */ 1187d7956d81SMaciej Fijalkowski rx_desc = ICE_RX_DESC(rx_ring, ntc); 11882b245cb2SAnirudh Venkataramanan 11892b245cb2SAnirudh Venkataramanan /* status_error_len will always be zero for unused descriptors 11902b245cb2SAnirudh Venkataramanan * because it's cleared in cleanup, and overlaps with hdr_addr 11912b245cb2SAnirudh Venkataramanan * which is always zero because packet split isn't used. If the 11922b245cb2SAnirudh Venkataramanan * hardware wrote DD then it will be non-zero 11932b245cb2SAnirudh Venkataramanan */ 11942b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 11950d54d8f7SBrett Creeley if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) 11962b245cb2SAnirudh Venkataramanan break; 11972b245cb2SAnirudh Venkataramanan 11982b245cb2SAnirudh Venkataramanan /* This memory barrier is needed to keep us from reading 11992b245cb2SAnirudh Venkataramanan * any other fields out of the rx_desc until we know the 12002b245cb2SAnirudh Venkataramanan * DD bit is set.
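 *
 * Schematically, the required order on this path is:
 *	1) read status_error0 and test the DD bit
 *	2) dma_rmb()
 *	3) read pkt_len, ptype and friends from the same descriptor
 * Without the barrier the CPU would be free to perform the loads in
 * step 3 before the DD check from step 1 has been resolved.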
12012b245cb2SAnirudh Venkataramanan */ 12022b245cb2SAnirudh Venkataramanan dma_rmb(); 12032b245cb2SAnirudh Venkataramanan 12043089cf6dSJesse Brandeburg ice_trace(clean_rx_irq, rx_ring, rx_desc); 1205148beb61SHenry Tieman if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 1206d6218317SQi Zhang struct ice_vsi *ctrl_vsi = rx_ring->vsi; 1207d6218317SQi Zhang 1208d6218317SQi Zhang if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && 1209b03d519dSJacob Keller ctrl_vsi->vf) 1210d6218317SQi Zhang ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); 1211d7956d81SMaciej Fijalkowski if (++ntc == cnt) 1212d7956d81SMaciej Fijalkowski ntc = 0; 1213*387d42aeSPiotr Raczynski rx_ring->first_desc = ntc; 1214148beb61SHenry Tieman continue; 1215148beb61SHenry Tieman } 1216148beb61SHenry Tieman 12176c869cb7SMaciej Fijalkowski size = le16_to_cpu(rx_desc->wb.pkt_len) & 12186c869cb7SMaciej Fijalkowski ICE_RX_FLX_DESC_PKT_LEN_M; 12192b245cb2SAnirudh Venkataramanan 1220ac6f733aSMitch Williams /* retrieve a buffer from the ring */ 1221d7956d81SMaciej Fijalkowski rx_buf = ice_get_rx_buf(rx_ring, size, ntc); 1222ac6f733aSMitch Williams 12232fba7dc5SMaciej Fijalkowski if (!xdp->data) { 12242fba7dc5SMaciej Fijalkowski void *hard_start; 1225efc2214bSMaciej Fijalkowski 1226be9df4afSLorenzo Bianconi hard_start = page_address(rx_buf->page) + rx_buf->page_offset - 1227be9df4afSLorenzo Bianconi offset; 1228cb0473e0SMaciej Fijalkowski xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); 1229d4ecdbf7SJesper Dangaard Brouer #if (PAGE_SIZE > 4096) 1230d4ecdbf7SJesper Dangaard Brouer /* At larger PAGE_SIZE, frame_sz depend on len size */ 1231cb0473e0SMaciej Fijalkowski xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size); 1232d4ecdbf7SJesper Dangaard Brouer #endif 12332fba7dc5SMaciej Fijalkowski xdp_buff_clear_frags_flag(xdp); 12342fba7dc5SMaciej Fijalkowski } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { 12352b245cb2SAnirudh Venkataramanan break; 1236712edbbbSMaciej Fijalkowski } 1237d7956d81SMaciej Fijalkowski if (++ntc == cnt) 1238d7956d81SMaciej Fijalkowski ntc = 0; 12392b245cb2SAnirudh Venkataramanan 12402b245cb2SAnirudh Venkataramanan /* skip if it is NOP desc */ 124129b82f2aSMaciej Fijalkowski if (ice_is_non_eop(rx_ring, rx_desc)) 12422b245cb2SAnirudh Venkataramanan continue; 12432b245cb2SAnirudh Venkataramanan 12442fba7dc5SMaciej Fijalkowski ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf); 12452fba7dc5SMaciej Fijalkowski if (rx_buf->act == ICE_XDP_PASS) 12462fba7dc5SMaciej Fijalkowski goto construct_skb; 12472fba7dc5SMaciej Fijalkowski total_rx_bytes += xdp_get_buff_len(xdp); 12482fba7dc5SMaciej Fijalkowski total_rx_pkts++; 12492fba7dc5SMaciej Fijalkowski 12502fba7dc5SMaciej Fijalkowski xdp->data = NULL; 12512fba7dc5SMaciej Fijalkowski rx_ring->first_desc = ntc; 12522fba7dc5SMaciej Fijalkowski continue; 12532fba7dc5SMaciej Fijalkowski construct_skb: 12542fba7dc5SMaciej Fijalkowski if (likely(ice_ring_uses_build_skb(rx_ring))) 12552fba7dc5SMaciej Fijalkowski skb = ice_build_skb(rx_ring, xdp); 12562fba7dc5SMaciej Fijalkowski else 12572fba7dc5SMaciej Fijalkowski skb = ice_construct_skb(rx_ring, xdp); 12582fba7dc5SMaciej Fijalkowski /* exit if we failed to retrieve a buffer */ 12592fba7dc5SMaciej Fijalkowski if (!skb) { 12602fba7dc5SMaciej Fijalkowski rx_ring->ring_stats->rx_stats.alloc_page_failed++; 12612fba7dc5SMaciej Fijalkowski rx_buf->act = ICE_XDP_CONSUMED; 12622fba7dc5SMaciej Fijalkowski if (unlikely(xdp_buff_has_frags(xdp))) 12632fba7dc5SMaciej Fijalkowski ice_set_rx_bufs_act(xdp, rx_ring, 
12642fba7dc5SMaciej Fijalkowski ICE_XDP_CONSUMED); 12652fba7dc5SMaciej Fijalkowski xdp->data = NULL; 12662fba7dc5SMaciej Fijalkowski rx_ring->first_desc = ntc; 12672fba7dc5SMaciej Fijalkowski break; 12682fba7dc5SMaciej Fijalkowski } 12692fba7dc5SMaciej Fijalkowski xdp->data = NULL; 12702fba7dc5SMaciej Fijalkowski rx_ring->first_desc = ntc; 12712fba7dc5SMaciej Fijalkowski 12722b245cb2SAnirudh Venkataramanan stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 12730d54d8f7SBrett Creeley if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, 12740d54d8f7SBrett Creeley stat_err_bits))) { 12752b245cb2SAnirudh Venkataramanan dev_kfree_skb_any(skb); 12762b245cb2SAnirudh Venkataramanan continue; 12772b245cb2SAnirudh Venkataramanan } 12782b245cb2SAnirudh Venkataramanan 12790d54d8f7SBrett Creeley vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc); 12802b245cb2SAnirudh Venkataramanan 1281133f4883SKrzysztof Kazimierczak /* pad the skb if needed, to make a valid ethernet frame */ 12822fba7dc5SMaciej Fijalkowski if (eth_skb_pad(skb)) 12832b245cb2SAnirudh Venkataramanan continue; 12842b245cb2SAnirudh Venkataramanan 12852b245cb2SAnirudh Venkataramanan /* probably a little skewed due to removing CRC */ 12862b245cb2SAnirudh Venkataramanan total_rx_bytes += skb->len; 12872b245cb2SAnirudh Venkataramanan 1288d76a60baSAnirudh Venkataramanan /* populate checksum, VLAN, and protocol */ 12896503b659SJesse Brandeburg rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 12906503b659SJesse Brandeburg ICE_RX_FLEX_DESC_PTYPE_M; 12916503b659SJesse Brandeburg 1292d76a60baSAnirudh Venkataramanan ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1293d76a60baSAnirudh Venkataramanan 12943089cf6dSJesse Brandeburg ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); 12952b245cb2SAnirudh Venkataramanan /* send completed skb up the stack */ 12962b245cb2SAnirudh Venkataramanan ice_receive_skb(rx_ring, skb, vlan_tag); 12972b245cb2SAnirudh Venkataramanan 12982b245cb2SAnirudh Venkataramanan /* update budget accounting */ 12992b245cb2SAnirudh Venkataramanan total_rx_pkts++; 13002b245cb2SAnirudh Venkataramanan } 13012b245cb2SAnirudh Venkataramanan 13022fba7dc5SMaciej Fijalkowski first = rx_ring->first_desc; 13032fba7dc5SMaciej Fijalkowski while (cached_ntc != first) { 13041dc1a7e7SMaciej Fijalkowski struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; 13051dc1a7e7SMaciej Fijalkowski 13061dc1a7e7SMaciej Fijalkowski if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { 13071dc1a7e7SMaciej Fijalkowski ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); 13081dc1a7e7SMaciej Fijalkowski xdp_xmit |= buf->act; 13091dc1a7e7SMaciej Fijalkowski } else if (buf->act & ICE_XDP_CONSUMED) { 13101dc1a7e7SMaciej Fijalkowski buf->pagecnt_bias++; 13111dc1a7e7SMaciej Fijalkowski } else if (buf->act == ICE_XDP_PASS) { 13121dc1a7e7SMaciej Fijalkowski ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); 13131dc1a7e7SMaciej Fijalkowski } 13141dc1a7e7SMaciej Fijalkowski 13151dc1a7e7SMaciej Fijalkowski ice_put_rx_buf(rx_ring, buf); 13161dc1a7e7SMaciej Fijalkowski if (++cached_ntc >= cnt) 13171dc1a7e7SMaciej Fijalkowski cached_ntc = 0; 13181dc1a7e7SMaciej Fijalkowski } 1319d7956d81SMaciej Fijalkowski rx_ring->next_to_clean = ntc; 1320cb7db356SBrett Creeley /* return up to cleaned_count buffers to hardware */ 13212fba7dc5SMaciej Fijalkowski failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); 1322cb7db356SBrett Creeley 13239070fe3dSMaciej Fijalkowski if (xdp_xmit) 13243246a107SMaciej Fijalkowski ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 
cached_ntu); 1325efc2214bSMaciej Fijalkowski 1326288ecf49SBenjamin Mikailenko if (rx_ring->ring_stats) 1327288ecf49SBenjamin Mikailenko ice_update_rx_ring_stats(rx_ring, total_rx_pkts, 1328288ecf49SBenjamin Mikailenko total_rx_bytes); 13292b245cb2SAnirudh Venkataramanan 13302b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 13312b245cb2SAnirudh Venkataramanan return failure ? budget : (int)total_rx_pkts; 13322b245cb2SAnirudh Venkataramanan } 13332b245cb2SAnirudh Venkataramanan 1334d8eb7ad5SJesse Brandeburg static void __ice_update_sample(struct ice_q_vector *q_vector, 1335d8eb7ad5SJesse Brandeburg struct ice_ring_container *rc, 1336d8eb7ad5SJesse Brandeburg struct dim_sample *sample, 1337d8eb7ad5SJesse Brandeburg bool is_tx) 1338d8eb7ad5SJesse Brandeburg { 1339d8eb7ad5SJesse Brandeburg u64 packets = 0, bytes = 0; 1340d8eb7ad5SJesse Brandeburg 1341d8eb7ad5SJesse Brandeburg if (is_tx) { 1342d8eb7ad5SJesse Brandeburg struct ice_tx_ring *tx_ring; 1343d8eb7ad5SJesse Brandeburg 1344d8eb7ad5SJesse Brandeburg ice_for_each_tx_ring(tx_ring, *rc) { 1345288ecf49SBenjamin Mikailenko struct ice_ring_stats *ring_stats; 1346288ecf49SBenjamin Mikailenko 1347288ecf49SBenjamin Mikailenko ring_stats = tx_ring->ring_stats; 1348288ecf49SBenjamin Mikailenko if (!ring_stats) 1349288ecf49SBenjamin Mikailenko continue; 1350288ecf49SBenjamin Mikailenko packets += ring_stats->stats.pkts; 1351288ecf49SBenjamin Mikailenko bytes += ring_stats->stats.bytes; 1352d8eb7ad5SJesse Brandeburg } 1353d8eb7ad5SJesse Brandeburg } else { 1354d8eb7ad5SJesse Brandeburg struct ice_rx_ring *rx_ring; 1355d8eb7ad5SJesse Brandeburg 1356d8eb7ad5SJesse Brandeburg ice_for_each_rx_ring(rx_ring, *rc) { 1357288ecf49SBenjamin Mikailenko struct ice_ring_stats *ring_stats; 1358288ecf49SBenjamin Mikailenko 1359288ecf49SBenjamin Mikailenko ring_stats = rx_ring->ring_stats; 1360288ecf49SBenjamin Mikailenko if (!ring_stats) 1361288ecf49SBenjamin Mikailenko continue; 1362288ecf49SBenjamin Mikailenko packets += ring_stats->stats.pkts; 1363288ecf49SBenjamin Mikailenko bytes += ring_stats->stats.bytes; 1364d8eb7ad5SJesse Brandeburg } 1365d8eb7ad5SJesse Brandeburg } 1366d8eb7ad5SJesse Brandeburg 1367d8eb7ad5SJesse Brandeburg dim_update_sample(q_vector->total_events, packets, bytes, sample); 1368d8eb7ad5SJesse Brandeburg sample->comp_ctr = 0; 1369d8eb7ad5SJesse Brandeburg 1370d8eb7ad5SJesse Brandeburg /* if dim settings get stale, like when not updated for 1 1371d8eb7ad5SJesse Brandeburg * second or longer, force it to start again. This addresses the 1372d8eb7ad5SJesse Brandeburg * frequent case of an idle queue being switched to by the 1373d8eb7ad5SJesse Brandeburg * scheduler. The 1,000 here means 1,000 milliseconds. 1374d8eb7ad5SJesse Brandeburg */ 1375d8eb7ad5SJesse Brandeburg if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) 1376d8eb7ad5SJesse Brandeburg rc->dim.state = DIM_START_MEASURE; 1377d8eb7ad5SJesse Brandeburg } 1378d8eb7ad5SJesse Brandeburg 13792b245cb2SAnirudh Venkataramanan /** 1380cdf1f1f1SJacob Keller * ice_net_dim - Update net DIM algorithm 1381cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt 1382711987bbSBrett Creeley * 1383cdf1f1f1SJacob Keller * Create a DIM sample and notify net_dim() so that it can possibly decide 1384cdf1f1f1SJacob Keller * a new ITR value based on incoming packets, bytes, and interrupts. 1385711987bbSBrett Creeley * 1386cdf1f1f1SJacob Keller * This function is a no-op if the ring is not configured to dynamic ITR. 
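 *
 * A minimal sketch of one DIM round as driven from here (the helper
 * __ice_update_sample() above does the aggregation for real):
 *
 *	struct dim_sample sample = {};
 *
 *	dim_update_sample(q_vector->total_events, packets, bytes, &sample);
 *	net_dim(&rc->dim, sample);	// compares against the last sample and
 *					// may schedule work to pick a new ITR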
1387711987bbSBrett Creeley */ 1388cdf1f1f1SJacob Keller static void ice_net_dim(struct ice_q_vector *q_vector) 138964a59d05SAnirudh Venkataramanan { 1390cdf1f1f1SJacob Keller struct ice_ring_container *tx = &q_vector->tx; 1391cdf1f1f1SJacob Keller struct ice_ring_container *rx = &q_vector->rx; 1392cdf1f1f1SJacob Keller 1393d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(tx)) { 1394d8eb7ad5SJesse Brandeburg struct dim_sample dim_sample; 1395cdf1f1f1SJacob Keller 1396d8eb7ad5SJesse Brandeburg __ice_update_sample(q_vector, tx, &dim_sample, true); 1397cdf1f1f1SJacob Keller net_dim(&tx->dim, dim_sample); 1398711987bbSBrett Creeley } 1399711987bbSBrett Creeley 1400d59684a0SJesse Brandeburg if (ITR_IS_DYNAMIC(rx)) { 1401d8eb7ad5SJesse Brandeburg struct dim_sample dim_sample; 1402cdf1f1f1SJacob Keller 1403d8eb7ad5SJesse Brandeburg __ice_update_sample(q_vector, rx, &dim_sample, false); 1404cdf1f1f1SJacob Keller net_dim(&rx->dim, dim_sample); 140564a59d05SAnirudh Venkataramanan } 140664a59d05SAnirudh Venkataramanan } 140764a59d05SAnirudh Venkataramanan 14082b245cb2SAnirudh Venkataramanan /** 140963f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 141063f545edSBrett Creeley * @itr_idx: interrupt throttling index 141164a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 141263f545edSBrett Creeley */ 14138244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 141463f545edSBrett Creeley { 14152f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 141664a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 141764a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 141864a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 141964a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 142064a59d05SAnirudh Venkataramanan * prior to the ITR field. 142164a59d05SAnirudh Venkataramanan */ 142264a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 142364a59d05SAnirudh Venkataramanan 142463f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 142563f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 142664a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 142763f545edSBrett Creeley } 142863f545edSBrett Creeley 142963f545edSBrett Creeley /** 1430d8eb7ad5SJesse Brandeburg * ice_enable_interrupt - re-enable MSI-X interrupt 1431cdf1f1f1SJacob Keller * @q_vector: the vector associated with the interrupt to enable 1432cdf1f1f1SJacob Keller * 1433d8eb7ad5SJesse Brandeburg * If the VSI is down, the interrupt will not be re-enabled. Also, 1434d8eb7ad5SJesse Brandeburg * when enabling the interrupt always reset the wb_on_itr to false 1435d8eb7ad5SJesse Brandeburg * and trigger a software interrupt to clean out internal state. 
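 *
 * As a worked example of the ice_buildreg_itr() encoding used on this
 * path (2 usec register granularity, as described above that helper),
 * requesting a 50 usec interval gives:
 *
 *	itr &= ICE_ITR_MASK;		// 50 is already even, stays 50
 *	itr_val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
 *		  (50 << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
 *	// i.e. 25 two-usec ticks land in the INTERVAL field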
143663f545edSBrett Creeley */ 1437d8eb7ad5SJesse Brandeburg static void ice_enable_interrupt(struct ice_q_vector *q_vector) 143863f545edSBrett Creeley { 14392fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 1440b7306b42SJesse Brandeburg bool wb_en = q_vector->wb_on_itr; 144163f545edSBrett Creeley u32 itr_val; 144263f545edSBrett Creeley 1443cdf1f1f1SJacob Keller if (test_bit(ICE_DOWN, vsi->state)) 1444cdf1f1f1SJacob Keller return; 14452ab28bb0SBrett Creeley 144623be7075SJesse Brandeburg /* trigger an ITR delayed software interrupt when exiting busy poll, to 144723be7075SJesse Brandeburg * make sure to catch any pending cleanups that might have been missed 144823be7075SJesse Brandeburg * due to interrupt state transition. If busy poll or poll isn't 144923be7075SJesse Brandeburg * enabled, then don't update ITR, and just enable the interrupt. 1450cdf1f1f1SJacob Keller */ 145123be7075SJesse Brandeburg if (!wb_en) { 145223be7075SJesse Brandeburg itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 145323be7075SJesse Brandeburg } else { 1454cdf1f1f1SJacob Keller q_vector->wb_on_itr = false; 145564a59d05SAnirudh Venkataramanan 145623be7075SJesse Brandeburg /* do two things here with a single write. Set up the third ITR 145723be7075SJesse Brandeburg * index to be used for software interrupt moderation, and then 145823be7075SJesse Brandeburg * trigger a software interrupt with a rate limit of 20K on 145923be7075SJesse Brandeburg * software interrupts, this will help avoid high interrupt 146023be7075SJesse Brandeburg * loads due to frequently polling and exiting polling. 1461b7306b42SJesse Brandeburg */ 146223be7075SJesse Brandeburg itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K); 1463b7306b42SJesse Brandeburg itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | 146423be7075SJesse Brandeburg ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S | 1465b7306b42SJesse Brandeburg GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; 1466b7306b42SJesse Brandeburg } 14671d9f7ca3SJesse Brandeburg wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 146863f545edSBrett Creeley } 146963f545edSBrett Creeley 147063f545edSBrett Creeley /** 14712ab28bb0SBrett Creeley * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 14722ab28bb0SBrett Creeley * @q_vector: q_vector to set WB_ON_ITR on 14732ab28bb0SBrett Creeley * 14742ab28bb0SBrett Creeley * We need to tell hardware to write-back completed descriptors even when 14752ab28bb0SBrett Creeley * interrupts are disabled. Descriptors will be written back on cache line 14762ab28bb0SBrett Creeley * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 14771d9f7ca3SJesse Brandeburg * descriptors may not be written back if they don't fill a cache line until 14781d9f7ca3SJesse Brandeburg * the next interrupt. 14792ab28bb0SBrett Creeley * 14801d9f7ca3SJesse Brandeburg * This sets the write-back frequency to whatever was set previously for the 14811d9f7ca3SJesse Brandeburg * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we 14821d9f7ca3SJesse Brandeburg * aren't meddling with the INTENA_M bit. 
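 *
 * Annotated, the single register write performed below amounts to:
 *
 *	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |  // keep current ITRs
 *	     GLINT_DYN_CTL_INTENA_MSK_M |	// do not touch INTENA itself
 *	     GLINT_DYN_CTL_WB_ON_ITR_M);	// write back on ITR expiry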
14832ab28bb0SBrett Creeley */ 14842fb0821fSJesse Brandeburg static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 14852ab28bb0SBrett Creeley { 14862fb0821fSJesse Brandeburg struct ice_vsi *vsi = q_vector->vsi; 14872fb0821fSJesse Brandeburg 14881d9f7ca3SJesse Brandeburg /* already in wb_on_itr mode no need to change it */ 1489cdf1f1f1SJacob Keller if (q_vector->wb_on_itr) 14902ab28bb0SBrett Creeley return; 14912ab28bb0SBrett Creeley 14921d9f7ca3SJesse Brandeburg /* use previously set ITR values for all of the ITR indices by 14931d9f7ca3SJesse Brandeburg * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and 14941d9f7ca3SJesse Brandeburg * be static in non-adaptive mode (user configured) 14951d9f7ca3SJesse Brandeburg */ 14962ab28bb0SBrett Creeley wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 14971d9f7ca3SJesse Brandeburg ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & 14981d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | 14991d9f7ca3SJesse Brandeburg GLINT_DYN_CTL_WB_ON_ITR_M); 15002ab28bb0SBrett Creeley 1501cdf1f1f1SJacob Keller q_vector->wb_on_itr = true; 15022ab28bb0SBrett Creeley } 15032ab28bb0SBrett Creeley 15042ab28bb0SBrett Creeley /** 15052b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 15062b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 15072b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 15082b245cb2SAnirudh Venkataramanan * 15092b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 15102b245cb2SAnirudh Venkataramanan * 15112b245cb2SAnirudh Venkataramanan * Returns the amount of work done 15122b245cb2SAnirudh Venkataramanan */ 15132b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 15142b245cb2SAnirudh Venkataramanan { 15152b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 15162b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 1517e72bba21SMaciej Fijalkowski struct ice_tx_ring *tx_ring; 1518e72bba21SMaciej Fijalkowski struct ice_rx_ring *rx_ring; 15192b245cb2SAnirudh Venkataramanan bool clean_complete = true; 15209118fcd5SBrett Creeley int budget_per_ring; 15212b245cb2SAnirudh Venkataramanan int work_done = 0; 15222b245cb2SAnirudh Venkataramanan 15232b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 15242b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 
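 *
 * The Rx budget handed out further down is split evenly between this
 * vector's rings, e.g. with the default NAPI budget of 64 and two Rx rings:
 *
 *	budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
 *	// 64 / 2 == 32 per ring; the floor of 1 keeps a large ring count
 *	// from turning the poll into a no-op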
15252b245cb2SAnirudh Venkataramanan */ 1526e72bba21SMaciej Fijalkowski ice_for_each_tx_ring(tx_ring, q_vector->tx) { 15279610bd98SMaciej Fijalkowski bool wd; 15289610bd98SMaciej Fijalkowski 15299610bd98SMaciej Fijalkowski if (tx_ring->xsk_pool) 153029322791SMaciej Fijalkowski wd = ice_xmit_zc(tx_ring); 15319610bd98SMaciej Fijalkowski else if (ice_ring_is_xdp(tx_ring)) 15329610bd98SMaciej Fijalkowski wd = true; 15339610bd98SMaciej Fijalkowski else 15349610bd98SMaciej Fijalkowski wd = ice_clean_tx_irq(tx_ring, budget); 15352d4238f5SKrzysztof Kazimierczak 15362d4238f5SKrzysztof Kazimierczak if (!wd) 15372b245cb2SAnirudh Venkataramanan clean_complete = false; 15382d4238f5SKrzysztof Kazimierczak } 15392b245cb2SAnirudh Venkataramanan 15402b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 1541d27525ecSJesse Brandeburg if (unlikely(budget <= 0)) 15422b245cb2SAnirudh Venkataramanan return budget; 15432b245cb2SAnirudh Venkataramanan 15449118fcd5SBrett Creeley /* normally we have 1 Rx ring per q_vector */ 15459118fcd5SBrett Creeley if (unlikely(q_vector->num_ring_rx > 1)) 15469118fcd5SBrett Creeley /* We attempt to distribute budget to each Rx queue fairly, but 15479118fcd5SBrett Creeley * don't allow the budget to go below 1 because that would exit 15489118fcd5SBrett Creeley * polling early. 15492b245cb2SAnirudh Venkataramanan */ 155088865fc4SKarol Kolacinski budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 15519118fcd5SBrett Creeley else 15529118fcd5SBrett Creeley /* Max of 1 Rx ring in this q_vector so give it the budget */ 15539118fcd5SBrett Creeley budget_per_ring = budget; 15542b245cb2SAnirudh Venkataramanan 1555e72bba21SMaciej Fijalkowski ice_for_each_rx_ring(rx_ring, q_vector->rx) { 15562b245cb2SAnirudh Venkataramanan int cleaned; 15572b245cb2SAnirudh Venkataramanan 15582d4238f5SKrzysztof Kazimierczak /* A dedicated path for zero-copy allows making a single 15592d4238f5SKrzysztof Kazimierczak * comparison in the irq context instead of many inside the 15602d4238f5SKrzysztof Kazimierczak * ice_clean_rx_irq function and makes the codebase cleaner. 15612d4238f5SKrzysztof Kazimierczak */ 1562e72bba21SMaciej Fijalkowski cleaned = rx_ring->xsk_pool ? 1563e72bba21SMaciej Fijalkowski ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : 1564e72bba21SMaciej Fijalkowski ice_clean_rx_irq(rx_ring, budget_per_ring); 15652b245cb2SAnirudh Venkataramanan work_done += cleaned; 15662b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 15672b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 15682b245cb2SAnirudh Venkataramanan clean_complete = false; 15692b245cb2SAnirudh Venkataramanan } 15702b245cb2SAnirudh Venkataramanan 15712b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 15721d9f7ca3SJesse Brandeburg if (!clean_complete) { 15731d9f7ca3SJesse Brandeburg /* Set the writeback on ITR so partial completions of 15741d9f7ca3SJesse Brandeburg * cache-lines will still continue even if we're polling. 
15751d9f7ca3SJesse Brandeburg */ 15761d9f7ca3SJesse Brandeburg ice_set_wb_on_itr(q_vector); 15772b245cb2SAnirudh Venkataramanan return budget; 15781d9f7ca3SJesse Brandeburg } 15792b245cb2SAnirudh Venkataramanan 15800bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 15810bcd952fSJesse Brandeburg * poll us due to busy-polling 15820bcd952fSJesse Brandeburg */ 1583a4e18669SMaciej Fijalkowski if (napi_complete_done(napi, work_done)) { 1584d8eb7ad5SJesse Brandeburg ice_net_dim(q_vector); 1585d8eb7ad5SJesse Brandeburg ice_enable_interrupt(q_vector); 1586d8eb7ad5SJesse Brandeburg } else { 15872fb0821fSJesse Brandeburg ice_set_wb_on_itr(q_vector); 1588d8eb7ad5SJesse Brandeburg } 1589e0c9fd9bSDave Ertman 159032a64994SBruce Allan return min_t(int, work_done, budget - 1); 15912b245cb2SAnirudh Venkataramanan } 15922b245cb2SAnirudh Venkataramanan 15932b245cb2SAnirudh Venkataramanan /** 1594d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 15952b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15962b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15972b245cb2SAnirudh Venkataramanan * 15982b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 15992b245cb2SAnirudh Venkataramanan */ 1600e72bba21SMaciej Fijalkowski static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 16012b245cb2SAnirudh Venkataramanan { 16021c96c168SJesse Brandeburg netif_tx_stop_queue(txring_txq(tx_ring)); 16032b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 16042b245cb2SAnirudh Venkataramanan smp_mb(); 16052b245cb2SAnirudh Venkataramanan 16062b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. */ 16072b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 16082b245cb2SAnirudh Venkataramanan return -EBUSY; 16092b245cb2SAnirudh Venkataramanan 16101c96c168SJesse Brandeburg /* A reprieve! 
- use start_queue because it doesn't call schedule */ 16111c96c168SJesse Brandeburg netif_tx_start_queue(txring_txq(tx_ring)); 1612288ecf49SBenjamin Mikailenko ++tx_ring->ring_stats->tx_stats.restart_q; 16132b245cb2SAnirudh Venkataramanan return 0; 16142b245cb2SAnirudh Venkataramanan } 16152b245cb2SAnirudh Venkataramanan 16162b245cb2SAnirudh Venkataramanan /** 1617d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 16182b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 16192b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 16202b245cb2SAnirudh Venkataramanan * 16212b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 16222b245cb2SAnirudh Venkataramanan */ 1623e72bba21SMaciej Fijalkowski static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 16242b245cb2SAnirudh Venkataramanan { 16252b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 16262b245cb2SAnirudh Venkataramanan return 0; 1627d337f2afSAnirudh Venkataramanan 16282b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 16292b245cb2SAnirudh Venkataramanan } 16302b245cb2SAnirudh Venkataramanan 16312b245cb2SAnirudh Venkataramanan /** 16322b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 16332b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 16342b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1635d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 16362b245cb2SAnirudh Venkataramanan * 16372b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 16382b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 16392b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
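 *
 * Mappings larger than what one descriptor may carry are chopped up,
 * schematically (the 12K per-descriptor limit is discussed at
 * ice_txd_use_count() further down):
 *
 *	while (size > ICE_MAX_DATA_PER_TXD) {
 *		tx_desc->buf_addr = cpu_to_le64(dma);
 *		// ... write cmd/offset/len for max_data bytes ...
 *		dma += max_data;
 *		size -= max_data;
 *		tx_desc++;		// wrapping at tx_ring->count
 *	}
 *	// the remaining 'size' bytes go into the final descriptor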
16402b245cb2SAnirudh Venkataramanan */ 1641d76a60baSAnirudh Venkataramanan static void 1642e72bba21SMaciej Fijalkowski ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, 1643d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 16442b245cb2SAnirudh Venkataramanan { 1645d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 16462b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 16472b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 16482b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 16492b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 16502b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 16514ee656bbSTony Nguyen skb_frag_t *frag; 16522b245cb2SAnirudh Venkataramanan dma_addr_t dma; 16539c99d099SJesse Brandeburg bool kick; 16542b245cb2SAnirudh Venkataramanan 1655d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1656d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1657d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 16582b245cb2SAnirudh Venkataramanan skb = first->skb; 16592b245cb2SAnirudh Venkataramanan 16602b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 16612b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 16622b245cb2SAnirudh Venkataramanan 16632b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 16642b245cb2SAnirudh Venkataramanan 1665d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1666d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1667d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1668d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1669d76a60baSAnirudh Venkataramanan } 1670d76a60baSAnirudh Venkataramanan 16712b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 16722b245cb2SAnirudh Venkataramanan 16732b245cb2SAnirudh Venkataramanan tx_buf = first; 16742b245cb2SAnirudh Venkataramanan 16752b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 16762b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 16772b245cb2SAnirudh Venkataramanan 16782b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 16792b245cb2SAnirudh Venkataramanan goto dma_error; 16802b245cb2SAnirudh Venkataramanan 16812b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 16822b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 16832b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 16842b245cb2SAnirudh Venkataramanan 16852b245cb2SAnirudh Venkataramanan /* align size to end of page */ 16862b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 16872b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 16882b245cb2SAnirudh Venkataramanan 16892b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 16902b245cb2SAnirudh Venkataramanan * can handle 16912b245cb2SAnirudh Venkataramanan */ 16922b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 16932b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 16945757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, max_data, 16955757cc7cSTony Nguyen td_tag); 16962b245cb2SAnirudh Venkataramanan 16972b245cb2SAnirudh Venkataramanan tx_desc++; 16982b245cb2SAnirudh Venkataramanan i++; 16992b245cb2SAnirudh Venkataramanan 17002b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 
17012b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 17022b245cb2SAnirudh Venkataramanan i = 0; 17032b245cb2SAnirudh Venkataramanan } 17042b245cb2SAnirudh Venkataramanan 17052b245cb2SAnirudh Venkataramanan dma += max_data; 17062b245cb2SAnirudh Venkataramanan size -= max_data; 17072b245cb2SAnirudh Venkataramanan 17082b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 17092b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 17102b245cb2SAnirudh Venkataramanan } 17112b245cb2SAnirudh Venkataramanan 17122b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 17132b245cb2SAnirudh Venkataramanan break; 17142b245cb2SAnirudh Venkataramanan 17155757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 17162b245cb2SAnirudh Venkataramanan size, td_tag); 17172b245cb2SAnirudh Venkataramanan 17182b245cb2SAnirudh Venkataramanan tx_desc++; 17192b245cb2SAnirudh Venkataramanan i++; 17202b245cb2SAnirudh Venkataramanan 17212b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 17222b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 17232b245cb2SAnirudh Venkataramanan i = 0; 17242b245cb2SAnirudh Venkataramanan } 17252b245cb2SAnirudh Venkataramanan 17262b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 17272b245cb2SAnirudh Venkataramanan data_len -= size; 17282b245cb2SAnirudh Venkataramanan 17292b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 17302b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 17312b245cb2SAnirudh Venkataramanan 17322b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 1733aa1d3fafSAlexander Lobakin tx_buf->type = ICE_TX_BUF_FRAG; 17342b245cb2SAnirudh Venkataramanan } 17352b245cb2SAnirudh Venkataramanan 17362b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 17372b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 17382b245cb2SAnirudh Venkataramanan 17392b245cb2SAnirudh Venkataramanan i++; 17402b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 17412b245cb2SAnirudh Venkataramanan i = 0; 17422b245cb2SAnirudh Venkataramanan 17432b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 1744efc2214bSMaciej Fijalkowski td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 17455757cc7cSTony Nguyen tx_desc->cmd_type_offset_bsz = 17465757cc7cSTony Nguyen ice_build_ctob(td_cmd, td_offset, size, td_tag); 17472b245cb2SAnirudh Venkataramanan 17482b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 17492b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 17502b245cb2SAnirudh Venkataramanan * 17512b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 17522b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
17532b245cb2SAnirudh Venkataramanan */ 17542b245cb2SAnirudh Venkataramanan wmb(); 17552b245cb2SAnirudh Venkataramanan 17562b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 17572b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 17582b245cb2SAnirudh Venkataramanan 17592b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 17602b245cb2SAnirudh Venkataramanan 17612b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 17622b245cb2SAnirudh Venkataramanan 17632b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 17649c99d099SJesse Brandeburg kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, 17659c99d099SJesse Brandeburg netdev_xmit_more()); 17669c99d099SJesse Brandeburg if (kick) 17679c99d099SJesse Brandeburg /* notify HW of packet */ 17682b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 17692b245cb2SAnirudh Venkataramanan 17702b245cb2SAnirudh Venkataramanan return; 17712b245cb2SAnirudh Venkataramanan 17722b245cb2SAnirudh Venkataramanan dma_error: 17732f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 17742b245cb2SAnirudh Venkataramanan for (;;) { 17752b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 17762b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 17772b245cb2SAnirudh Venkataramanan if (tx_buf == first) 17782b245cb2SAnirudh Venkataramanan break; 17792b245cb2SAnirudh Venkataramanan if (i == 0) 17802b245cb2SAnirudh Venkataramanan i = tx_ring->count; 17812b245cb2SAnirudh Venkataramanan i--; 17822b245cb2SAnirudh Venkataramanan } 17832b245cb2SAnirudh Venkataramanan 17842b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 17852b245cb2SAnirudh Venkataramanan } 17862b245cb2SAnirudh Venkataramanan 17872b245cb2SAnirudh Venkataramanan /** 1788d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1789d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1790d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1791d76a60baSAnirudh Venkataramanan * 1792d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
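 *
 * The offsets assembled below are in hardware units: MACLEN in 2-byte
 * words, IPLEN and L4LEN in 4-byte words. For a plain Ethernet/IPv4/TCP
 * frame with no options this works out to:
 *
 *	offset  = (14 / 2) << ICE_TX_DESC_LEN_MACLEN_S;	// MACLEN = 7
 *	offset |= (20 / 4) << ICE_TX_DESC_LEN_IPLEN_S;	// IPLEN  = 5
 *	offset |= (20 / 4) << ICE_TX_DESC_LEN_L4_LEN_S;	// L4LEN  = 5 (doff)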
1793d76a60baSAnirudh Venkataramanan */ 1794d76a60baSAnirudh Venkataramanan static 1795d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1796d76a60baSAnirudh Venkataramanan { 1797d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1798d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1799d76a60baSAnirudh Venkataramanan union { 1800d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1801d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1802d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1803d76a60baSAnirudh Venkataramanan } ip; 1804d76a60baSAnirudh Venkataramanan union { 1805d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1806d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1807d76a60baSAnirudh Venkataramanan } l4; 1808d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1809d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1810d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1811d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1812d76a60baSAnirudh Venkataramanan 1813d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1814d76a60baSAnirudh Venkataramanan return 0; 1815d76a60baSAnirudh Venkataramanan 181669e66c04SJoe Damato protocol = vlan_get_protocol(skb); 181769e66c04SJoe Damato 181801658aeeSPrzemyslaw Patynowski if (eth_p_mpls(protocol)) { 181969e66c04SJoe Damato ip.hdr = skb_inner_network_header(skb); 182069e66c04SJoe Damato l4.hdr = skb_checksum_start(skb); 182101658aeeSPrzemyslaw Patynowski } else { 182201658aeeSPrzemyslaw Patynowski ip.hdr = skb_network_header(skb); 182301658aeeSPrzemyslaw Patynowski l4.hdr = skb_transport_header(skb); 182401658aeeSPrzemyslaw Patynowski } 1825d76a60baSAnirudh Venkataramanan 1826d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1827d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1828d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1829d76a60baSAnirudh Venkataramanan 183069e66c04SJoe Damato /* set the tx_flags to indicate the IP protocol type. this is 183169e66c04SJoe Damato * required so that checksum header computation below is accurate. 183269e66c04SJoe Damato */ 183369e66c04SJoe Damato if (ip.v4->version == 4) 1834a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 183569e66c04SJoe Damato else if (ip.v6->version == 6) 1836a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1837a4e82a81STony Nguyen 1838a4e82a81STony Nguyen if (skb->encapsulation) { 1839a4e82a81STony Nguyen bool gso_ena = false; 1840a4e82a81STony Nguyen u32 tunnel = 0; 1841a4e82a81STony Nguyen 1842a4e82a81STony Nguyen /* define outer network header type */ 1843a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1844a4e82a81STony Nguyen tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
1845a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4 : 1846a4e82a81STony Nguyen ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 1847a4e82a81STony Nguyen l4_proto = ip.v4->protocol; 1848a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 18491b0b0b58SNick Nunley int ret; 18501b0b0b58SNick Nunley 1851a4e82a81STony Nguyen tunnel |= ICE_TX_CTX_EIPT_IPV6; 1852a4e82a81STony Nguyen exthdr = ip.hdr + sizeof(*ip.v6); 1853a4e82a81STony Nguyen l4_proto = ip.v6->nexthdr; 18541b0b0b58SNick Nunley ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 1855a4e82a81STony Nguyen &l4_proto, &frag_off); 18561b0b0b58SNick Nunley if (ret < 0) 18571b0b0b58SNick Nunley return -1; 1858a4e82a81STony Nguyen } 1859a4e82a81STony Nguyen 1860a4e82a81STony Nguyen /* define outer transport */ 1861a4e82a81STony Nguyen switch (l4_proto) { 1862a4e82a81STony Nguyen case IPPROTO_UDP: 1863a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_UDP_TUNNELING; 1864a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1865a4e82a81STony Nguyen break; 1866a4e82a81STony Nguyen case IPPROTO_GRE: 1867a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_GRE_TUNNELING; 1868a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1869a4e82a81STony Nguyen break; 1870a4e82a81STony Nguyen case IPPROTO_IPIP: 1871a4e82a81STony Nguyen case IPPROTO_IPV6: 1872a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1873a4e82a81STony Nguyen l4.hdr = skb_inner_network_header(skb); 1874a4e82a81STony Nguyen break; 1875a4e82a81STony Nguyen default: 1876a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_TSO) 1877d76a60baSAnirudh Venkataramanan return -1; 1878d76a60baSAnirudh Venkataramanan 1879a4e82a81STony Nguyen skb_checksum_help(skb); 1880a4e82a81STony Nguyen return 0; 1881a4e82a81STony Nguyen } 1882a4e82a81STony Nguyen 1883a4e82a81STony Nguyen /* compute outer L3 header size */ 1884a4e82a81STony Nguyen tunnel |= ((l4.hdr - ip.hdr) / 4) << 1885a4e82a81STony Nguyen ICE_TXD_CTX_QW0_EIPLEN_S; 1886a4e82a81STony Nguyen 1887a4e82a81STony Nguyen /* switch IP header pointer from outer to inner header */ 1888a4e82a81STony Nguyen ip.hdr = skb_inner_network_header(skb); 1889a4e82a81STony Nguyen 1890a4e82a81STony Nguyen /* compute tunnel header size */ 1891a4e82a81STony Nguyen tunnel |= ((ip.hdr - l4.hdr) / 2) << 1892a4e82a81STony Nguyen ICE_TXD_CTX_QW0_NATLEN_S; 1893a4e82a81STony Nguyen 1894a4e82a81STony Nguyen gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; 1895a4e82a81STony Nguyen /* indicate if we need to offload outer UDP header */ 1896a4e82a81STony Nguyen if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && 1897a4e82a81STony Nguyen (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 1898a4e82a81STony Nguyen tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M; 1899a4e82a81STony Nguyen 1900a4e82a81STony Nguyen /* record tunnel offload values */ 1901a4e82a81STony Nguyen off->cd_tunnel_params |= tunnel; 1902a4e82a81STony Nguyen 1903a4e82a81STony Nguyen /* set DTYP=1 to indicate that it's a Tx context descriptor 1904a4e82a81STony Nguyen * in IPsec tunnel mode with Tx offloads in Quad word 1 1905a4e82a81STony Nguyen */ 1906a4e82a81STony Nguyen off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; 1907a4e82a81STony Nguyen 1908a4e82a81STony Nguyen /* switch L4 header pointer from outer to inner */ 1909a4e82a81STony Nguyen l4.hdr = skb_inner_transport_header(skb); 1910a4e82a81STony Nguyen l4_proto = 0; 1911a4e82a81STony Nguyen 1912a4e82a81STony Nguyen /* reset type as we transition from outer to inner headers */ 1913a4e82a81STony Nguyen first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 |
ICE_TX_FLAGS_IPV6); 1914a4e82a81STony Nguyen if (ip.v4->version == 4) 1915a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV4; 1916a4e82a81STony Nguyen if (ip.v6->version == 6) 1917a4e82a81STony Nguyen first->tx_flags |= ICE_TX_FLAGS_IPV6; 1918a4e82a81STony Nguyen } 1919a4e82a81STony Nguyen 1920d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1921a4e82a81STony Nguyen if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1922d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1923d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already; the only time we 1924d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 1925d76a60baSAnirudh Venkataramanan */ 1926d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1927d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1928d76a60baSAnirudh Venkataramanan else 1929d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1930d76a60baSAnirudh Venkataramanan 1931a4e82a81STony Nguyen } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1932d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1933d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1934d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1935d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1936d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1937d76a60baSAnirudh Venkataramanan &frag_off); 1938d76a60baSAnirudh Venkataramanan } else { 1939d76a60baSAnirudh Venkataramanan return -1; 1940d76a60baSAnirudh Venkataramanan } 1941d76a60baSAnirudh Venkataramanan 1942d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1943d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1944d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1945d76a60baSAnirudh Venkataramanan 1946d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1947d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1948d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 1949d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 1950d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1951d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 1952d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1953d76a60baSAnirudh Venkataramanan break; 1954d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 1955d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 1956d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1957d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 1958d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1959d76a60baSAnirudh Venkataramanan break; 1960d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 1961cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 1962cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1963cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 1964cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1965cf909e19SAnirudh Venkataramanan break; 1966cf909e19SAnirudh Venkataramanan 1967d76a60baSAnirudh Venkataramanan default: 1968d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1969d76a60baSAnirudh Venkataramanan return -1; 1970d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 1971d76a60baSAnirudh Venkataramanan return 0;
1972d76a60baSAnirudh Venkataramanan } 1973d76a60baSAnirudh Venkataramanan 1974d76a60baSAnirudh Venkataramanan off->td_cmd |= cmd; 1975d76a60baSAnirudh Venkataramanan off->td_offset |= offset; 1976d76a60baSAnirudh Venkataramanan return 1; 1977d76a60baSAnirudh Venkataramanan } 1978d76a60baSAnirudh Venkataramanan 1979d76a60baSAnirudh Venkataramanan /** 1980f9867df6SAnirudh Venkataramanan * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 1981d76a60baSAnirudh Venkataramanan * @tx_ring: ring to send buffer on 1982d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1983d76a60baSAnirudh Venkataramanan * 1984d76a60baSAnirudh Venkataramanan * Checks the skb and sets up the corresponding generic transmit flags 1985d76a60baSAnirudh Venkataramanan * related to VLAN tagging for the HW, such as VLAN and DCB. 1986d76a60baSAnirudh Venkataramanan */ 19872bb19d6eSBrett Creeley static void 1988e72bba21SMaciej Fijalkowski ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first) 1989d76a60baSAnirudh Venkataramanan { 1990d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1991d76a60baSAnirudh Venkataramanan 19922bb19d6eSBrett Creeley /* nothing left to do, software offloaded VLAN */ 19932bb19d6eSBrett Creeley if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) 19942bb19d6eSBrett Creeley return; 19952bb19d6eSBrett Creeley 19960d54d8f7SBrett Creeley /* the VLAN ethertype/tpid is determined by VSI configuration and netdev 19970d54d8f7SBrett Creeley * feature flags; the driver allows either 802.1Q or 802.1ad 19980d54d8f7SBrett Creeley * VLAN offloads exclusively, so we only care about the VLAN ID here 1999d76a60baSAnirudh Venkataramanan */ 2000d76a60baSAnirudh Venkataramanan if (skb_vlan_tag_present(skb)) { 2001d76a60baSAnirudh Venkataramanan first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 20020d54d8f7SBrett Creeley if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2) 20030d54d8f7SBrett Creeley first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN; 20040d54d8f7SBrett Creeley else 2005d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 2006d76a60baSAnirudh Venkataramanan } 2007d76a60baSAnirudh Venkataramanan 20082bb19d6eSBrett Creeley ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 2009d76a60baSAnirudh Venkataramanan } 2010d76a60baSAnirudh Venkataramanan 2011d76a60baSAnirudh Venkataramanan /** 2012d76a60baSAnirudh Venkataramanan * ice_tso - computes mss and TSO length to prepare for TSO 2013d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 2014d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 2015d76a60baSAnirudh Venkataramanan * 2016d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
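 *
 * Worked example of the bookkeeping done below: for a 7300 byte TCP
 * payload, gso_size of 1460 and 54 bytes of headers (14 MAC + 20 IP +
 * 20 TCP):
 *
 *	gso_segs    = 5;			// 7300 / 1460
 *	bytecount  += (5 - 1) * 54;		// headers are resent per segment
 *	cd_tso_len  = skb->len - header_len;	// 7354 - 54 == 7300
 *	cd_mss      = 1460;			// programmed into the context desc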

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 1 if TSO was set up, 0 if TSO is not needed, or a negative error
 * code if TSO can't happen.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	__be16 protocol;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	protocol = vlan_get_protocol(skb);

	if (eth_p_mpls(protocol))
		ip.hdr = skb_inner_network_header(skb);
	else
		ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header; use the header
		 * struct size, not the size of the l4.udp pointer
		 */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}
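
/* Editor's worked example, not driver code: the TSO context descriptor
 * qword built above, for an assumed 9000-byte TSO payload with an MSS
 * of 1448. The constants are the same ones used in ice_tso().
 */
static inline u64 ice_example_tso_cd_qw1(void)
{
	u64 cd_tso_len = 9000;	/* example: skb->len - off->header_len */
	u64 cd_mss = 1448;	/* example: skb_shinfo(skb)->gso_size */

	return (u64)(ICE_TX_DESC_DTYPE_CTX |
		     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
		     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
		     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
}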

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 * To divide by 4K, shift right by 12 bits
 * To divide by 3, multiply by 85, then divide by 256
 * (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
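
/* Editor's worked example for the approximation above (not driver code),
 * assuming ICE_DESCS_FOR_SKB_DATA_PTR == 1:
 *   size = 12288 (12K): (12288 * 85) >> 20 = 0, +1 -> 1 descriptor
 *   size = 12289:       (12289 * 85) >> 20 = 0, +1 -> 1 descriptor; the
 *                       underestimate is absorbed by the 4K - 1 of wiggle
 *                       room noted above
 *   size = 65536 (64K): (65536 * 85) >> 20 = 5, +1 -> 6 descriptors,
 *                       matching exact DIV_ROUND_UP(65536, 12288) = 6
 */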

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
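
/* Editor's worked example, not driver code: an skb with a 256-byte
 * linear head and two 16000-byte frags (assumed sizes) needs
 *   head:  ice_txd_use_count(256)   = 1
 *   frag0: ice_txd_use_count(16000) = 2
 *   frag1: ice_txd_use_count(16000) = 2
 * so ice_xmit_desc_count() returns 5 data descriptors.
 */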

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We use
	 * this as the worst case scenario in which the frag ahead of us only
	 * provides one byte, which is why we are limited to 6 descriptors for
	 * a single transmit: the header and previous fragment are already
	 * consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
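
/* Editor's worked example, not driver code: with an assumed gso_size of
 * 2000 and seven 300-byte frags, the check above starts at
 * sum = 1 - 2000 = -1999, adds frags 0-4 (sum = -499), then adds frag 5
 * in the loop for sum = -199. sum < 0 on the first window of six frags,
 * so the six-descriptor budget can't cover one segment's worth of data
 * and the skb must be linearized.
 */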

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0) {
		tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
		return;
	}

	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto out_drop;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->ring_stats->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->ring_stats->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* prefetch for bql data which is infrequently used */
	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->type = ICE_TX_BUF_SKB;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);
	if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					(ICE_TX_CTX_DESC_IL2TAG2 <<
					 ICE_TXD_CTX_QW1_CMD_S));
		offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
				    ICE_TX_FLAGS_VLAN_S;
	}

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);
	if (ice_is_switchdev_running(vsi->back))
		ice_eswitch_set_target_vsi(skb, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function should only be called when the PF is in L3 DSCP PFC mode.
 */
static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
{
	u8 dscp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return dcbcfg->dscp_map[dscp];
}

u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
		 struct net_device *sb_dev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *dcbcfg;

	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
		skb->priority = ice_get_dscp_up(dcbcfg, skb);

	return netdev_pick_tx(netdev, skb, sb_dev);
}
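
/* Editor's sketch, illustrative only: the DSCP extraction used above.
 * A dsfield byte of 0xB8 (EF PHB) yields DSCP 0xB8 >> 2 = 46, so the
 * skb priority becomes dscp_map[46]; the 0xB8 value is an assumed
 * example input.
 */
static inline u8 ice_example_dscp(u8 dsfield)
{
	return dsfield >> 2;	/* DSCP is the top 6 bits of TOS/Traffic Class */
}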

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->type == ICE_TX_BUF_DUMMY)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* clear next_to_watch to prevent false hangs */
		tx_buf->type = ICE_TX_BUF_EMPTY;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}