// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
#include "ice_dcb_lib.h"

#define ICE_RX_HDR_SIZE		256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

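/* Note on Tx completion tracking: when a frame is mapped for transmit, the
 * tx_buf that carries its skb also records, in next_to_watch, a pointer to
 * the last (EOP) descriptor used by that frame.  The cleanup routine below
 * therefore only has to test the DD bit of that one descriptor to know the
 * whole frame has completed, and can then unmap every buffer up to it.
 */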
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool
ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = vsi->work_lmt;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	/* keep i offset by -count so the wrap checks below can simply test
	 * for zero instead of comparing against the ring size
	 */
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.pkts += total_pkts;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_pkts += total_pkts;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

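	/* If the netdev queue was stopped because the ring ran out of
	 * descriptors, re-enable it once enough descriptors have been
	 * reclaimed.  TX_WAKE_THRESHOLD leaves headroom so the queue is
	 * not stopped again immediately after being woken.
	 */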
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048, DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	u16 prev_ntu = rx_ring->next_to_use;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

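/* Note on page reference counting: ice_alloc_mapped_page() bumps the page
 * refcount by USHRT_MAX - 1 up front and mirrors that in pagecnt_bias.
 * The hot path then only adjusts pagecnt_bias (a plain integer field)
 * instead of touching the atomic page refcount per packet; the remaining
 * bias is handed back in one go via __page_frag_cache_drain() when the
 * page is finally released.
 */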
/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ICE_RXBUF_2048,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

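/* Rx buffer reuse: with PAGE_SIZE < 8192 each mapped page is treated as two
 * ICE_RXBUF_2048 halves.  After one half is handed to the stack, the
 * page_offset is flipped to the other half and the page can be given back
 * to hardware right away (ice_reuse_rx_page()), provided the stack no
 * longer holds references to it (ice_can_reuse_rx_page()).  On larger page
 * sizes the offset simply walks forward through the page instead.
 */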
/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by the @size bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
#if (PAGE_SIZE >= 8192)
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
#endif
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
		unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size);
#else
	unsigned int truesize = ICE_RXBUF_2048;
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

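/* Note: ice_construct_skb() below uses a copy-break style split: at most
 * ICE_RX_HDR_SIZE bytes of headers (as found by eth_get_headlen()) are
 * copied into the skb's linear area, and any remaining payload stays in
 * the page and is attached as a frag, so large packets avoid a full copy.
 */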
/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @size: the length of the packet
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  unsigned int size)
{
	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ICE_RXBUF_2048;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	/* hand second half of page back to the ring */
	if (ice_can_reuse_rx_page(rx_buf)) {
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool
ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	default:
		break;
	}
	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via. skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	bool failure;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
		/* allocate (if needed) and populate skb */
		if (skb)
			ice_add_rx_frag(rx_buf, skb, size);
		else
			skb = ice_construct_skb(rx_ring, rx_buf, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* correct empty headers and pad skb if needed (to make valid
		 * ethernet frame
		 */
		if (ice_cleanup_headers(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count
buffers to hardware */ 1090cb7db356SBrett Creeley failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1091cb7db356SBrett Creeley 10922b245cb2SAnirudh Venkataramanan /* update queue and vector specific stats */ 10932b245cb2SAnirudh Venkataramanan u64_stats_update_begin(&rx_ring->syncp); 10942b245cb2SAnirudh Venkataramanan rx_ring->stats.pkts += total_rx_pkts; 10952b245cb2SAnirudh Venkataramanan rx_ring->stats.bytes += total_rx_bytes; 10962b245cb2SAnirudh Venkataramanan u64_stats_update_end(&rx_ring->syncp); 10972b245cb2SAnirudh Venkataramanan rx_ring->q_vector->rx.total_pkts += total_rx_pkts; 10982b245cb2SAnirudh Venkataramanan rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 10992b245cb2SAnirudh Venkataramanan 11002b245cb2SAnirudh Venkataramanan /* guarantee a trip back through this routine if there was a failure */ 11012b245cb2SAnirudh Venkataramanan return failure ? budget : (int)total_rx_pkts; 11022b245cb2SAnirudh Venkataramanan } 11032b245cb2SAnirudh Venkataramanan 11042b245cb2SAnirudh Venkataramanan /** 1105711987bbSBrett Creeley * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic 1106711987bbSBrett Creeley * @port_info: port_info structure containing the current link speed 1107711987bbSBrett Creeley * @avg_pkt_size: average size of Tx or Rx packets based on clean routine 11082f2da36eSAnirudh Venkataramanan * @itr: ITR value to update 1109711987bbSBrett Creeley * 1110711987bbSBrett Creeley * Calculate how big of an increment should be applied to the ITR value passed 1111711987bbSBrett Creeley * in based on wmem_default, SKB overhead, Ethernet overhead, and the current 1112711987bbSBrett Creeley * link speed. 1113711987bbSBrett Creeley * 1114711987bbSBrett Creeley * The following is a calculation derived from: 1115711987bbSBrett Creeley * wmem_default / (size + overhead) = desired_pkts_per_int 1116711987bbSBrett Creeley * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate 1117711987bbSBrett Creeley * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1118711987bbSBrett Creeley * 1119711987bbSBrett Creeley * Assuming wmem_default is 212992 and overhead is 640 bytes per 1120711987bbSBrett Creeley * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1121711987bbSBrett Creeley * formula down to: 1122711987bbSBrett Creeley * 1123711987bbSBrett Creeley * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 1124711987bbSBrett Creeley * ITR = -------------------------------------------- * -------------- 1125711987bbSBrett Creeley * rate pkt_size + 640 1126711987bbSBrett Creeley */ 1127711987bbSBrett Creeley static unsigned int 1128711987bbSBrett Creeley ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, 1129711987bbSBrett Creeley unsigned int avg_pkt_size, 1130711987bbSBrett Creeley unsigned int itr) 113164a59d05SAnirudh Venkataramanan { 1132711987bbSBrett Creeley switch (port_info->phy.link_info.link_speed) { 1133711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_100GB: 1134711987bbSBrett Creeley itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), 1135711987bbSBrett Creeley avg_pkt_size + 640); 1136711987bbSBrett Creeley break; 1137711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_50GB: 1138711987bbSBrett Creeley itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), 1139711987bbSBrett Creeley avg_pkt_size + 640); 1140711987bbSBrett Creeley break; 114164a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_40GB: 1142711987bbSBrett Creeley itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), 1143711987bbSBrett Creeley avg_pkt_size + 640); 
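		/* For illustration, plugging the numbers from the comment
		 * above into this 40 Gbps case: the multiplier 43 is roughly
		 * wmem_default * bits_per_byte * usecs_per_sec / rate
		 * = 212992 * 8 * 1000000 / 40e9 ~= 42.6, and assuming a
		 * 1500 byte average packet this adds
		 * DIV_ROUND_UP(43 * 1524, 2140) = 31 usecs to the ITR.
		 */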
1144711987bbSBrett Creeley break; 114564a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_25GB: 1146711987bbSBrett Creeley itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), 1147711987bbSBrett Creeley avg_pkt_size + 640); 1148711987bbSBrett Creeley break; 114964a59d05SAnirudh Venkataramanan case ICE_AQ_LINK_SPEED_20GB: 1150711987bbSBrett Creeley itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), 1151711987bbSBrett Creeley avg_pkt_size + 640); 1152711987bbSBrett Creeley break; 1153711987bbSBrett Creeley case ICE_AQ_LINK_SPEED_10GB: 1154711987bbSBrett Creeley /* fall through */ 115564a59d05SAnirudh Venkataramanan default: 1156711987bbSBrett Creeley itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), 1157711987bbSBrett Creeley avg_pkt_size + 640); 1158711987bbSBrett Creeley break; 115964a59d05SAnirudh Venkataramanan } 1160711987bbSBrett Creeley 1161711987bbSBrett Creeley if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 1162711987bbSBrett Creeley itr &= ICE_ITR_ADAPTIVE_LATENCY; 1163711987bbSBrett Creeley itr += ICE_ITR_ADAPTIVE_MAX_USECS; 1164711987bbSBrett Creeley } 1165711987bbSBrett Creeley 1166711987bbSBrett Creeley return itr; 116764a59d05SAnirudh Venkataramanan } 116864a59d05SAnirudh Venkataramanan 116964a59d05SAnirudh Venkataramanan /** 117064a59d05SAnirudh Venkataramanan * ice_update_itr - update the adaptive ITR value based on statistics 117164a59d05SAnirudh Venkataramanan * @q_vector: structure containing interrupt and ring information 117264a59d05SAnirudh Venkataramanan * @rc: structure containing ring performance data 117364a59d05SAnirudh Venkataramanan * 117464a59d05SAnirudh Venkataramanan * Stores a new ITR value based on packets and byte 117564a59d05SAnirudh Venkataramanan * counts during the last interrupt. The advantage of per interrupt 117664a59d05SAnirudh Venkataramanan * computation is faster updates and more accurate ITR for the current 117764a59d05SAnirudh Venkataramanan * traffic pattern. Constants in this function were computed 117864a59d05SAnirudh Venkataramanan * based on theoretical maximum wire speed and thresholds were set based 117964a59d05SAnirudh Venkataramanan * on testing data as well as attempting to minimize response time 118064a59d05SAnirudh Venkataramanan * while increasing bulk throughput. 118164a59d05SAnirudh Venkataramanan */ 118264a59d05SAnirudh Venkataramanan static void 118364a59d05SAnirudh Venkataramanan ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) 118464a59d05SAnirudh Venkataramanan { 118564a59d05SAnirudh Venkataramanan unsigned long next_update = jiffies; 1186711987bbSBrett Creeley unsigned int packets, bytes, itr; 118764a59d05SAnirudh Venkataramanan bool container_is_rx; 118864a59d05SAnirudh Venkataramanan 118964a59d05SAnirudh Venkataramanan if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) 119064a59d05SAnirudh Venkataramanan return; 119164a59d05SAnirudh Venkataramanan 119264a59d05SAnirudh Venkataramanan /* If itr_countdown is set it means we programmed an ITR within 119364a59d05SAnirudh Venkataramanan * the last 4 interrupt cycles. This has a side effect of us 119464a59d05SAnirudh Venkataramanan * potentially firing an early interrupt. In order to work around 119564a59d05SAnirudh Venkataramanan * this we need to throw out any data received for a few 119664a59d05SAnirudh Venkataramanan * interrupts following the update. 
119764a59d05SAnirudh Venkataramanan */ 119864a59d05SAnirudh Venkataramanan if (q_vector->itr_countdown) { 119964a59d05SAnirudh Venkataramanan itr = rc->target_itr; 120064a59d05SAnirudh Venkataramanan goto clear_counts; 120164a59d05SAnirudh Venkataramanan } 120264a59d05SAnirudh Venkataramanan 120364a59d05SAnirudh Venkataramanan container_is_rx = (&q_vector->rx == rc); 120464a59d05SAnirudh Venkataramanan /* For Rx we want to push the delay up and default to low latency. 120564a59d05SAnirudh Venkataramanan * for Tx we want to pull the delay down and default to high latency. 120664a59d05SAnirudh Venkataramanan */ 120764a59d05SAnirudh Venkataramanan itr = container_is_rx ? 120864a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : 120964a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; 121064a59d05SAnirudh Venkataramanan 121164a59d05SAnirudh Venkataramanan /* If we didn't update within up to 1 - 2 jiffies we can assume 121264a59d05SAnirudh Venkataramanan * that either packets are coming in so slow there hasn't been 121364a59d05SAnirudh Venkataramanan * any work, or that there is so much work that NAPI is dealing 121464a59d05SAnirudh Venkataramanan * with interrupt moderation and we don't need to do anything. 121564a59d05SAnirudh Venkataramanan */ 121664a59d05SAnirudh Venkataramanan if (time_after(next_update, rc->next_update)) 121764a59d05SAnirudh Venkataramanan goto clear_counts; 121864a59d05SAnirudh Venkataramanan 121964a59d05SAnirudh Venkataramanan packets = rc->total_pkts; 122064a59d05SAnirudh Venkataramanan bytes = rc->total_bytes; 122164a59d05SAnirudh Venkataramanan 122264a59d05SAnirudh Venkataramanan if (container_is_rx) { 122364a59d05SAnirudh Venkataramanan /* If Rx there are 1 to 4 packets and bytes are less than 122464a59d05SAnirudh Venkataramanan * 9000 assume insufficient data to use bulk rate limiting 122564a59d05SAnirudh Venkataramanan * approach unless Tx is already in bulk rate limiting. We 122664a59d05SAnirudh Venkataramanan * are likely latency driven. 122764a59d05SAnirudh Venkataramanan */ 122864a59d05SAnirudh Venkataramanan if (packets && packets < 4 && bytes < 9000 && 122964a59d05SAnirudh Venkataramanan (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { 123064a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_LATENCY; 1231711987bbSBrett Creeley goto adjust_by_size_and_speed; 123264a59d05SAnirudh Venkataramanan } 123364a59d05SAnirudh Venkataramanan } else if (packets < 4) { 123464a59d05SAnirudh Venkataramanan /* If we have Tx and Rx ITR maxed and Tx ITR is running in 123564a59d05SAnirudh Venkataramanan * bulk mode and we are receiving 4 or fewer packets just 123664a59d05SAnirudh Venkataramanan * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 123764a59d05SAnirudh Venkataramanan * that the Rx can relax. 123864a59d05SAnirudh Venkataramanan */ 123964a59d05SAnirudh Venkataramanan if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && 124064a59d05SAnirudh Venkataramanan (q_vector->rx.target_itr & ICE_ITR_MASK) == 124164a59d05SAnirudh Venkataramanan ICE_ITR_ADAPTIVE_MAX_USECS) 124264a59d05SAnirudh Venkataramanan goto clear_counts; 124364a59d05SAnirudh Venkataramanan } else if (packets > 32) { 124464a59d05SAnirudh Venkataramanan /* If we have processed over 32 packets in a single interrupt 124564a59d05SAnirudh Venkataramanan * for Tx assume we need to switch over to "bulk" mode. 
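	 * Clearing the ICE_ITR_ADAPTIVE_LATENCY bit below is what moves
	 * this ring container into bulk (throughput oriented) mode.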
124664a59d05SAnirudh Venkataramanan */ 124764a59d05SAnirudh Venkataramanan rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; 124864a59d05SAnirudh Venkataramanan } 124964a59d05SAnirudh Venkataramanan 125064a59d05SAnirudh Venkataramanan /* We have no packets to actually measure against. This means 125164a59d05SAnirudh Venkataramanan * either one of the other queues on this vector is active or 125264a59d05SAnirudh Venkataramanan * we are a Tx queue doing TSO with too high of an interrupt rate. 125364a59d05SAnirudh Venkataramanan * 125464a59d05SAnirudh Venkataramanan * Between 4 and 56 we can assume that our current interrupt delay 125564a59d05SAnirudh Venkataramanan * is only slightly too low. As such we should increase it by a small 125664a59d05SAnirudh Venkataramanan * fixed amount. 125764a59d05SAnirudh Venkataramanan */ 125864a59d05SAnirudh Venkataramanan if (packets < 56) { 125964a59d05SAnirudh Venkataramanan itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; 126064a59d05SAnirudh Venkataramanan if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 126164a59d05SAnirudh Venkataramanan itr &= ICE_ITR_ADAPTIVE_LATENCY; 126264a59d05SAnirudh Venkataramanan itr += ICE_ITR_ADAPTIVE_MAX_USECS; 126364a59d05SAnirudh Venkataramanan } 126464a59d05SAnirudh Venkataramanan goto clear_counts; 126564a59d05SAnirudh Venkataramanan } 126664a59d05SAnirudh Venkataramanan 126764a59d05SAnirudh Venkataramanan if (packets <= 256) { 126864a59d05SAnirudh Venkataramanan itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 126964a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 127064a59d05SAnirudh Venkataramanan 127164a59d05SAnirudh Venkataramanan /* Between 56 and 112 is our "goldilocks" zone where we are 127264a59d05SAnirudh Venkataramanan * working out "just right". Just report that our current 127364a59d05SAnirudh Venkataramanan * ITR is good for us. 127464a59d05SAnirudh Venkataramanan */ 127564a59d05SAnirudh Venkataramanan if (packets <= 112) 127664a59d05SAnirudh Venkataramanan goto clear_counts; 127764a59d05SAnirudh Venkataramanan 127864a59d05SAnirudh Venkataramanan /* If packet count is 128 or greater we are likely looking 127964a59d05SAnirudh Venkataramanan * at a slight overrun of the delay we want. Try halving 128064a59d05SAnirudh Venkataramanan * our delay to see if that will cut the number of packets 128164a59d05SAnirudh Venkataramanan * in half per interrupt. 128264a59d05SAnirudh Venkataramanan */ 128364a59d05SAnirudh Venkataramanan itr >>= 1; 128464a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 128564a59d05SAnirudh Venkataramanan if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) 128664a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_MIN_USECS; 128764a59d05SAnirudh Venkataramanan 128864a59d05SAnirudh Venkataramanan goto clear_counts; 128964a59d05SAnirudh Venkataramanan } 129064a59d05SAnirudh Venkataramanan 129164a59d05SAnirudh Venkataramanan /* The paths below assume we are dealing with a bulk ITR since 129264a59d05SAnirudh Venkataramanan * number of packets is greater than 256. We are just going to have 129364a59d05SAnirudh Venkataramanan * to compute a value and try to bring the count under control, 129464a59d05SAnirudh Venkataramanan * though for smaller packet sizes there isn't much we can do as 129564a59d05SAnirudh Venkataramanan * NAPI polling will likely be kicking in sooner rather than later. 
129664a59d05SAnirudh Venkataramanan */ 129764a59d05SAnirudh Venkataramanan itr = ICE_ITR_ADAPTIVE_BULK; 129864a59d05SAnirudh Venkataramanan 1299711987bbSBrett Creeley adjust_by_size_and_speed: 130064a59d05SAnirudh Venkataramanan 1301711987bbSBrett Creeley /* based on checks above packets cannot be 0 so division is safe */ 1302711987bbSBrett Creeley itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, 1303711987bbSBrett Creeley bytes / packets, itr); 130464a59d05SAnirudh Venkataramanan 130564a59d05SAnirudh Venkataramanan clear_counts: 130664a59d05SAnirudh Venkataramanan /* write back value */ 130764a59d05SAnirudh Venkataramanan rc->target_itr = itr; 130864a59d05SAnirudh Venkataramanan 130964a59d05SAnirudh Venkataramanan /* next update should occur within next jiffy */ 131064a59d05SAnirudh Venkataramanan rc->next_update = next_update + 1; 131164a59d05SAnirudh Venkataramanan 131264a59d05SAnirudh Venkataramanan rc->total_bytes = 0; 131364a59d05SAnirudh Venkataramanan rc->total_pkts = 0; 131464a59d05SAnirudh Venkataramanan } 131564a59d05SAnirudh Venkataramanan 13162b245cb2SAnirudh Venkataramanan /** 131763f545edSBrett Creeley * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 131863f545edSBrett Creeley * @itr_idx: interrupt throttling index 131964a59d05SAnirudh Venkataramanan * @itr: interrupt throttling value in usecs 132063f545edSBrett Creeley */ 13218244dd2dSBrett Creeley static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 132263f545edSBrett Creeley { 13232f2da36eSAnirudh Venkataramanan /* The ITR value is reported in microseconds, and the register value is 132464a59d05SAnirudh Venkataramanan * recorded in 2 microsecond units. For this reason we only need to 132564a59d05SAnirudh Venkataramanan * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 132664a59d05SAnirudh Venkataramanan * granularity as a shift instead of division. The mask makes sure the 132764a59d05SAnirudh Venkataramanan * ITR value is never odd so we don't accidentally write into the field 132864a59d05SAnirudh Venkataramanan * prior to the ITR field. 132964a59d05SAnirudh Venkataramanan */ 133064a59d05SAnirudh Venkataramanan itr &= ICE_ITR_MASK; 133164a59d05SAnirudh Venkataramanan 133263f545edSBrett Creeley return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 133363f545edSBrett Creeley (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 133464a59d05SAnirudh Venkataramanan (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 133563f545edSBrett Creeley } 133663f545edSBrett Creeley 133764a59d05SAnirudh Venkataramanan /* The act of updating the ITR will cause it to immediately trigger. In order 133864a59d05SAnirudh Venkataramanan * to prevent this from throwing off adaptive update statistics we defer the 133964a59d05SAnirudh Venkataramanan * update so that it can only happen so often. So after either Tx or Rx are 134064a59d05SAnirudh Venkataramanan * updated we make the adaptive scheme wait until either the ITR completely 134164a59d05SAnirudh Venkataramanan * expires via the next_update expiration or we have been through at least 134264a59d05SAnirudh Venkataramanan * 3 interrupts. 
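 * ITR_COUNTDOWN_START below is that interrupt count.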
134364a59d05SAnirudh Venkataramanan */ 134464a59d05SAnirudh Venkataramanan #define ITR_COUNTDOWN_START 3 134564a59d05SAnirudh Venkataramanan 134663f545edSBrett Creeley /** 134763f545edSBrett Creeley * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt 134863f545edSBrett Creeley * @vsi: the VSI associated with the q_vector 134963f545edSBrett Creeley * @q_vector: q_vector for which ITR is being updated and interrupt enabled 135063f545edSBrett Creeley */ 135163f545edSBrett Creeley static void 135263f545edSBrett Creeley ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) 135363f545edSBrett Creeley { 135464a59d05SAnirudh Venkataramanan struct ice_ring_container *tx = &q_vector->tx; 135564a59d05SAnirudh Venkataramanan struct ice_ring_container *rx = &q_vector->rx; 135663f545edSBrett Creeley u32 itr_val; 135763f545edSBrett Creeley 135864a59d05SAnirudh Venkataramanan /* This will do nothing if dynamic updates are not enabled */ 135964a59d05SAnirudh Venkataramanan ice_update_itr(q_vector, tx); 136064a59d05SAnirudh Venkataramanan ice_update_itr(q_vector, rx); 136164a59d05SAnirudh Venkataramanan 136263f545edSBrett Creeley /* This block of logic allows us to get away with only updating 136363f545edSBrett Creeley * one ITR value with each interrupt. The idea is to perform a 136463f545edSBrett Creeley * pseudo-lazy update with the following criteria. 136563f545edSBrett Creeley * 136663f545edSBrett Creeley * 1. Rx is given higher priority than Tx if both are in same state 136763f545edSBrett Creeley * 2. If we must reduce an ITR that is given highest priority. 136863f545edSBrett Creeley * 3. We then give priority to increasing ITR based on amount. 136963f545edSBrett Creeley */ 137064a59d05SAnirudh Venkataramanan if (rx->target_itr < rx->current_itr) { 137163f545edSBrett Creeley /* Rx ITR needs to be reduced, this is highest priority */ 137264a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 137364a59d05SAnirudh Venkataramanan rx->current_itr = rx->target_itr; 137464a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 137564a59d05SAnirudh Venkataramanan } else if ((tx->target_itr < tx->current_itr) || 137664a59d05SAnirudh Venkataramanan ((rx->target_itr - rx->current_itr) < 137764a59d05SAnirudh Venkataramanan (tx->target_itr - tx->current_itr))) { 137863f545edSBrett Creeley /* Tx ITR needs to be reduced, this is second priority 137963f545edSBrett Creeley * Tx ITR needs to be increased more than Rx, fourth priority 138063f545edSBrett Creeley */ 138164a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); 138264a59d05SAnirudh Venkataramanan tx->current_itr = tx->target_itr; 138364a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 138464a59d05SAnirudh Venkataramanan } else if (rx->current_itr != rx->target_itr) { 138563f545edSBrett Creeley /* Rx ITR needs to be increased, third priority */ 138664a59d05SAnirudh Venkataramanan itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 138764a59d05SAnirudh Venkataramanan rx->current_itr = rx->target_itr; 138864a59d05SAnirudh Venkataramanan q_vector->itr_countdown = ITR_COUNTDOWN_START; 138963f545edSBrett Creeley } else { 139063f545edSBrett Creeley /* Still have to re-enable the interrupts */ 139163f545edSBrett Creeley itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 139264a59d05SAnirudh Venkataramanan if (q_vector->itr_countdown) 139364a59d05SAnirudh Venkataramanan q_vector->itr_countdown--; 139463f545edSBrett Creeley } 
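	/* Finally, write the chosen ITR value (or a plain interrupt
	 * re-enable with no ITR update) to this vector's GLINT_DYN_CTL
	 * register, unless the VSI is being brought down.
	 */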
139563f545edSBrett Creeley 139664a59d05SAnirudh Venkataramanan if (!test_bit(__ICE_DOWN, vsi->state)) 139764a59d05SAnirudh Venkataramanan wr32(&vsi->back->hw, 1398b07833a0SBrett Creeley GLINT_DYN_CTL(q_vector->reg_idx), 139964a59d05SAnirudh Venkataramanan itr_val); 140063f545edSBrett Creeley } 140163f545edSBrett Creeley 140263f545edSBrett Creeley /** 14032b245cb2SAnirudh Venkataramanan * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 14042b245cb2SAnirudh Venkataramanan * @napi: napi struct with our devices info in it 14052b245cb2SAnirudh Venkataramanan * @budget: amount of work driver is allowed to do this pass, in packets 14062b245cb2SAnirudh Venkataramanan * 14072b245cb2SAnirudh Venkataramanan * This function will clean all queues associated with a q_vector. 14082b245cb2SAnirudh Venkataramanan * 14092b245cb2SAnirudh Venkataramanan * Returns the amount of work done 14102b245cb2SAnirudh Venkataramanan */ 14112b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget) 14122b245cb2SAnirudh Venkataramanan { 14132b245cb2SAnirudh Venkataramanan struct ice_q_vector *q_vector = 14142b245cb2SAnirudh Venkataramanan container_of(napi, struct ice_q_vector, napi); 14152b245cb2SAnirudh Venkataramanan struct ice_vsi *vsi = q_vector->vsi; 14162b245cb2SAnirudh Venkataramanan struct ice_pf *pf = vsi->back; 14172b245cb2SAnirudh Venkataramanan bool clean_complete = true; 14182b245cb2SAnirudh Venkataramanan int budget_per_ring = 0; 14192b245cb2SAnirudh Venkataramanan struct ice_ring *ring; 14202b245cb2SAnirudh Venkataramanan int work_done = 0; 14212b245cb2SAnirudh Venkataramanan 14222b245cb2SAnirudh Venkataramanan /* Since the actual Tx work is minimal, we can give the Tx a larger 14232b245cb2SAnirudh Venkataramanan * budget and be more aggressive about cleaning up the Tx descriptors. 14242b245cb2SAnirudh Venkataramanan */ 14252b245cb2SAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->tx) 14262b245cb2SAnirudh Venkataramanan if (!ice_clean_tx_irq(vsi, ring, budget)) 14272b245cb2SAnirudh Venkataramanan clean_complete = false; 14282b245cb2SAnirudh Venkataramanan 14292b245cb2SAnirudh Venkataramanan /* Handle case where we are called by netpoll with a budget of 0 */ 14302b245cb2SAnirudh Venkataramanan if (budget <= 0) 14312b245cb2SAnirudh Venkataramanan return budget; 14322b245cb2SAnirudh Venkataramanan 14332b245cb2SAnirudh Venkataramanan /* We attempt to distribute budget to each Rx queue fairly, but don't 14342b245cb2SAnirudh Venkataramanan * allow the budget to go below 1 because that would exit polling early. 
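	 *
	 * For example, assuming a NAPI budget of 64 and 4 Rx rings on this
	 * vector, each ring would be allowed to clean up to 16 descriptors
	 * per poll.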
14352b245cb2SAnirudh Venkataramanan */ 14362b245cb2SAnirudh Venkataramanan if (q_vector->num_ring_rx) 14372b245cb2SAnirudh Venkataramanan budget_per_ring = max(budget / q_vector->num_ring_rx, 1); 14382b245cb2SAnirudh Venkataramanan 14392b245cb2SAnirudh Venkataramanan ice_for_each_ring(ring, q_vector->rx) { 14402b245cb2SAnirudh Venkataramanan int cleaned; 14412b245cb2SAnirudh Venkataramanan 14422b245cb2SAnirudh Venkataramanan cleaned = ice_clean_rx_irq(ring, budget_per_ring); 14432b245cb2SAnirudh Venkataramanan work_done += cleaned; 14442b245cb2SAnirudh Venkataramanan /* if we clean as many as budgeted, we must not be done */ 14452b245cb2SAnirudh Venkataramanan if (cleaned >= budget_per_ring) 14462b245cb2SAnirudh Venkataramanan clean_complete = false; 14472b245cb2SAnirudh Venkataramanan } 14482b245cb2SAnirudh Venkataramanan 14492b245cb2SAnirudh Venkataramanan /* If work not completed, return budget and polling will return */ 14502b245cb2SAnirudh Venkataramanan if (!clean_complete) 14512b245cb2SAnirudh Venkataramanan return budget; 14522b245cb2SAnirudh Venkataramanan 14530bcd952fSJesse Brandeburg /* Exit the polling mode, but don't re-enable interrupts if stack might 14540bcd952fSJesse Brandeburg * poll us due to busy-polling 14550bcd952fSJesse Brandeburg */ 14560bcd952fSJesse Brandeburg if (likely(napi_complete_done(napi, work_done))) 14572b245cb2SAnirudh Venkataramanan if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 145863f545edSBrett Creeley ice_update_ena_itr(vsi, q_vector); 1459e0c9fd9bSDave Ertman 146032a64994SBruce Allan return min_t(int, work_done, budget - 1); 14612b245cb2SAnirudh Venkataramanan } 14622b245cb2SAnirudh Venkataramanan 14632b245cb2SAnirudh Venkataramanan /* helper function for building cmd/type/offset */ 14642b245cb2SAnirudh Venkataramanan static __le64 14652b245cb2SAnirudh Venkataramanan build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) 14662b245cb2SAnirudh Venkataramanan { 14672b245cb2SAnirudh Venkataramanan return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | 14682b245cb2SAnirudh Venkataramanan (td_cmd << ICE_TXD_QW1_CMD_S) | 14692b245cb2SAnirudh Venkataramanan (td_offset << ICE_TXD_QW1_OFFSET_S) | 14702b245cb2SAnirudh Venkataramanan ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | 14712b245cb2SAnirudh Venkataramanan (td_tag << ICE_TXD_QW1_L2TAG1_S)); 14722b245cb2SAnirudh Venkataramanan } 14732b245cb2SAnirudh Venkataramanan 14742b245cb2SAnirudh Venkataramanan /** 1475d337f2afSAnirudh Venkataramanan * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 14762b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 14772b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 14782b245cb2SAnirudh Venkataramanan * 14792b245cb2SAnirudh Venkataramanan * Returns -EBUSY if a stop is needed, else 0 14802b245cb2SAnirudh Venkataramanan */ 14812b245cb2SAnirudh Venkataramanan static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 14822b245cb2SAnirudh Venkataramanan { 14832b245cb2SAnirudh Venkataramanan netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 14842b245cb2SAnirudh Venkataramanan /* Memory barrier before checking head and tail */ 14852b245cb2SAnirudh Venkataramanan smp_mb(); 14862b245cb2SAnirudh Venkataramanan 14872b245cb2SAnirudh Venkataramanan /* Check again in a case another CPU has just made room available. 
*/ 14882b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 14892b245cb2SAnirudh Venkataramanan return -EBUSY; 14902b245cb2SAnirudh Venkataramanan 14912b245cb2SAnirudh Venkataramanan /* A reprieve! - use start_subqueue because it doesn't call schedule */ 14922b245cb2SAnirudh Venkataramanan netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 14932b245cb2SAnirudh Venkataramanan ++tx_ring->tx_stats.restart_q; 14942b245cb2SAnirudh Venkataramanan return 0; 14952b245cb2SAnirudh Venkataramanan } 14962b245cb2SAnirudh Venkataramanan 14972b245cb2SAnirudh Venkataramanan /** 1498d337f2afSAnirudh Venkataramanan * ice_maybe_stop_tx - 1st level check for Tx stop conditions 14992b245cb2SAnirudh Venkataramanan * @tx_ring: the ring to be checked 15002b245cb2SAnirudh Venkataramanan * @size: the size buffer we want to assure is available 15012b245cb2SAnirudh Venkataramanan * 15022b245cb2SAnirudh Venkataramanan * Returns 0 if stop is not needed 15032b245cb2SAnirudh Venkataramanan */ 15042b245cb2SAnirudh Venkataramanan static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 15052b245cb2SAnirudh Venkataramanan { 15062b245cb2SAnirudh Venkataramanan if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 15072b245cb2SAnirudh Venkataramanan return 0; 1508d337f2afSAnirudh Venkataramanan 15092b245cb2SAnirudh Venkataramanan return __ice_maybe_stop_tx(tx_ring, size); 15102b245cb2SAnirudh Venkataramanan } 15112b245cb2SAnirudh Venkataramanan 15122b245cb2SAnirudh Venkataramanan /** 15132b245cb2SAnirudh Venkataramanan * ice_tx_map - Build the Tx descriptor 15142b245cb2SAnirudh Venkataramanan * @tx_ring: ring to send buffer on 15152b245cb2SAnirudh Venkataramanan * @first: first buffer info buffer to use 1516d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 15172b245cb2SAnirudh Venkataramanan * 15182b245cb2SAnirudh Venkataramanan * This function loops over the skb data pointed to by *first 15192b245cb2SAnirudh Venkataramanan * and gets a physical address for each memory location and programs 15202b245cb2SAnirudh Venkataramanan * it and the length into the transmit descriptor. 
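 *
 * Chunks of data larger than one descriptor can carry (16K - 1 bytes,
 * see ice_txd_use_count()) are split across multiple descriptors in the
 * inner while loop below.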
15212b245cb2SAnirudh Venkataramanan */ 1522d76a60baSAnirudh Venkataramanan static void 1523d76a60baSAnirudh Venkataramanan ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, 1524d76a60baSAnirudh Venkataramanan struct ice_tx_offload_params *off) 15252b245cb2SAnirudh Venkataramanan { 1526d76a60baSAnirudh Venkataramanan u64 td_offset, td_tag, td_cmd; 15272b245cb2SAnirudh Venkataramanan u16 i = tx_ring->next_to_use; 1528d7840976SMatthew Wilcox (Oracle) skb_frag_t *frag; 15292b245cb2SAnirudh Venkataramanan unsigned int data_len, size; 15302b245cb2SAnirudh Venkataramanan struct ice_tx_desc *tx_desc; 15312b245cb2SAnirudh Venkataramanan struct ice_tx_buf *tx_buf; 15322b245cb2SAnirudh Venkataramanan struct sk_buff *skb; 15332b245cb2SAnirudh Venkataramanan dma_addr_t dma; 15342b245cb2SAnirudh Venkataramanan 1535d76a60baSAnirudh Venkataramanan td_tag = off->td_l2tag1; 1536d76a60baSAnirudh Venkataramanan td_cmd = off->td_cmd; 1537d76a60baSAnirudh Venkataramanan td_offset = off->td_offset; 15382b245cb2SAnirudh Venkataramanan skb = first->skb; 15392b245cb2SAnirudh Venkataramanan 15402b245cb2SAnirudh Venkataramanan data_len = skb->data_len; 15412b245cb2SAnirudh Venkataramanan size = skb_headlen(skb); 15422b245cb2SAnirudh Venkataramanan 15432b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, i); 15442b245cb2SAnirudh Venkataramanan 1545d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1546d76a60baSAnirudh Venkataramanan td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1547d76a60baSAnirudh Venkataramanan td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1548d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1549d76a60baSAnirudh Venkataramanan } 1550d76a60baSAnirudh Venkataramanan 15512b245cb2SAnirudh Venkataramanan dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 15522b245cb2SAnirudh Venkataramanan 15532b245cb2SAnirudh Venkataramanan tx_buf = first; 15542b245cb2SAnirudh Venkataramanan 15552b245cb2SAnirudh Venkataramanan for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 15562b245cb2SAnirudh Venkataramanan unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 15572b245cb2SAnirudh Venkataramanan 15582b245cb2SAnirudh Venkataramanan if (dma_mapping_error(tx_ring->dev, dma)) 15592b245cb2SAnirudh Venkataramanan goto dma_error; 15602b245cb2SAnirudh Venkataramanan 15612b245cb2SAnirudh Venkataramanan /* record length, and DMA address */ 15622b245cb2SAnirudh Venkataramanan dma_unmap_len_set(tx_buf, len, size); 15632b245cb2SAnirudh Venkataramanan dma_unmap_addr_set(tx_buf, dma, dma); 15642b245cb2SAnirudh Venkataramanan 15652b245cb2SAnirudh Venkataramanan /* align size to end of page */ 15662b245cb2SAnirudh Venkataramanan max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 15672b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 15682b245cb2SAnirudh Venkataramanan 15692b245cb2SAnirudh Venkataramanan /* account for data chunks larger than the hardware 15702b245cb2SAnirudh Venkataramanan * can handle 15712b245cb2SAnirudh Venkataramanan */ 15722b245cb2SAnirudh Venkataramanan while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 15732b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 15742b245cb2SAnirudh Venkataramanan build_ctob(td_cmd, td_offset, max_data, td_tag); 15752b245cb2SAnirudh Venkataramanan 15762b245cb2SAnirudh Venkataramanan tx_desc++; 15772b245cb2SAnirudh Venkataramanan i++; 15782b245cb2SAnirudh Venkataramanan 15792b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 15802b245cb2SAnirudh Venkataramanan tx_desc = 
ICE_TX_DESC(tx_ring, 0); 15812b245cb2SAnirudh Venkataramanan i = 0; 15822b245cb2SAnirudh Venkataramanan } 15832b245cb2SAnirudh Venkataramanan 15842b245cb2SAnirudh Venkataramanan dma += max_data; 15852b245cb2SAnirudh Venkataramanan size -= max_data; 15862b245cb2SAnirudh Venkataramanan 15872b245cb2SAnirudh Venkataramanan max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 15882b245cb2SAnirudh Venkataramanan tx_desc->buf_addr = cpu_to_le64(dma); 15892b245cb2SAnirudh Venkataramanan } 15902b245cb2SAnirudh Venkataramanan 15912b245cb2SAnirudh Venkataramanan if (likely(!data_len)) 15922b245cb2SAnirudh Venkataramanan break; 15932b245cb2SAnirudh Venkataramanan 15942b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, 15952b245cb2SAnirudh Venkataramanan size, td_tag); 15962b245cb2SAnirudh Venkataramanan 15972b245cb2SAnirudh Venkataramanan tx_desc++; 15982b245cb2SAnirudh Venkataramanan i++; 15992b245cb2SAnirudh Venkataramanan 16002b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) { 16012b245cb2SAnirudh Venkataramanan tx_desc = ICE_TX_DESC(tx_ring, 0); 16022b245cb2SAnirudh Venkataramanan i = 0; 16032b245cb2SAnirudh Venkataramanan } 16042b245cb2SAnirudh Venkataramanan 16052b245cb2SAnirudh Venkataramanan size = skb_frag_size(frag); 16062b245cb2SAnirudh Venkataramanan data_len -= size; 16072b245cb2SAnirudh Venkataramanan 16082b245cb2SAnirudh Venkataramanan dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 16092b245cb2SAnirudh Venkataramanan DMA_TO_DEVICE); 16102b245cb2SAnirudh Venkataramanan 16112b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 16122b245cb2SAnirudh Venkataramanan } 16132b245cb2SAnirudh Venkataramanan 16142b245cb2SAnirudh Venkataramanan /* record bytecount for BQL */ 16152b245cb2SAnirudh Venkataramanan netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 16162b245cb2SAnirudh Venkataramanan 16172b245cb2SAnirudh Venkataramanan /* record SW timestamp if HW timestamp is not available */ 16182b245cb2SAnirudh Venkataramanan skb_tx_timestamp(first->skb); 16192b245cb2SAnirudh Venkataramanan 16202b245cb2SAnirudh Venkataramanan i++; 16212b245cb2SAnirudh Venkataramanan if (i == tx_ring->count) 16222b245cb2SAnirudh Venkataramanan i = 0; 16232b245cb2SAnirudh Venkataramanan 16242b245cb2SAnirudh Venkataramanan /* write last descriptor with RS and EOP bits */ 16252b245cb2SAnirudh Venkataramanan td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS); 16262b245cb2SAnirudh Venkataramanan tx_desc->cmd_type_offset_bsz = 16272b245cb2SAnirudh Venkataramanan build_ctob(td_cmd, td_offset, size, td_tag); 16282b245cb2SAnirudh Venkataramanan 16292b245cb2SAnirudh Venkataramanan /* Force memory writes to complete before letting h/w know there 16302b245cb2SAnirudh Venkataramanan * are new descriptors to fetch. 16312b245cb2SAnirudh Venkataramanan * 16322b245cb2SAnirudh Venkataramanan * We also use this memory barrier to make certain all of the 16332b245cb2SAnirudh Venkataramanan * status bits have been updated before next_to_watch is written. 
16342b245cb2SAnirudh Venkataramanan */ 16352b245cb2SAnirudh Venkataramanan wmb(); 16362b245cb2SAnirudh Venkataramanan 16372b245cb2SAnirudh Venkataramanan /* set next_to_watch value indicating a packet is present */ 16382b245cb2SAnirudh Venkataramanan first->next_to_watch = tx_desc; 16392b245cb2SAnirudh Venkataramanan 16402b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 16412b245cb2SAnirudh Venkataramanan 16422b245cb2SAnirudh Venkataramanan ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 16432b245cb2SAnirudh Venkataramanan 16442b245cb2SAnirudh Venkataramanan /* notify HW of packet */ 16456b16f9eeSFlorian Westphal if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 16462b245cb2SAnirudh Venkataramanan writel(i, tx_ring->tail); 16472b245cb2SAnirudh Venkataramanan } 16482b245cb2SAnirudh Venkataramanan 16492b245cb2SAnirudh Venkataramanan return; 16502b245cb2SAnirudh Venkataramanan 16512b245cb2SAnirudh Venkataramanan dma_error: 16522f2da36eSAnirudh Venkataramanan /* clear DMA mappings for failed tx_buf map */ 16532b245cb2SAnirudh Venkataramanan for (;;) { 16542b245cb2SAnirudh Venkataramanan tx_buf = &tx_ring->tx_buf[i]; 16552b245cb2SAnirudh Venkataramanan ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 16562b245cb2SAnirudh Venkataramanan if (tx_buf == first) 16572b245cb2SAnirudh Venkataramanan break; 16582b245cb2SAnirudh Venkataramanan if (i == 0) 16592b245cb2SAnirudh Venkataramanan i = tx_ring->count; 16602b245cb2SAnirudh Venkataramanan i--; 16612b245cb2SAnirudh Venkataramanan } 16622b245cb2SAnirudh Venkataramanan 16632b245cb2SAnirudh Venkataramanan tx_ring->next_to_use = i; 16642b245cb2SAnirudh Venkataramanan } 16652b245cb2SAnirudh Venkataramanan 16662b245cb2SAnirudh Venkataramanan /** 1667d76a60baSAnirudh Venkataramanan * ice_tx_csum - Enable Tx checksum offloads 1668d76a60baSAnirudh Venkataramanan * @first: pointer to the first descriptor 1669d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1670d76a60baSAnirudh Venkataramanan * 1671d76a60baSAnirudh Venkataramanan * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 
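 *
 * The MACLEN, IPLEN and L4_LEN fields packed into the descriptor offset
 * below are expressed in units of 2, 4 and 4 bytes respectively, which
 * is why the raw header lengths are divided before being shifted into
 * place; e.g. a plain 14 byte Ethernet header yields a MACLEN of 7.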
1672d76a60baSAnirudh Venkataramanan */ 1673d76a60baSAnirudh Venkataramanan static 1674d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1675d76a60baSAnirudh Venkataramanan { 1676d76a60baSAnirudh Venkataramanan u32 l4_len = 0, l3_len = 0, l2_len = 0; 1677d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1678d76a60baSAnirudh Venkataramanan union { 1679d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1680d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1681d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1682d76a60baSAnirudh Venkataramanan } ip; 1683d76a60baSAnirudh Venkataramanan union { 1684d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1685d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1686d76a60baSAnirudh Venkataramanan } l4; 1687d76a60baSAnirudh Venkataramanan __be16 frag_off, protocol; 1688d76a60baSAnirudh Venkataramanan unsigned char *exthdr; 1689d76a60baSAnirudh Venkataramanan u32 offset, cmd = 0; 1690d76a60baSAnirudh Venkataramanan u8 l4_proto = 0; 1691d76a60baSAnirudh Venkataramanan 1692d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1693d76a60baSAnirudh Venkataramanan return 0; 1694d76a60baSAnirudh Venkataramanan 1695d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1696d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1697d76a60baSAnirudh Venkataramanan 1698d76a60baSAnirudh Venkataramanan /* compute outer L2 header size */ 1699d76a60baSAnirudh Venkataramanan l2_len = ip.hdr - skb->data; 1700d76a60baSAnirudh Venkataramanan offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1701d76a60baSAnirudh Venkataramanan 1702d76a60baSAnirudh Venkataramanan if (skb->encapsulation) 1703d76a60baSAnirudh Venkataramanan return -1; 1704d76a60baSAnirudh Venkataramanan 1705d76a60baSAnirudh Venkataramanan /* Enable IP checksum offloads */ 1706d76a60baSAnirudh Venkataramanan protocol = vlan_get_protocol(skb); 1707d76a60baSAnirudh Venkataramanan if (protocol == htons(ETH_P_IP)) { 1708d76a60baSAnirudh Venkataramanan l4_proto = ip.v4->protocol; 1709d76a60baSAnirudh Venkataramanan /* the stack computes the IP header already, the only time we 1710d76a60baSAnirudh Venkataramanan * need the hardware to recompute it is in the case of TSO. 
1711d76a60baSAnirudh Venkataramanan */ 1712d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1713d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1714d76a60baSAnirudh Venkataramanan else 1715d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1716d76a60baSAnirudh Venkataramanan 1717d76a60baSAnirudh Venkataramanan } else if (protocol == htons(ETH_P_IPV6)) { 1718d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1719d76a60baSAnirudh Venkataramanan exthdr = ip.hdr + sizeof(*ip.v6); 1720d76a60baSAnirudh Venkataramanan l4_proto = ip.v6->nexthdr; 1721d76a60baSAnirudh Venkataramanan if (l4.hdr != exthdr) 1722d76a60baSAnirudh Venkataramanan ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1723d76a60baSAnirudh Venkataramanan &frag_off); 1724d76a60baSAnirudh Venkataramanan } else { 1725d76a60baSAnirudh Venkataramanan return -1; 1726d76a60baSAnirudh Venkataramanan } 1727d76a60baSAnirudh Venkataramanan 1728d76a60baSAnirudh Venkataramanan /* compute inner L3 header size */ 1729d76a60baSAnirudh Venkataramanan l3_len = l4.hdr - ip.hdr; 1730d76a60baSAnirudh Venkataramanan offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1731d76a60baSAnirudh Venkataramanan 1732d76a60baSAnirudh Venkataramanan /* Enable L4 checksum offloads */ 1733d76a60baSAnirudh Venkataramanan switch (l4_proto) { 1734d76a60baSAnirudh Venkataramanan case IPPROTO_TCP: 1735d76a60baSAnirudh Venkataramanan /* enable checksum offloads */ 1736d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1737d76a60baSAnirudh Venkataramanan l4_len = l4.tcp->doff; 1738d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1739d76a60baSAnirudh Venkataramanan break; 1740d76a60baSAnirudh Venkataramanan case IPPROTO_UDP: 1741d76a60baSAnirudh Venkataramanan /* enable UDP checksum offload */ 1742d76a60baSAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1743d76a60baSAnirudh Venkataramanan l4_len = (sizeof(struct udphdr) >> 2); 1744d76a60baSAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1745d76a60baSAnirudh Venkataramanan break; 1746d76a60baSAnirudh Venkataramanan case IPPROTO_SCTP: 1747cf909e19SAnirudh Venkataramanan /* enable SCTP checksum offload */ 1748cf909e19SAnirudh Venkataramanan cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1749cf909e19SAnirudh Venkataramanan l4_len = sizeof(struct sctphdr) >> 2; 1750cf909e19SAnirudh Venkataramanan offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1751cf909e19SAnirudh Venkataramanan break; 1752cf909e19SAnirudh Venkataramanan 1753d76a60baSAnirudh Venkataramanan default: 1754d76a60baSAnirudh Venkataramanan if (first->tx_flags & ICE_TX_FLAGS_TSO) 1755d76a60baSAnirudh Venkataramanan return -1; 1756d76a60baSAnirudh Venkataramanan skb_checksum_help(skb); 1757d76a60baSAnirudh Venkataramanan return 0; 1758d76a60baSAnirudh Venkataramanan } 1759d76a60baSAnirudh Venkataramanan 1760d76a60baSAnirudh Venkataramanan off->td_cmd |= cmd; 1761d76a60baSAnirudh Venkataramanan off->td_offset |= offset; 1762d76a60baSAnirudh Venkataramanan return 1; 1763d76a60baSAnirudh Venkataramanan } 1764d76a60baSAnirudh Venkataramanan 1765d76a60baSAnirudh Venkataramanan /** 1766f9867df6SAnirudh Venkataramanan * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 1767d76a60baSAnirudh Venkataramanan * @tx_ring: ring to send buffer on 1768d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1769d76a60baSAnirudh Venkataramanan * 1770d76a60baSAnirudh Venkataramanan * Checks the skb and set 
up correspondingly several generic transmit flags 1771d76a60baSAnirudh Venkataramanan * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 1772d76a60baSAnirudh Venkataramanan * 1773d76a60baSAnirudh Venkataramanan * Returns an error code to indicate the frame should be dropped upon error, 1774d76a60baSAnirudh Venkataramanan * otherwise returns 0 to indicate the flags have been set properly. 1775d76a60baSAnirudh Venkataramanan */ 1776d76a60baSAnirudh Venkataramanan static int 1777d76a60baSAnirudh Venkataramanan ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) 1778d76a60baSAnirudh Venkataramanan { 1779d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1780d76a60baSAnirudh Venkataramanan __be16 protocol = skb->protocol; 1781d76a60baSAnirudh Venkataramanan 1782d76a60baSAnirudh Venkataramanan if (protocol == htons(ETH_P_8021Q) && 1783d76a60baSAnirudh Venkataramanan !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { 1784d76a60baSAnirudh Venkataramanan /* when HW VLAN acceleration is turned off by the user the 1785d76a60baSAnirudh Venkataramanan * stack sets the protocol to 8021q so that the driver 1786d76a60baSAnirudh Venkataramanan * can take any steps required to support the SW only 1787d76a60baSAnirudh Venkataramanan * VLAN handling. In our case the driver doesn't need 1788d76a60baSAnirudh Venkataramanan * to take any further steps so just set the protocol 1789d76a60baSAnirudh Venkataramanan * to the encapsulated ethertype. 1790d76a60baSAnirudh Venkataramanan */ 1791d76a60baSAnirudh Venkataramanan skb->protocol = vlan_get_protocol(skb); 17925f6aa50eSAnirudh Venkataramanan return 0; 1793d76a60baSAnirudh Venkataramanan } 1794d76a60baSAnirudh Venkataramanan 1795d76a60baSAnirudh Venkataramanan /* if we have a HW VLAN tag being added, default to the HW one */ 1796d76a60baSAnirudh Venkataramanan if (skb_vlan_tag_present(skb)) { 1797d76a60baSAnirudh Venkataramanan first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 1798d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 1799d76a60baSAnirudh Venkataramanan } else if (protocol == htons(ETH_P_8021Q)) { 1800d76a60baSAnirudh Venkataramanan struct vlan_hdr *vhdr, _vhdr; 1801d76a60baSAnirudh Venkataramanan 1802d76a60baSAnirudh Venkataramanan /* for SW VLAN, check the next protocol and store the tag */ 1803d76a60baSAnirudh Venkataramanan vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN, 1804d76a60baSAnirudh Venkataramanan sizeof(_vhdr), 1805d76a60baSAnirudh Venkataramanan &_vhdr); 1806d76a60baSAnirudh Venkataramanan if (!vhdr) 1807d76a60baSAnirudh Venkataramanan return -EINVAL; 1808d76a60baSAnirudh Venkataramanan 1809d76a60baSAnirudh Venkataramanan first->tx_flags |= ntohs(vhdr->h_vlan_TCI) << 1810d76a60baSAnirudh Venkataramanan ICE_TX_FLAGS_VLAN_S; 1811d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_SW_VLAN; 1812d76a60baSAnirudh Venkataramanan } 1813d76a60baSAnirudh Venkataramanan 18145f6aa50eSAnirudh Venkataramanan return ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 1815d76a60baSAnirudh Venkataramanan } 1816d76a60baSAnirudh Venkataramanan 1817d76a60baSAnirudh Venkataramanan /** 1818d76a60baSAnirudh Venkataramanan * ice_tso - computes mss and TSO length to prepare for TSO 1819d76a60baSAnirudh Venkataramanan * @first: pointer to struct ice_tx_buf 1820d76a60baSAnirudh Venkataramanan * @off: pointer to struct that holds offload parameters 1821d76a60baSAnirudh Venkataramanan * 1822d76a60baSAnirudh Venkataramanan * Returns 0 or error
(negative) if TSO can't happen, 1 otherwise. 1823d76a60baSAnirudh Venkataramanan */ 1824d76a60baSAnirudh Venkataramanan static 1825d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1826d76a60baSAnirudh Venkataramanan { 1827d76a60baSAnirudh Venkataramanan struct sk_buff *skb = first->skb; 1828d76a60baSAnirudh Venkataramanan union { 1829d76a60baSAnirudh Venkataramanan struct iphdr *v4; 1830d76a60baSAnirudh Venkataramanan struct ipv6hdr *v6; 1831d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1832d76a60baSAnirudh Venkataramanan } ip; 1833d76a60baSAnirudh Venkataramanan union { 1834d76a60baSAnirudh Venkataramanan struct tcphdr *tcp; 1835d76a60baSAnirudh Venkataramanan unsigned char *hdr; 1836d76a60baSAnirudh Venkataramanan } l4; 1837d76a60baSAnirudh Venkataramanan u64 cd_mss, cd_tso_len; 1838d76a60baSAnirudh Venkataramanan u32 paylen, l4_start; 1839d76a60baSAnirudh Venkataramanan int err; 1840d76a60baSAnirudh Venkataramanan 1841d76a60baSAnirudh Venkataramanan if (skb->ip_summed != CHECKSUM_PARTIAL) 1842d76a60baSAnirudh Venkataramanan return 0; 1843d76a60baSAnirudh Venkataramanan 1844d76a60baSAnirudh Venkataramanan if (!skb_is_gso(skb)) 1845d76a60baSAnirudh Venkataramanan return 0; 1846d76a60baSAnirudh Venkataramanan 1847d76a60baSAnirudh Venkataramanan err = skb_cow_head(skb, 0); 1848d76a60baSAnirudh Venkataramanan if (err < 0) 1849d76a60baSAnirudh Venkataramanan return err; 1850d76a60baSAnirudh Venkataramanan 1851c3a6825eSBruce Allan /* cppcheck-suppress unreadVariable */ 1852d76a60baSAnirudh Venkataramanan ip.hdr = skb_network_header(skb); 1853d76a60baSAnirudh Venkataramanan l4.hdr = skb_transport_header(skb); 1854d76a60baSAnirudh Venkataramanan 1855d76a60baSAnirudh Venkataramanan /* initialize outer IP header fields */ 1856d76a60baSAnirudh Venkataramanan if (ip.v4->version == 4) { 1857d76a60baSAnirudh Venkataramanan ip.v4->tot_len = 0; 1858d76a60baSAnirudh Venkataramanan ip.v4->check = 0; 1859d76a60baSAnirudh Venkataramanan } else { 1860d76a60baSAnirudh Venkataramanan ip.v6->payload_len = 0; 1861d76a60baSAnirudh Venkataramanan } 1862d76a60baSAnirudh Venkataramanan 1863d76a60baSAnirudh Venkataramanan /* determine offset of transport header */ 1864d76a60baSAnirudh Venkataramanan l4_start = l4.hdr - skb->data; 1865d76a60baSAnirudh Venkataramanan 1866d76a60baSAnirudh Venkataramanan /* remove payload length from checksum */ 1867d76a60baSAnirudh Venkataramanan paylen = skb->len - l4_start; 1868d76a60baSAnirudh Venkataramanan csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); 1869d76a60baSAnirudh Venkataramanan 1870d76a60baSAnirudh Venkataramanan /* compute length of segmentation header */ 1871d76a60baSAnirudh Venkataramanan off->header_len = (l4.tcp->doff * 4) + l4_start; 1872d76a60baSAnirudh Venkataramanan 1873d76a60baSAnirudh Venkataramanan /* update gso_segs and bytecount */ 1874d76a60baSAnirudh Venkataramanan first->gso_segs = skb_shinfo(skb)->gso_segs; 1875d944b469SBrett Creeley first->bytecount += (first->gso_segs - 1) * off->header_len; 1876d76a60baSAnirudh Venkataramanan 1877d76a60baSAnirudh Venkataramanan cd_tso_len = skb->len - off->header_len; 1878d76a60baSAnirudh Venkataramanan cd_mss = skb_shinfo(skb)->gso_size; 1879d76a60baSAnirudh Venkataramanan 1880d76a60baSAnirudh Venkataramanan /* record cdesc_qw1 with TSO parameters */ 1881e65e9e15SBruce Allan off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 1882d76a60baSAnirudh Venkataramanan (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | 1883d76a60baSAnirudh 
Venkataramanan (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 1884e65e9e15SBruce Allan (cd_mss << ICE_TXD_CTX_QW1_MSS_S)); 1885d76a60baSAnirudh Venkataramanan first->tx_flags |= ICE_TX_FLAGS_TSO; 1886d76a60baSAnirudh Venkataramanan return 1; 1887d76a60baSAnirudh Venkataramanan } 1888d76a60baSAnirudh Venkataramanan 1889d76a60baSAnirudh Venkataramanan /** 18902b245cb2SAnirudh Venkataramanan * ice_txd_use_count - estimate the number of descriptors needed for Tx 18912b245cb2SAnirudh Venkataramanan * @size: transmit request size in bytes 18922b245cb2SAnirudh Venkataramanan * 18932b245cb2SAnirudh Venkataramanan * Due to hardware alignment restrictions (4K alignment), we need to 18942b245cb2SAnirudh Venkataramanan * assume that we can have no more than 12K of data per descriptor, even 18952b245cb2SAnirudh Venkataramanan * though each descriptor can take up to 16K - 1 bytes of aligned memory. 18962b245cb2SAnirudh Venkataramanan * Thus, we need to divide by 12K. But division is slow! Instead, 18972b245cb2SAnirudh Venkataramanan * we decompose the operation into shifts and one relatively cheap 18982b245cb2SAnirudh Venkataramanan * multiply operation. 18992b245cb2SAnirudh Venkataramanan * 19002b245cb2SAnirudh Venkataramanan * To divide by 12K, we first divide by 4K, then divide by 3: 19012b245cb2SAnirudh Venkataramanan * To divide by 4K, shift right by 12 bits 19022b245cb2SAnirudh Venkataramanan * To divide by 3, multiply by 85, then divide by 256 19032b245cb2SAnirudh Venkataramanan * (Divide by 256 is done by shifting right by 8 bits) 19042b245cb2SAnirudh Venkataramanan * Finally, we add one to round up. Because 256 isn't an exact multiple of 19052b245cb2SAnirudh Venkataramanan * 3, we'll underestimate near each multiple of 12K. This is actually more 19062b245cb2SAnirudh Venkataramanan * accurate as we have 4K - 1 of wiggle room that we can fit into the last 19072b245cb2SAnirudh Venkataramanan * segment. For our purposes this is accurate out to 1M which is orders of 19082b245cb2SAnirudh Venkataramanan * magnitude greater than our largest possible GSO size. 19092b245cb2SAnirudh Venkataramanan * 19102b245cb2SAnirudh Venkataramanan * This would then be implemented as: 1911c585ea42SBrett Creeley * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 19122b245cb2SAnirudh Venkataramanan * 19132b245cb2SAnirudh Venkataramanan * Since multiplication and division are commutative, we can reorder 19142b245cb2SAnirudh Venkataramanan * operations into: 1915c585ea42SBrett Creeley * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 19162b245cb2SAnirudh Venkataramanan */ 19172b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size) 19182b245cb2SAnirudh Venkataramanan { 1919c585ea42SBrett Creeley return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 19202b245cb2SAnirudh Venkataramanan } 19212b245cb2SAnirudh Venkataramanan 19222b245cb2SAnirudh Venkataramanan /** 1923d337f2afSAnirudh Venkataramanan * ice_xmit_desc_count - calculate number of Tx descriptors needed 19242b245cb2SAnirudh Venkataramanan * @skb: send buffer 19252b245cb2SAnirudh Venkataramanan * 19262b245cb2SAnirudh Venkataramanan * Returns number of data descriptors needed for this skb. 
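 *
 * For example, assuming a linear skb with a 13000 byte head and no
 * fragments: ice_txd_use_count(13000) = ((13000 * 85) >> 20) +
 * ICE_DESCS_FOR_SKB_DATA_PTR, i.e. one descriptor more than the
 * baseline, because 13000 bytes exceed the conservative 12K per
 * descriptor assumption described above.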
19222b245cb2SAnirudh Venkataramanan /**
1923d337f2afSAnirudh Venkataramanan  * ice_xmit_desc_count - calculate number of Tx descriptors needed
19242b245cb2SAnirudh Venkataramanan  * @skb: send buffer
19252b245cb2SAnirudh Venkataramanan  *
19262b245cb2SAnirudh Venkataramanan  * Returns number of data descriptors needed for this skb.
19272b245cb2SAnirudh Venkataramanan  */
19282b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
19292b245cb2SAnirudh Venkataramanan {
1930d7840976SMatthew Wilcox (Oracle) 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
19312b245cb2SAnirudh Venkataramanan 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
19322b245cb2SAnirudh Venkataramanan 	unsigned int count = 0, size = skb_headlen(skb);
19332b245cb2SAnirudh Venkataramanan 
19342b245cb2SAnirudh Venkataramanan 	for (;;) {
19352b245cb2SAnirudh Venkataramanan 		count += ice_txd_use_count(size);
19362b245cb2SAnirudh Venkataramanan 
19372b245cb2SAnirudh Venkataramanan 		if (!nr_frags--)
19382b245cb2SAnirudh Venkataramanan 			break;
19392b245cb2SAnirudh Venkataramanan 
19402b245cb2SAnirudh Venkataramanan 		size = skb_frag_size(frag++);
19412b245cb2SAnirudh Venkataramanan 	}
19422b245cb2SAnirudh Venkataramanan 
19432b245cb2SAnirudh Venkataramanan 	return count;
19442b245cb2SAnirudh Venkataramanan }
19452b245cb2SAnirudh Venkataramanan 
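As a hypothetical worked example of the count this returns (the fragment sizes are made up for illustration): a TSO skb whose linear area holds only 54 bytes of headers and whose data is attached as three fragments of 32768, 4096 and 1448 bytes needs 1 descriptor for the head plus 3 + 1 + 1 for the fragments, since (32768 * 85) >> 20 is 2 and one more is added per call, giving a total count of 6.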
19462b245cb2SAnirudh Venkataramanan /**
19472b245cb2SAnirudh Venkataramanan  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
19482b245cb2SAnirudh Venkataramanan  * @skb: send buffer
19492b245cb2SAnirudh Venkataramanan  *
19502b245cb2SAnirudh Venkataramanan  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
19512b245cb2SAnirudh Venkataramanan  * and so we need to figure out the cases where we need to linearize the skb.
19522b245cb2SAnirudh Venkataramanan  *
19532b245cb2SAnirudh Venkataramanan  * For TSO we need to count the TSO header and segment payload separately.
19542b245cb2SAnirudh Venkataramanan  * As such we need to check cases where we have 7 fragments or more as we
19552b245cb2SAnirudh Venkataramanan  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
19562b245cb2SAnirudh Venkataramanan  * the segment payload in the first descriptor, and another 7 for the
19572b245cb2SAnirudh Venkataramanan  * fragments.
19582b245cb2SAnirudh Venkataramanan  */
19592b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb)
19602b245cb2SAnirudh Venkataramanan {
1961d7840976SMatthew Wilcox (Oracle) 	const skb_frag_t *frag, *stale;
19622b245cb2SAnirudh Venkataramanan 	int nr_frags, sum;
19632b245cb2SAnirudh Venkataramanan 
19642b245cb2SAnirudh Venkataramanan 	/* no need to check if number of frags is less than 7 */
19652b245cb2SAnirudh Venkataramanan 	nr_frags = skb_shinfo(skb)->nr_frags;
19662b245cb2SAnirudh Venkataramanan 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
19672b245cb2SAnirudh Venkataramanan 		return false;
19682b245cb2SAnirudh Venkataramanan 
19692b245cb2SAnirudh Venkataramanan 	/* We need to walk through the list and validate that each group
19702b245cb2SAnirudh Venkataramanan 	 * of 6 fragments totals at least gso_size.
19712b245cb2SAnirudh Venkataramanan 	 */
19722b245cb2SAnirudh Venkataramanan 	nr_frags -= ICE_MAX_BUF_TXD - 2;
19732b245cb2SAnirudh Venkataramanan 	frag = &skb_shinfo(skb)->frags[0];
19742b245cb2SAnirudh Venkataramanan 
19752b245cb2SAnirudh Venkataramanan 	/* Initialize size to the negative value of gso_size minus 1. We
19762b245cb2SAnirudh Venkataramanan 	 * use this as the worst case scenario in which the frag ahead
19772b245cb2SAnirudh Venkataramanan 	 * of us only provides one byte which is why we are limited to 6
19782b245cb2SAnirudh Venkataramanan 	 * descriptors for a single transmit as the header and previous
19792b245cb2SAnirudh Venkataramanan 	 * fragment are already consuming 2 descriptors.
19802b245cb2SAnirudh Venkataramanan 	 */
19812b245cb2SAnirudh Venkataramanan 	sum = 1 - skb_shinfo(skb)->gso_size;
19822b245cb2SAnirudh Venkataramanan 
19832b245cb2SAnirudh Venkataramanan 	/* Add size of frags 0 through 4 to create our initial sum */
19842b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
19852b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
19862b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
19872b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
19882b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
19892b245cb2SAnirudh Venkataramanan 
19902b245cb2SAnirudh Venkataramanan 	/* Walk through fragments adding latest fragment, testing it, and
19912b245cb2SAnirudh Venkataramanan 	 * then removing stale fragments from the sum.
19922b245cb2SAnirudh Venkataramanan 	 */
19932b245cb2SAnirudh Venkataramanan 	stale = &skb_shinfo(skb)->frags[0];
19942b245cb2SAnirudh Venkataramanan 	for (;;) {
19952b245cb2SAnirudh Venkataramanan 		sum += skb_frag_size(frag++);
19962b245cb2SAnirudh Venkataramanan 
19972b245cb2SAnirudh Venkataramanan 		/* if sum is negative we failed to make sufficient progress */
19982b245cb2SAnirudh Venkataramanan 		if (sum < 0)
19992b245cb2SAnirudh Venkataramanan 			return true;
20002b245cb2SAnirudh Venkataramanan 
20012b245cb2SAnirudh Venkataramanan 		if (!nr_frags--)
20022b245cb2SAnirudh Venkataramanan 			break;
20032b245cb2SAnirudh Venkataramanan 
20042b245cb2SAnirudh Venkataramanan 		sum -= skb_frag_size(stale++);
20052b245cb2SAnirudh Venkataramanan 	}
20062b245cb2SAnirudh Venkataramanan 
20072b245cb2SAnirudh Venkataramanan 	return false;
20082b245cb2SAnirudh Venkataramanan }
20092b245cb2SAnirudh Venkataramanan 
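The sliding-window test above is easiest to see with concrete numbers. The standalone sketch below re-implements the same walk over a plain array of fragment sizes; MAX_BUF_TXD, the array and the sample gso_size values are assumptions made up for the example and are not the driver's types or macros:

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUF_TXD 8	/* assumed to match ICE_MAX_BUF_TXD */

/* Standalone re-implementation of the window test, over plain sizes */
static bool needs_linearize(const int *frag_size, int nr_frags, int gso_size)
{
	const int *frag = frag_size, *stale = frag_size;
	int sum;

	if (nr_frags < MAX_BUF_TXD - 1)
		return false;

	nr_frags -= MAX_BUF_TXD - 2;

	/* worst case: header + previous fragment already hold 2 descriptors */
	sum = 1 - gso_size;
	sum += *frag++;
	sum += *frag++;
	sum += *frag++;
	sum += *frag++;
	sum += *frag++;

	for (;;) {
		sum += *frag++;
		if (sum < 0)
			return true;
		if (!nr_frags--)
			break;
		sum -= *stale++;
	}
	return false;
}

int main(void)
{
	int frags[8] = { 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000 };

	/* eight 1000B frags can't cover a 9000B segment within 6 buffers */
	printf("gso 9000 -> %d\n", needs_linearize(frags, 8, 9000));	/* prints 1 */
	/* the same layout easily covers 1448B segments */
	printf("gso 1448 -> %d\n", needs_linearize(frags, 8, 1448));	/* prints 0 */
	return 0;
}

With gso_size 9000, the running sum starts at -8999 and six 1000-byte fragments only bring it to -2999, so the window never recovers and the skb would be linearized; with gso_size 1448 the sum stays positive throughout.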
20102b245cb2SAnirudh Venkataramanan /**
20112b245cb2SAnirudh Venkataramanan  * ice_chk_linearize - Check if there are more than 8 fragments per packet
20122b245cb2SAnirudh Venkataramanan  * @skb: send buffer
20132b245cb2SAnirudh Venkataramanan  * @count: number of buffers used
20142b245cb2SAnirudh Venkataramanan  *
20152b245cb2SAnirudh Venkataramanan  * Note: Our HW can't scatter-gather more than 8 fragments to build
20162b245cb2SAnirudh Venkataramanan  * a packet on the wire and so we need to figure out the cases where we
20172b245cb2SAnirudh Venkataramanan  * need to linearize the skb.
20182b245cb2SAnirudh Venkataramanan  */
20192b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
20202b245cb2SAnirudh Venkataramanan {
20212b245cb2SAnirudh Venkataramanan 	/* Both TSO and single send will work if count is less than 8 */
20222b245cb2SAnirudh Venkataramanan 	if (likely(count < ICE_MAX_BUF_TXD))
20232b245cb2SAnirudh Venkataramanan 		return false;
20242b245cb2SAnirudh Venkataramanan 
20252b245cb2SAnirudh Venkataramanan 	if (skb_is_gso(skb))
20262b245cb2SAnirudh Venkataramanan 		return __ice_chk_linearize(skb);
20272b245cb2SAnirudh Venkataramanan 
20282b245cb2SAnirudh Venkataramanan 	/* we can support up to 8 data buffers for a single send */
20292b245cb2SAnirudh Venkataramanan 	return count != ICE_MAX_BUF_TXD;
20302b245cb2SAnirudh Venkataramanan }
20312b245cb2SAnirudh Venkataramanan 
20322b245cb2SAnirudh Venkataramanan /**
20332b245cb2SAnirudh Venkataramanan  * ice_xmit_frame_ring - Sends buffer on Tx ring
20342b245cb2SAnirudh Venkataramanan  * @skb: send buffer
20352b245cb2SAnirudh Venkataramanan  * @tx_ring: ring to send buffer on
20362b245cb2SAnirudh Venkataramanan  *
20372b245cb2SAnirudh Venkataramanan  * Returns NETDEV_TX_OK if sent, else an error code
20382b245cb2SAnirudh Venkataramanan  */
20392b245cb2SAnirudh Venkataramanan static netdev_tx_t
20402b245cb2SAnirudh Venkataramanan ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
20412b245cb2SAnirudh Venkataramanan {
2042d76a60baSAnirudh Venkataramanan 	struct ice_tx_offload_params offload = { 0 };
20432b245cb2SAnirudh Venkataramanan 	struct ice_tx_buf *first;
20442b245cb2SAnirudh Venkataramanan 	unsigned int count;
2045d76a60baSAnirudh Venkataramanan 	int tso, csum;
20462b245cb2SAnirudh Venkataramanan 
20472b245cb2SAnirudh Venkataramanan 	count = ice_xmit_desc_count(skb);
20482b245cb2SAnirudh Venkataramanan 	if (ice_chk_linearize(skb, count)) {
20492b245cb2SAnirudh Venkataramanan 		if (__skb_linearize(skb))
20502b245cb2SAnirudh Venkataramanan 			goto out_drop;
20512b245cb2SAnirudh Venkataramanan 		count = ice_txd_use_count(skb->len);
20522b245cb2SAnirudh Venkataramanan 		tx_ring->tx_stats.tx_linearize++;
20532b245cb2SAnirudh Venkataramanan 	}
20542b245cb2SAnirudh Venkataramanan 
20552b245cb2SAnirudh Venkataramanan 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
20562b245cb2SAnirudh Venkataramanan 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
20572b245cb2SAnirudh Venkataramanan 	 *       + 4 desc gap to avoid the cache line where head is,
20582b245cb2SAnirudh Venkataramanan 	 *       + 1 desc for context descriptor,
20592b245cb2SAnirudh Venkataramanan 	 * otherwise try next time
20602b245cb2SAnirudh Venkataramanan 	 */
2061c585ea42SBrett Creeley 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2062c585ea42SBrett Creeley 			      ICE_DESCS_FOR_CTX_DESC)) {
20632b245cb2SAnirudh Venkataramanan 		tx_ring->tx_stats.tx_busy++;
20642b245cb2SAnirudh Venkataramanan 		return NETDEV_TX_BUSY;
20652b245cb2SAnirudh Venkataramanan 	}
20662b245cb2SAnirudh Venkataramanan 
2067d76a60baSAnirudh Venkataramanan 	offload.tx_ring = tx_ring;
2068d76a60baSAnirudh Venkataramanan 
20692b245cb2SAnirudh Venkataramanan 	/* record the location of the first descriptor for this packet */
20702b245cb2SAnirudh Venkataramanan 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
20712b245cb2SAnirudh Venkataramanan 	first->skb = skb;
20722b245cb2SAnirudh Venkataramanan 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
20732b245cb2SAnirudh Venkataramanan 	first->gso_segs = 1;
2074d76a60baSAnirudh Venkataramanan 	first->tx_flags = 0;
20752b245cb2SAnirudh Venkataramanan 
2076d76a60baSAnirudh Venkataramanan 	/* prepare the VLAN tagging flags for Tx */
2077d76a60baSAnirudh Venkataramanan 	if (ice_tx_prepare_vlan_flags(tx_ring, first))
2078d76a60baSAnirudh Venkataramanan 		goto out_drop;
2079d76a60baSAnirudh Venkataramanan 
2080d76a60baSAnirudh Venkataramanan 	/* set up TSO offload */
2081d76a60baSAnirudh Venkataramanan 	tso = ice_tso(first, &offload);
2082d76a60baSAnirudh Venkataramanan 	if (tso < 0)
2083d76a60baSAnirudh Venkataramanan 		goto out_drop;
2084d76a60baSAnirudh Venkataramanan 
2085d76a60baSAnirudh Venkataramanan 	/* always set up Tx checksum offload */
2086d76a60baSAnirudh Venkataramanan 	csum = ice_tx_csum(first, &offload);
2087d76a60baSAnirudh Venkataramanan 	if (csum < 0)
2088d76a60baSAnirudh Venkataramanan 		goto out_drop;
2089d76a60baSAnirudh Venkataramanan 
2090d76a60baSAnirudh Venkataramanan 	if (tso || offload.cd_tunnel_params) {
2091d76a60baSAnirudh Venkataramanan 		struct ice_tx_ctx_desc *cdesc;
2092d76a60baSAnirudh Venkataramanan 		int i = tx_ring->next_to_use;
2093d76a60baSAnirudh Venkataramanan 
2094d76a60baSAnirudh Venkataramanan 		/* grab the next descriptor */
2095d76a60baSAnirudh Venkataramanan 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2096d76a60baSAnirudh Venkataramanan 		i++;
2097d76a60baSAnirudh Venkataramanan 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2098d76a60baSAnirudh Venkataramanan 
2099d76a60baSAnirudh Venkataramanan 		/* setup context descriptor */
2100d76a60baSAnirudh Venkataramanan 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2101d76a60baSAnirudh Venkataramanan 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2102d76a60baSAnirudh Venkataramanan 		cdesc->rsvd = cpu_to_le16(0);
2103d76a60baSAnirudh Venkataramanan 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2104d76a60baSAnirudh Venkataramanan 	}
2105d76a60baSAnirudh Venkataramanan 
2106d76a60baSAnirudh Venkataramanan 	ice_tx_map(tx_ring, first, &offload);
21072b245cb2SAnirudh Venkataramanan 	return NETDEV_TX_OK;
21082b245cb2SAnirudh Venkataramanan 
21092b245cb2SAnirudh Venkataramanan out_drop:
21102b245cb2SAnirudh Venkataramanan 	dev_kfree_skb_any(skb);
21112b245cb2SAnirudh Venkataramanan 	return NETDEV_TX_OK;
21122b245cb2SAnirudh Venkataramanan }
21132b245cb2SAnirudh Venkataramanan 
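As a worked example of the ring-space check in ice_xmit_frame_ring above (assuming, as the in-code comment suggests, a 4-descriptor cache-line gap for ICE_DESCS_PER_CACHE_LINE and 1 descriptor for ICE_DESCS_FOR_CTX_DESC; the exact macro values live in the driver headers): a packet that needs count = 9 data descriptors must find at least 9 + 4 + 1 = 14 free ring entries, otherwise the queue is stopped, tx_busy is incremented and NETDEV_TX_BUSY asks the stack to retry the frame later.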
21142b245cb2SAnirudh Venkataramanan /**
21152b245cb2SAnirudh Venkataramanan  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
21162b245cb2SAnirudh Venkataramanan  * @skb: send buffer
21172b245cb2SAnirudh Venkataramanan  * @netdev: network interface device structure
21182b245cb2SAnirudh Venkataramanan  *
21192b245cb2SAnirudh Venkataramanan  * Returns NETDEV_TX_OK if sent, else an error code
21202b245cb2SAnirudh Venkataramanan  */
21212b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
21222b245cb2SAnirudh Venkataramanan {
21232b245cb2SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
21242b245cb2SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
21252b245cb2SAnirudh Venkataramanan 	struct ice_ring *tx_ring;
21262b245cb2SAnirudh Venkataramanan 
21272b245cb2SAnirudh Venkataramanan 	tx_ring = vsi->tx_rings[skb->queue_mapping];
21282b245cb2SAnirudh Venkataramanan 
21292b245cb2SAnirudh Venkataramanan 	/* hardware can't handle really short frames, hardware padding works
21302b245cb2SAnirudh Venkataramanan 	 * beyond this point
21312b245cb2SAnirudh Venkataramanan 	 */
21322b245cb2SAnirudh Venkataramanan 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
21332b245cb2SAnirudh Venkataramanan 		return NETDEV_TX_OK;
21342b245cb2SAnirudh Venkataramanan 
21352b245cb2SAnirudh Venkataramanan 	return ice_xmit_frame_ring(skb, tx_ring);
21362b245cb2SAnirudh Venkataramanan }
2137