// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"

#define ICE_RX_HDR_SIZE		256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

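/**
 * txring_txq - Find the netdev Tx queue that backs an ice Tx ring
 * @ring: Tx ring to look up the queue for
 */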
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	size = sizeof(struct ice_tx_buf) * tx_ring->count;
	memset(tx_ring->tx_buf, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
			     int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = vsi->work_lmt;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.pkts += total_pkts;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_pkts += total_pkts;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}
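
/* Index arithmetic note (illustrative): ice_clean_tx_irq() biases 'i' by
 * -count so the ring wrap test is a cheap "did i reach 0" check rather
 * than a compare against count. E.g. with count = 512 and
 * next_to_clean = 510, i starts at -2; after two increments i == 0 and
 * wraps back to -512 (descriptor index 0). The true ring index is always
 * i + count, which is what the function stores back into next_to_clean
 * once the loop finishes.
 */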

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	bi_size = sizeof(struct ice_tx_buf) * tx_ring->count;
	tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}
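
/* Usage sketch (hypothetical caller, not part of this file): the ring's
 * count and dev fields are expected to be populated before setup, and
 * ice_free_tx_ring() unwinds everything, including a partially set up
 * ring:
 *
 *	tx_ring->count = 128;			// example descriptor count
 *	tx_ring->dev = &pf->pdev->dev;		// assumed device backpointer
 *	if (ice_setup_tx_ring(tx_ring))
 *		return -ENOMEM;
 *	...
 *	ice_free_tx_ring(tx_ring);
 */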

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_buf->page, 0);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	size = sizeof(struct ice_rx_buf) * rx_ring->count;
	memset(rx_ring->rx_buf, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	bi_size = sizeof(struct ice_rx_buf) * rx_ring->count;
	rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
				  struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		if (!ice_alloc_mapped_page(rx_ring, bi))
			goto no_bufs;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return false;

no_bufs:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
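
/* Refill pattern (illustrative): callers batch replenishment rather than
 * refilling one descriptor at a time. ice_clean_rx_irq() below accumulates
 * cleaned_count and only calls ice_alloc_rx_bufs() once at least
 * ICE_RX_BUF_WRITE descriptors are free, OR-ing the result into a
 * 'failure' flag so a transient allocation failure is simply retried on
 * the next NAPI poll instead of being treated as fatal.
 */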

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buf: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 */
static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
			    union ice_32b_rx_flex_desc *rx_desc,
			    struct sk_buff *skb)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ICE_RXBUF_2048;
#else
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
	unsigned int truesize;
#endif /* PAGE_SIZE < 8192 */

	struct page *page;
	unsigned int size;

	size = le16_to_cpu(rx_desc->wb.pkt_len) &
		ICE_RX_FLX_DESC_PKT_LEN_M;

	page = rx_buf->page;

#if (PAGE_SIZE >= 8192)
	truesize = ALIGN(size, L1_CACHE_BYTES);
#endif /* PAGE_SIZE >= 8192 */

	/* will the data fit in the skb we allocated? if so, just
	 * copy it as it is pretty small anyway
	 */
	if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buf->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!ice_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_pages(page, 0);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buf->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buf->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += truesize;

	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	get_page(rx_buf->page);

	return true;
}
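
/* Worked example of the reuse logic above (4K pages): each page backs two
 * 2048-byte Rx buffers, and page_offset ^= ICE_RXBUF_2048 toggles between
 * offsets 0 and 2048. While the stack still holds a reference to one half,
 * the other half can be handed back to hardware via ice_reuse_rx_page().
 * With PAGE_SIZE >= 8192 the offset instead advances in cache-line-aligned
 * steps until less than one full buffer remains.
 */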

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void ice_reuse_rx_page(struct ice_ring *rx_ring,
			      struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buf = *old_buf;
}

/**
 * ice_fetch_rx_buf - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_desc: descriptor containing info written by hardware
 *
 * This function allocates an skb on the fly, and populates it with the page
 * data from the current receive descriptor, taking care to set up the skb
 * correctly, as well as handling calling the page recycle function if
 * necessary.
 */
static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
					union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_rx_buf *rx_buf;
	struct sk_buff *skb;
	struct page *page;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	page = rx_buf->page;
	prefetchw(page);

	skb = rx_buf->skb;

	if (likely(!skb)) {
		u8 *page_addr = page_address(page) + rx_buf->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch((void *)(page_addr + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */

		/* allocate a skb to store the frags */
		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
				       ICE_RX_HDR_SIZE,
				       GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_buf_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		skb_record_rx_queue(skb, rx_ring->q_index);
	} else {
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048,
					      DMA_FROM_DEVICE);

		rx_buf->skb = NULL;
	}

	/* pull page into skb */
	if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;

	return skb;
}

/**
 * ice_pull_tail - ice specific version of skb_pull_tail
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ice specific version of __pskb_pull_tail. The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ice_pull_tail(struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int pull_len;
	unsigned char *va;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the low memory pool by
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}
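
/* Illustrative effect: for an IPv4/TCP frame with no options,
 * eth_get_headlen() returns the 14 + 20 + 20 = 54 header bytes (capped at
 * ICE_RX_HDR_SIZE), so only those bytes are copied into the linear area
 * while the frag shrinks and its offset advances by the same amount,
 * keeping skb->len and truesize accurate without a full
 * __pskb_pull_tail().
 */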

/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ice_pull_tail(skb);

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
			     const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool ice_is_non_eop(struct ice_ring *rx_ring,
			   union ice_32b_rx_flex_desc *rx_desc,
			   struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
			union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
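		/* fall through */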
	default:
		break;
	}
	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void ice_process_skb_fields(struct ice_ring *rx_ring,
				   union ice_32b_rx_flex_desc *rx_desc,
				   struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 *
9342b245cb2SAnirudh Venkataramanan  * This function sends the completed packet (via skb) up the stack using
9352b245cb2SAnirudh Venkataramanan  * GRO receive functions (with/without VLAN tag)
9362b245cb2SAnirudh Venkataramanan  */
9372b245cb2SAnirudh Venkataramanan static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
9382b245cb2SAnirudh Venkataramanan 			    u16 vlan_tag)
9392b245cb2SAnirudh Venkataramanan {
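	/* Only forward the tag to the stack when HW VLAN stripping is enabled
	 * on the netdev and the 12-bit VLAN ID is non-zero; vlan_tag stays 0
	 * when the descriptor carried no L2TAG1.
	 */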
9402b245cb2SAnirudh Venkataramanan 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
9412b245cb2SAnirudh Venkataramanan 	    (vlan_tag & VLAN_VID_MASK)) {
9422b245cb2SAnirudh Venkataramanan 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
9432b245cb2SAnirudh Venkataramanan 	}
9442b245cb2SAnirudh Venkataramanan 	napi_gro_receive(&rx_ring->q_vector->napi, skb);
9452b245cb2SAnirudh Venkataramanan }
9462b245cb2SAnirudh Venkataramanan 
9472b245cb2SAnirudh Venkataramanan /**
9482b245cb2SAnirudh Venkataramanan  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
949d337f2afSAnirudh Venkataramanan  * @rx_ring: Rx descriptor ring to transact packets on
9502b245cb2SAnirudh Venkataramanan  * @budget: Total limit on number of packets to process
9512b245cb2SAnirudh Venkataramanan  *
9522b245cb2SAnirudh Venkataramanan  * This function provides a "bounce buffer" approach to Rx interrupt
9532b245cb2SAnirudh Venkataramanan  * processing. The advantage to this is that on systems that have
9542b245cb2SAnirudh Venkataramanan  * expensive overhead for IOMMU access this provides a means of avoiding
9552b245cb2SAnirudh Venkataramanan  * it by maintaining the mapping of the page to the system.
9562b245cb2SAnirudh Venkataramanan  *
9572b245cb2SAnirudh Venkataramanan  * Returns amount of work completed
9582b245cb2SAnirudh Venkataramanan  */
9592b245cb2SAnirudh Venkataramanan static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
9602b245cb2SAnirudh Venkataramanan {
9612b245cb2SAnirudh Venkataramanan 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
9622b245cb2SAnirudh Venkataramanan 	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
9632b245cb2SAnirudh Venkataramanan 	bool failure = false;
9642b245cb2SAnirudh Venkataramanan 
9652b245cb2SAnirudh Venkataramanan 	/* start the loop to process Rx packets bounded by 'budget' */
9662b245cb2SAnirudh Venkataramanan 	while (likely(total_rx_pkts < (unsigned int)budget)) {
9672b245cb2SAnirudh Venkataramanan 		union ice_32b_rx_flex_desc *rx_desc;
9682b245cb2SAnirudh Venkataramanan 		struct sk_buff *skb;
9692b245cb2SAnirudh Venkataramanan 		u16 stat_err_bits;
9702b245cb2SAnirudh Venkataramanan 		u16 vlan_tag = 0;
971d76a60baSAnirudh Venkataramanan 		u8 rx_ptype;
9722b245cb2SAnirudh Venkataramanan 
9732b245cb2SAnirudh Venkataramanan 		/* return some buffers to hardware, one at a time is too slow */
9742b245cb2SAnirudh Venkataramanan 		if (cleaned_count >= ICE_RX_BUF_WRITE) {
9752b245cb2SAnirudh Venkataramanan 			failure = failure ||
9762b245cb2SAnirudh Venkataramanan 				  ice_alloc_rx_bufs(rx_ring, cleaned_count);
9772b245cb2SAnirudh Venkataramanan 			cleaned_count = 0;
9782b245cb2SAnirudh Venkataramanan 		}
9792b245cb2SAnirudh Venkataramanan 
9802b245cb2SAnirudh Venkataramanan 		/* get the Rx desc from Rx ring based on 'next_to_clean' */
9812b245cb2SAnirudh Venkataramanan 		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
9822b245cb2SAnirudh Venkataramanan 
9832b245cb2SAnirudh Venkataramanan 		/* status_error_len will always be zero for unused descriptors
9842b245cb2SAnirudh Venkataramanan 		 * because it's cleared in cleanup, and overlaps with hdr_addr
9852b245cb2SAnirudh Venkataramanan 		 * which is always zero because packet split isn't used. If the
9862b245cb2SAnirudh Venkataramanan 		 * hardware wrote DD then it will be non-zero
9872b245cb2SAnirudh Venkataramanan 		 */
9882b245cb2SAnirudh Venkataramanan 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
9892b245cb2SAnirudh Venkataramanan 		if (!ice_test_staterr(rx_desc, stat_err_bits))
9902b245cb2SAnirudh Venkataramanan 			break;
9912b245cb2SAnirudh Venkataramanan 
9922b245cb2SAnirudh Venkataramanan 		/* This memory barrier is needed to keep us from reading
9932b245cb2SAnirudh Venkataramanan 		 * any other fields out of the rx_desc until we know the
9942b245cb2SAnirudh Venkataramanan 		 * DD bit is set.
9952b245cb2SAnirudh Venkataramanan 		 */
9962b245cb2SAnirudh Venkataramanan 		dma_rmb();
9972b245cb2SAnirudh Venkataramanan 
9982b245cb2SAnirudh Venkataramanan 		/* allocate (if needed) and populate skb */
9992b245cb2SAnirudh Venkataramanan 		skb = ice_fetch_rx_buf(rx_ring, rx_desc);
10002b245cb2SAnirudh Venkataramanan 		if (!skb)
10012b245cb2SAnirudh Venkataramanan 			break;
10022b245cb2SAnirudh Venkataramanan 
10032b245cb2SAnirudh Venkataramanan 		cleaned_count++;
10042b245cb2SAnirudh Venkataramanan 
10052b245cb2SAnirudh Venkataramanan 		/* skip if it is a NOP desc */
10062b245cb2SAnirudh Venkataramanan 		if (ice_is_non_eop(rx_ring, rx_desc, skb))
10072b245cb2SAnirudh Venkataramanan 			continue;
10082b245cb2SAnirudh Venkataramanan 
10092b245cb2SAnirudh Venkataramanan 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
10102b245cb2SAnirudh Venkataramanan 		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
10112b245cb2SAnirudh Venkataramanan 			dev_kfree_skb_any(skb);
10122b245cb2SAnirudh Venkataramanan 			continue;
10132b245cb2SAnirudh Venkataramanan 		}
10142b245cb2SAnirudh Venkataramanan 
1015d76a60baSAnirudh Venkataramanan 		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1016d76a60baSAnirudh Venkataramanan 			ICE_RX_FLEX_DESC_PTYPE_M;
1017d76a60baSAnirudh Venkataramanan 
10182b245cb2SAnirudh Venkataramanan 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
10192b245cb2SAnirudh Venkataramanan 		if (ice_test_staterr(rx_desc, stat_err_bits))
10202b245cb2SAnirudh Venkataramanan 			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
10212b245cb2SAnirudh Venkataramanan 
10222b245cb2SAnirudh Venkataramanan 		/* correct empty headers and pad skb if needed (to make a valid
10232b245cb2SAnirudh Venkataramanan 		 * Ethernet frame)
10242b245cb2SAnirudh Venkataramanan 		 */
10252b245cb2SAnirudh Venkataramanan 		if (ice_cleanup_headers(skb)) {
10262b245cb2SAnirudh Venkataramanan 			skb = NULL;
10272b245cb2SAnirudh Venkataramanan 			continue;
10282b245cb2SAnirudh Venkataramanan 		}
10292b245cb2SAnirudh Venkataramanan 
10302b245cb2SAnirudh Venkataramanan 		/* probably a little skewed due to removing CRC */
10312b245cb2SAnirudh Venkataramanan 		total_rx_bytes += skb->len;
10322b245cb2SAnirudh Venkataramanan 
1033d76a60baSAnirudh Venkataramanan 		/* populate checksum, VLAN, and protocol */
1034d76a60baSAnirudh Venkataramanan 		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1035d76a60baSAnirudh Venkataramanan 
10362b245cb2SAnirudh Venkataramanan 		/* send completed skb up the stack */
10372b245cb2SAnirudh Venkataramanan 		ice_receive_skb(rx_ring, skb, vlan_tag);
10382b245cb2SAnirudh Venkataramanan 
10392b245cb2SAnirudh Venkataramanan 		/* update budget accounting */
10402b245cb2SAnirudh Venkataramanan 		total_rx_pkts++;
10412b245cb2SAnirudh Venkataramanan 	}
10422b245cb2SAnirudh Venkataramanan 
10432b245cb2SAnirudh Venkataramanan 	/* update queue and vector specific stats */
10442b245cb2SAnirudh Venkataramanan 	u64_stats_update_begin(&rx_ring->syncp);
10452b245cb2SAnirudh Venkataramanan 	rx_ring->stats.pkts += total_rx_pkts;
10462b245cb2SAnirudh Venkataramanan 	rx_ring->stats.bytes += total_rx_bytes;
10472b245cb2SAnirudh Venkataramanan 	u64_stats_update_end(&rx_ring->syncp);
10482b245cb2SAnirudh Venkataramanan 	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
10492b245cb2SAnirudh Venkataramanan 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
10502b245cb2SAnirudh Venkataramanan 
10512b245cb2SAnirudh Venkataramanan 	/* guarantee a trip back through this routine if there was a failure */
10522b245cb2SAnirudh Venkataramanan 	return failure ? budget : (int)total_rx_pkts;
10532b245cb2SAnirudh Venkataramanan }
10542b245cb2SAnirudh Venkataramanan 
10552b245cb2SAnirudh Venkataramanan /**
10562b245cb2SAnirudh Venkataramanan  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
10572b245cb2SAnirudh Venkataramanan  * @napi: napi struct with our devices info in it
10582b245cb2SAnirudh Venkataramanan  * @budget: amount of work driver is allowed to do this pass, in packets
10592b245cb2SAnirudh Venkataramanan  *
10602b245cb2SAnirudh Venkataramanan  * This function will clean all queues associated with a q_vector.
10612b245cb2SAnirudh Venkataramanan  *
10622b245cb2SAnirudh Venkataramanan  * Returns the amount of work done
10632b245cb2SAnirudh Venkataramanan  */
10642b245cb2SAnirudh Venkataramanan int ice_napi_poll(struct napi_struct *napi, int budget)
10652b245cb2SAnirudh Venkataramanan {
10662b245cb2SAnirudh Venkataramanan 	struct ice_q_vector *q_vector =
10672b245cb2SAnirudh Venkataramanan 				container_of(napi, struct ice_q_vector, napi);
10682b245cb2SAnirudh Venkataramanan 	struct ice_vsi *vsi = q_vector->vsi;
10692b245cb2SAnirudh Venkataramanan 	struct ice_pf *pf = vsi->back;
10702b245cb2SAnirudh Venkataramanan 	bool clean_complete = true;
10712b245cb2SAnirudh Venkataramanan 	int budget_per_ring = 0;
10722b245cb2SAnirudh Venkataramanan 	struct ice_ring *ring;
10732b245cb2SAnirudh Venkataramanan 	int work_done = 0;
10742b245cb2SAnirudh Venkataramanan 
10752b245cb2SAnirudh Venkataramanan 	/* Since the actual Tx work is minimal, we can give the Tx a larger
10762b245cb2SAnirudh Venkataramanan 	 * budget and be more aggressive about cleaning up the Tx descriptors.
10772b245cb2SAnirudh Venkataramanan 	 */
10782b245cb2SAnirudh Venkataramanan 	ice_for_each_ring(ring, q_vector->tx)
10792b245cb2SAnirudh Venkataramanan 		if (!ice_clean_tx_irq(vsi, ring, budget))
10802b245cb2SAnirudh Venkataramanan 			clean_complete = false;
10812b245cb2SAnirudh Venkataramanan 
10822b245cb2SAnirudh Venkataramanan 	/* Handle case where we are called by netpoll with a budget of 0 */
10832b245cb2SAnirudh Venkataramanan 	if (budget <= 0)
10842b245cb2SAnirudh Venkataramanan 		return budget;
10852b245cb2SAnirudh Venkataramanan 
10862b245cb2SAnirudh Venkataramanan 	/* We attempt to distribute budget to each Rx queue fairly, but don't
10872b245cb2SAnirudh Venkataramanan 	 * allow the budget to go below 1 because that would exit polling early.
10882b245cb2SAnirudh Venkataramanan 	 */
10892b245cb2SAnirudh Venkataramanan 	if (q_vector->num_ring_rx)
10902b245cb2SAnirudh Venkataramanan 		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
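	/* e.g. a budget of 64 across 4 Rx rings gives each ring a budget of
	 * 16; with more rings than budget the max() clamps each ring to 1
	 */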
10912b245cb2SAnirudh Venkataramanan 
10922b245cb2SAnirudh Venkataramanan 	ice_for_each_ring(ring, q_vector->rx) {
10932b245cb2SAnirudh Venkataramanan 		int cleaned;
10942b245cb2SAnirudh Venkataramanan 
10952b245cb2SAnirudh Venkataramanan 		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
10962b245cb2SAnirudh Venkataramanan 		work_done += cleaned;
10972b245cb2SAnirudh Venkataramanan 		/* if we clean as many as budgeted, we must not be done */
10982b245cb2SAnirudh Venkataramanan 		if (cleaned >= budget_per_ring)
10992b245cb2SAnirudh Venkataramanan 			clean_complete = false;
11002b245cb2SAnirudh Venkataramanan 	}
11012b245cb2SAnirudh Venkataramanan 
11022b245cb2SAnirudh Venkataramanan 	/* If work not completed, return budget and polling will return */
11032b245cb2SAnirudh Venkataramanan 	if (!clean_complete)
11042b245cb2SAnirudh Venkataramanan 		return budget;
11052b245cb2SAnirudh Venkataramanan 
11062b245cb2SAnirudh Venkataramanan 	/* Work is done so exit the polling mode and re-enable the interrupt */
11072b245cb2SAnirudh Venkataramanan 	napi_complete_done(napi, work_done);
11082b245cb2SAnirudh Venkataramanan 	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
11092b245cb2SAnirudh Venkataramanan 		ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
1110e0c9fd9bSDave Ertman 
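	/* Returning the full budget would tell the NAPI core that more work
	 * remains even though napi_complete_done() was already called, so
	 * cap the return value at budget - 1.
	 */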
1111e0c9fd9bSDave Ertman 	return min(work_done, budget - 1);
11122b245cb2SAnirudh Venkataramanan }
11132b245cb2SAnirudh Venkataramanan 
11142b245cb2SAnirudh Venkataramanan /* helper function for building cmd/type/offset */
11152b245cb2SAnirudh Venkataramanan static __le64
11162b245cb2SAnirudh Venkataramanan build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
11172b245cb2SAnirudh Venkataramanan {
11182b245cb2SAnirudh Venkataramanan 	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
11192b245cb2SAnirudh Venkataramanan 			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
11202b245cb2SAnirudh Venkataramanan 			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
11212b245cb2SAnirudh Venkataramanan 			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
11222b245cb2SAnirudh Venkataramanan 			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
11232b245cb2SAnirudh Venkataramanan }
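
/* For illustration only: the last descriptor of a packet, written with the
 * RS and EOP bits below in ice_tx_map(), would be built as
 *
 *	build_ctob(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS, 0, size, 0)
 *
 * packing the DATA dtype in the low bits, the command flags at
 * ICE_TXD_QW1_CMD_S, the buffer size at ICE_TXD_QW1_TX_BUF_SZ_S, and the
 * L2 tag at ICE_TXD_QW1_L2TAG1_S of the little-endian quadword.
 */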
11242b245cb2SAnirudh Venkataramanan 
11252b245cb2SAnirudh Venkataramanan /**
1126d337f2afSAnirudh Venkataramanan  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
11272b245cb2SAnirudh Venkataramanan  * @tx_ring: the ring to be checked
11282b245cb2SAnirudh Venkataramanan  * @size: the number of descriptors we want to assure are available
11292b245cb2SAnirudh Venkataramanan  *
11302b245cb2SAnirudh Venkataramanan  * Returns -EBUSY if a stop is needed, else 0
11312b245cb2SAnirudh Venkataramanan  */
11322b245cb2SAnirudh Venkataramanan static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
11332b245cb2SAnirudh Venkataramanan {
11342b245cb2SAnirudh Venkataramanan 	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
11352b245cb2SAnirudh Venkataramanan 	/* Memory barrier before checking head and tail */
11362b245cb2SAnirudh Venkataramanan 	smp_mb();
11372b245cb2SAnirudh Venkataramanan 
11382b245cb2SAnirudh Venkataramanan 	/* Check again in case another CPU has just made room available. */
11392b245cb2SAnirudh Venkataramanan 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
11402b245cb2SAnirudh Venkataramanan 		return -EBUSY;
11412b245cb2SAnirudh Venkataramanan 
11422b245cb2SAnirudh Venkataramanan 	/* A reprieve! - use start_subqueue because it doesn't call schedule */
11432b245cb2SAnirudh Venkataramanan 	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
11442b245cb2SAnirudh Venkataramanan 	++tx_ring->tx_stats.restart_q;
11452b245cb2SAnirudh Venkataramanan 	return 0;
11462b245cb2SAnirudh Venkataramanan }
11472b245cb2SAnirudh Venkataramanan 
11482b245cb2SAnirudh Venkataramanan /**
1149d337f2afSAnirudh Venkataramanan  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
11502b245cb2SAnirudh Venkataramanan  * @tx_ring: the ring to be checked
11512b245cb2SAnirudh Venkataramanan  * @size:    the number of descriptors we want to assure are available
11522b245cb2SAnirudh Venkataramanan  *
11532b245cb2SAnirudh Venkataramanan  * Returns 0 if stop is not needed
11542b245cb2SAnirudh Venkataramanan  */
11552b245cb2SAnirudh Venkataramanan static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
11562b245cb2SAnirudh Venkataramanan {
11572b245cb2SAnirudh Venkataramanan 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
11582b245cb2SAnirudh Venkataramanan 		return 0;
1159d337f2afSAnirudh Venkataramanan 
11602b245cb2SAnirudh Venkataramanan 	return __ice_maybe_stop_tx(tx_ring, size);
11612b245cb2SAnirudh Venkataramanan }
11622b245cb2SAnirudh Venkataramanan 
11632b245cb2SAnirudh Venkataramanan /**
11642b245cb2SAnirudh Venkataramanan  * ice_tx_map - Build the Tx descriptor
11652b245cb2SAnirudh Venkataramanan  * @tx_ring: ring to send buffer on
11662b245cb2SAnirudh Venkataramanan  * @first: first buffer info buffer to use
1167d76a60baSAnirudh Venkataramanan  * @off: pointer to struct that holds offload parameters
11682b245cb2SAnirudh Venkataramanan  *
11692b245cb2SAnirudh Venkataramanan  * This function loops over the skb data pointed to by *first,
11702b245cb2SAnirudh Venkataramanan  * gets a DMA address for each memory location, and programs the
11712b245cb2SAnirudh Venkataramanan  * address and the length into the transmit descriptor.
11722b245cb2SAnirudh Venkataramanan  */
1173d76a60baSAnirudh Venkataramanan static void
1174d76a60baSAnirudh Venkataramanan ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1175d76a60baSAnirudh Venkataramanan 	   struct ice_tx_offload_params *off)
11762b245cb2SAnirudh Venkataramanan {
1177d76a60baSAnirudh Venkataramanan 	u64 td_offset, td_tag, td_cmd;
11782b245cb2SAnirudh Venkataramanan 	u16 i = tx_ring->next_to_use;
11792b245cb2SAnirudh Venkataramanan 	struct skb_frag_struct *frag;
11802b245cb2SAnirudh Venkataramanan 	unsigned int data_len, size;
11812b245cb2SAnirudh Venkataramanan 	struct ice_tx_desc *tx_desc;
11822b245cb2SAnirudh Venkataramanan 	struct ice_tx_buf *tx_buf;
11832b245cb2SAnirudh Venkataramanan 	struct sk_buff *skb;
11842b245cb2SAnirudh Venkataramanan 	dma_addr_t dma;
11852b245cb2SAnirudh Venkataramanan 
1186d76a60baSAnirudh Venkataramanan 	td_tag = off->td_l2tag1;
1187d76a60baSAnirudh Venkataramanan 	td_cmd = off->td_cmd;
1188d76a60baSAnirudh Venkataramanan 	td_offset = off->td_offset;
11892b245cb2SAnirudh Venkataramanan 	skb = first->skb;
11902b245cb2SAnirudh Venkataramanan 
11912b245cb2SAnirudh Venkataramanan 	data_len = skb->data_len;
11922b245cb2SAnirudh Venkataramanan 	size = skb_headlen(skb);
11932b245cb2SAnirudh Venkataramanan 
11942b245cb2SAnirudh Venkataramanan 	tx_desc = ICE_TX_DESC(tx_ring, i);
11952b245cb2SAnirudh Venkataramanan 
1196d76a60baSAnirudh Venkataramanan 	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1197d76a60baSAnirudh Venkataramanan 		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1198d76a60baSAnirudh Venkataramanan 		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1199d76a60baSAnirudh Venkataramanan 			  ICE_TX_FLAGS_VLAN_S;
1200d76a60baSAnirudh Venkataramanan 	}
1201d76a60baSAnirudh Venkataramanan 
12022b245cb2SAnirudh Venkataramanan 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
12032b245cb2SAnirudh Venkataramanan 
12042b245cb2SAnirudh Venkataramanan 	tx_buf = first;
12052b245cb2SAnirudh Venkataramanan 
12062b245cb2SAnirudh Venkataramanan 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
12072b245cb2SAnirudh Venkataramanan 		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
12082b245cb2SAnirudh Venkataramanan 
12092b245cb2SAnirudh Venkataramanan 		if (dma_mapping_error(tx_ring->dev, dma))
12102b245cb2SAnirudh Venkataramanan 			goto dma_error;
12112b245cb2SAnirudh Venkataramanan 
12122b245cb2SAnirudh Venkataramanan 		/* record length, and DMA address */
12132b245cb2SAnirudh Venkataramanan 		dma_unmap_len_set(tx_buf, len, size);
12142b245cb2SAnirudh Venkataramanan 		dma_unmap_addr_set(tx_buf, dma, dma);
12152b245cb2SAnirudh Venkataramanan 
12162b245cb2SAnirudh Venkataramanan 		/* align size to end of page */
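		/* for a power-of-two ICE_MAX_READ_REQ_SIZE, '-dma & (size - 1)'
		 * is the distance from dma up to the next read-request
		 * boundary, so the first oversized chunk ends on an aligned
		 * address and every later chunk starts aligned
		 */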
12172b245cb2SAnirudh Venkataramanan 		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
12182b245cb2SAnirudh Venkataramanan 		tx_desc->buf_addr = cpu_to_le64(dma);
12192b245cb2SAnirudh Venkataramanan 
12202b245cb2SAnirudh Venkataramanan 		/* account for data chunks larger than the hardware
12212b245cb2SAnirudh Venkataramanan 		 * can handle
12222b245cb2SAnirudh Venkataramanan 		 */
12232b245cb2SAnirudh Venkataramanan 		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
12242b245cb2SAnirudh Venkataramanan 			tx_desc->cmd_type_offset_bsz =
12252b245cb2SAnirudh Venkataramanan 				build_ctob(td_cmd, td_offset, max_data, td_tag);
12262b245cb2SAnirudh Venkataramanan 
12272b245cb2SAnirudh Venkataramanan 			tx_desc++;
12282b245cb2SAnirudh Venkataramanan 			i++;
12292b245cb2SAnirudh Venkataramanan 
12302b245cb2SAnirudh Venkataramanan 			if (i == tx_ring->count) {
12312b245cb2SAnirudh Venkataramanan 				tx_desc = ICE_TX_DESC(tx_ring, 0);
12322b245cb2SAnirudh Venkataramanan 				i = 0;
12332b245cb2SAnirudh Venkataramanan 			}
12342b245cb2SAnirudh Venkataramanan 
12352b245cb2SAnirudh Venkataramanan 			dma += max_data;
12362b245cb2SAnirudh Venkataramanan 			size -= max_data;
12372b245cb2SAnirudh Venkataramanan 
12382b245cb2SAnirudh Venkataramanan 			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
12392b245cb2SAnirudh Venkataramanan 			tx_desc->buf_addr = cpu_to_le64(dma);
12402b245cb2SAnirudh Venkataramanan 		}
12412b245cb2SAnirudh Venkataramanan 
12422b245cb2SAnirudh Venkataramanan 		if (likely(!data_len))
12432b245cb2SAnirudh Venkataramanan 			break;
12442b245cb2SAnirudh Venkataramanan 
12452b245cb2SAnirudh Venkataramanan 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
12462b245cb2SAnirudh Venkataramanan 							  size, td_tag);
12472b245cb2SAnirudh Venkataramanan 
12482b245cb2SAnirudh Venkataramanan 		tx_desc++;
12492b245cb2SAnirudh Venkataramanan 		i++;
12502b245cb2SAnirudh Venkataramanan 
12512b245cb2SAnirudh Venkataramanan 		if (i == tx_ring->count) {
12522b245cb2SAnirudh Venkataramanan 			tx_desc = ICE_TX_DESC(tx_ring, 0);
12532b245cb2SAnirudh Venkataramanan 			i = 0;
12542b245cb2SAnirudh Venkataramanan 		}
12552b245cb2SAnirudh Venkataramanan 
12562b245cb2SAnirudh Venkataramanan 		size = skb_frag_size(frag);
12572b245cb2SAnirudh Venkataramanan 		data_len -= size;
12582b245cb2SAnirudh Venkataramanan 
12592b245cb2SAnirudh Venkataramanan 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
12602b245cb2SAnirudh Venkataramanan 				       DMA_TO_DEVICE);
12612b245cb2SAnirudh Venkataramanan 
12622b245cb2SAnirudh Venkataramanan 		tx_buf = &tx_ring->tx_buf[i];
12632b245cb2SAnirudh Venkataramanan 	}
12642b245cb2SAnirudh Venkataramanan 
12652b245cb2SAnirudh Venkataramanan 	/* record bytecount for BQL */
12662b245cb2SAnirudh Venkataramanan 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
12672b245cb2SAnirudh Venkataramanan 
12682b245cb2SAnirudh Venkataramanan 	/* record SW timestamp if HW timestamp is not available */
12692b245cb2SAnirudh Venkataramanan 	skb_tx_timestamp(first->skb);
12702b245cb2SAnirudh Venkataramanan 
12712b245cb2SAnirudh Venkataramanan 	i++;
12722b245cb2SAnirudh Venkataramanan 	if (i == tx_ring->count)
12732b245cb2SAnirudh Venkataramanan 		i = 0;
12742b245cb2SAnirudh Venkataramanan 
12752b245cb2SAnirudh Venkataramanan 	/* write last descriptor with RS and EOP bits */
12762b245cb2SAnirudh Venkataramanan 	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
12772b245cb2SAnirudh Venkataramanan 	tx_desc->cmd_type_offset_bsz =
12782b245cb2SAnirudh Venkataramanan 			build_ctob(td_cmd, td_offset, size, td_tag);
12792b245cb2SAnirudh Venkataramanan 
12802b245cb2SAnirudh Venkataramanan 	/* Force memory writes to complete before letting h/w know there
12812b245cb2SAnirudh Venkataramanan 	 * are new descriptors to fetch.
12822b245cb2SAnirudh Venkataramanan 	 *
12832b245cb2SAnirudh Venkataramanan 	 * We also use this memory barrier to make certain all of the
12842b245cb2SAnirudh Venkataramanan 	 * status bits have been updated before next_to_watch is written.
12852b245cb2SAnirudh Venkataramanan 	 */
12862b245cb2SAnirudh Venkataramanan 	wmb();
12872b245cb2SAnirudh Venkataramanan 
12882b245cb2SAnirudh Venkataramanan 	/* set next_to_watch value indicating a packet is present */
12892b245cb2SAnirudh Venkataramanan 	first->next_to_watch = tx_desc;
12902b245cb2SAnirudh Venkataramanan 
12912b245cb2SAnirudh Venkataramanan 	tx_ring->next_to_use = i;
12922b245cb2SAnirudh Venkataramanan 
12932b245cb2SAnirudh Venkataramanan 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
12942b245cb2SAnirudh Venkataramanan 
12952b245cb2SAnirudh Venkataramanan 	/* notify HW of packet */
12962b245cb2SAnirudh Venkataramanan 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
12972b245cb2SAnirudh Venkataramanan 		writel(i, tx_ring->tail);
12982b245cb2SAnirudh Venkataramanan 
12992b245cb2SAnirudh Venkataramanan 		/* we need this if more than one processor can write to our tail
13002b245cb2SAnirudh Venkataramanan 		 * at a time; it synchronizes IO on IA64/Altix systems
13012b245cb2SAnirudh Venkataramanan 		 */
13022b245cb2SAnirudh Venkataramanan 		mmiowb();
13032b245cb2SAnirudh Venkataramanan 	}
13042b245cb2SAnirudh Venkataramanan 
13052b245cb2SAnirudh Venkataramanan 	return;
13062b245cb2SAnirudh Venkataramanan 
13072b245cb2SAnirudh Venkataramanan dma_error:
13082b245cb2SAnirudh Venkataramanan 	/* clear dma mappings for failed tx_buf map */
13092b245cb2SAnirudh Venkataramanan 	for (;;) {
13102b245cb2SAnirudh Venkataramanan 		tx_buf = &tx_ring->tx_buf[i];
13112b245cb2SAnirudh Venkataramanan 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
13122b245cb2SAnirudh Venkataramanan 		if (tx_buf == first)
13132b245cb2SAnirudh Venkataramanan 			break;
13142b245cb2SAnirudh Venkataramanan 		if (i == 0)
13152b245cb2SAnirudh Venkataramanan 			i = tx_ring->count;
13162b245cb2SAnirudh Venkataramanan 		i--;
13172b245cb2SAnirudh Venkataramanan 	}
13182b245cb2SAnirudh Venkataramanan 
13192b245cb2SAnirudh Venkataramanan 	tx_ring->next_to_use = i;
13202b245cb2SAnirudh Venkataramanan }
13212b245cb2SAnirudh Venkataramanan 
13222b245cb2SAnirudh Venkataramanan /**
1323d76a60baSAnirudh Venkataramanan  * ice_tx_csum - Enable Tx checksum offloads
1324d76a60baSAnirudh Venkataramanan  * @first: pointer to the first descriptor
1325d76a60baSAnirudh Venkataramanan  * @off: pointer to struct that holds offload parameters
1326d76a60baSAnirudh Venkataramanan  *
1327d76a60baSAnirudh Venkataramanan  * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1328d76a60baSAnirudh Venkataramanan  */
1329d76a60baSAnirudh Venkataramanan static
1330d76a60baSAnirudh Venkataramanan int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1331d76a60baSAnirudh Venkataramanan {
1332d76a60baSAnirudh Venkataramanan 	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1333d76a60baSAnirudh Venkataramanan 	struct sk_buff *skb = first->skb;
1334d76a60baSAnirudh Venkataramanan 	union {
1335d76a60baSAnirudh Venkataramanan 		struct iphdr *v4;
1336d76a60baSAnirudh Venkataramanan 		struct ipv6hdr *v6;
1337d76a60baSAnirudh Venkataramanan 		unsigned char *hdr;
1338d76a60baSAnirudh Venkataramanan 	} ip;
1339d76a60baSAnirudh Venkataramanan 	union {
1340d76a60baSAnirudh Venkataramanan 		struct tcphdr *tcp;
1341d76a60baSAnirudh Venkataramanan 		unsigned char *hdr;
1342d76a60baSAnirudh Venkataramanan 	} l4;
1343d76a60baSAnirudh Venkataramanan 	__be16 frag_off, protocol;
1344d76a60baSAnirudh Venkataramanan 	unsigned char *exthdr;
1345d76a60baSAnirudh Venkataramanan 	u32 offset, cmd = 0;
1346d76a60baSAnirudh Venkataramanan 	u8 l4_proto = 0;
1347d76a60baSAnirudh Venkataramanan 
1348d76a60baSAnirudh Venkataramanan 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1349d76a60baSAnirudh Venkataramanan 		return 0;
1350d76a60baSAnirudh Venkataramanan 
1351d76a60baSAnirudh Venkataramanan 	ip.hdr = skb_network_header(skb);
1352d76a60baSAnirudh Venkataramanan 	l4.hdr = skb_transport_header(skb);
1353d76a60baSAnirudh Venkataramanan 
1354d76a60baSAnirudh Venkataramanan 	/* compute outer L2 header size */
1355d76a60baSAnirudh Venkataramanan 	l2_len = ip.hdr - skb->data;
1356d76a60baSAnirudh Venkataramanan 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
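	/* MACLEN is in 2-byte words: a 14 byte Ethernet header gives a
	 * MACLEN of 7, an 18 byte VLAN-tagged header gives 9
	 */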
1357d76a60baSAnirudh Venkataramanan 
1358d76a60baSAnirudh Venkataramanan 	if (skb->encapsulation)
1359d76a60baSAnirudh Venkataramanan 		return -1;
1360d76a60baSAnirudh Venkataramanan 
1361d76a60baSAnirudh Venkataramanan 	/* Enable IP checksum offloads */
1362d76a60baSAnirudh Venkataramanan 	protocol = vlan_get_protocol(skb);
1363d76a60baSAnirudh Venkataramanan 	if (protocol == htons(ETH_P_IP)) {
1364d76a60baSAnirudh Venkataramanan 		l4_proto = ip.v4->protocol;
1365d76a60baSAnirudh Venkataramanan 		/* the stack computes the IP header checksum already, the only time we
1366d76a60baSAnirudh Venkataramanan 		 * need the hardware to recompute it is in the case of TSO.
1367d76a60baSAnirudh Venkataramanan 		 */
1368d76a60baSAnirudh Venkataramanan 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1369d76a60baSAnirudh Venkataramanan 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1370d76a60baSAnirudh Venkataramanan 		else
1371d76a60baSAnirudh Venkataramanan 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1372d76a60baSAnirudh Venkataramanan 
1373d76a60baSAnirudh Venkataramanan 	} else if (protocol == htons(ETH_P_IPV6)) {
1374d76a60baSAnirudh Venkataramanan 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1375d76a60baSAnirudh Venkataramanan 		exthdr = ip.hdr + sizeof(*ip.v6);
1376d76a60baSAnirudh Venkataramanan 		l4_proto = ip.v6->nexthdr;
1377d76a60baSAnirudh Venkataramanan 		if (l4.hdr != exthdr)
1378d76a60baSAnirudh Venkataramanan 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1379d76a60baSAnirudh Venkataramanan 					 &frag_off);
1380d76a60baSAnirudh Venkataramanan 	} else {
1381d76a60baSAnirudh Venkataramanan 		return -1;
1382d76a60baSAnirudh Venkataramanan 	}
1383d76a60baSAnirudh Venkataramanan 
1384d76a60baSAnirudh Venkataramanan 	/* compute inner L3 header size */
1385d76a60baSAnirudh Venkataramanan 	l3_len = l4.hdr - ip.hdr;
1386d76a60baSAnirudh Venkataramanan 	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
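	/* IPLEN is in 4-byte dwords: a 20 byte option-less IPv4 header gives
	 * an IPLEN of 5, a 40 byte IPv6 header gives 10
	 */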
1387d76a60baSAnirudh Venkataramanan 
1388d76a60baSAnirudh Venkataramanan 	/* Enable L4 checksum offloads */
1389d76a60baSAnirudh Venkataramanan 	switch (l4_proto) {
1390d76a60baSAnirudh Venkataramanan 	case IPPROTO_TCP:
1391d76a60baSAnirudh Venkataramanan 		/* enable checksum offloads */
1392d76a60baSAnirudh Venkataramanan 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1393d76a60baSAnirudh Venkataramanan 		l4_len = l4.tcp->doff;
1394d76a60baSAnirudh Venkataramanan 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1395d76a60baSAnirudh Venkataramanan 		break;
1396d76a60baSAnirudh Venkataramanan 	case IPPROTO_UDP:
1397d76a60baSAnirudh Venkataramanan 		/* enable UDP checksum offload */
1398d76a60baSAnirudh Venkataramanan 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1399d76a60baSAnirudh Venkataramanan 		l4_len = (sizeof(struct udphdr) >> 2);
1400d76a60baSAnirudh Venkataramanan 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1401d76a60baSAnirudh Venkataramanan 		break;
1402d76a60baSAnirudh Venkataramanan 	case IPPROTO_SCTP:
1403d76a60baSAnirudh Venkataramanan 	default:
1404d76a60baSAnirudh Venkataramanan 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1405d76a60baSAnirudh Venkataramanan 			return -1;
1406d76a60baSAnirudh Venkataramanan 		skb_checksum_help(skb);
1407d76a60baSAnirudh Venkataramanan 		return 0;
1408d76a60baSAnirudh Venkataramanan 	}
1409d76a60baSAnirudh Venkataramanan 
1410d76a60baSAnirudh Venkataramanan 	off->td_cmd |= cmd;
1411d76a60baSAnirudh Venkataramanan 	off->td_offset |= offset;
1412d76a60baSAnirudh Venkataramanan 	return 1;
1413d76a60baSAnirudh Venkataramanan }
1414d76a60baSAnirudh Venkataramanan 
1415d76a60baSAnirudh Venkataramanan /**
1416d76a60baSAnirudh Venkataramanan  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1417d76a60baSAnirudh Venkataramanan  * @tx_ring: ring to send buffer on
1418d76a60baSAnirudh Venkataramanan  * @first: pointer to struct ice_tx_buf
1419d76a60baSAnirudh Venkataramanan  *
1420d76a60baSAnirudh Venkataramanan  * Checks the skb and sets up the generic transmit flags related to VLAN
1421d76a60baSAnirudh Venkataramanan  * tagging for the HW, such as VLAN, DCB, etc.
1422d76a60baSAnirudh Venkataramanan  *
1423d76a60baSAnirudh Venkataramanan  * Returns an error code to indicate the frame should be dropped upon error,
1424d76a60baSAnirudh Venkataramanan  * otherwise returns 0 to indicate the flags have been set properly.
1425d76a60baSAnirudh Venkataramanan  */
1426d76a60baSAnirudh Venkataramanan static int
1427d76a60baSAnirudh Venkataramanan ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1428d76a60baSAnirudh Venkataramanan {
1429d76a60baSAnirudh Venkataramanan 	struct sk_buff *skb = first->skb;
1430d76a60baSAnirudh Venkataramanan 	__be16 protocol = skb->protocol;
1431d76a60baSAnirudh Venkataramanan 
1432d76a60baSAnirudh Venkataramanan 	if (protocol == htons(ETH_P_8021Q) &&
1433d76a60baSAnirudh Venkataramanan 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1434d76a60baSAnirudh Venkataramanan 		/* when HW VLAN acceleration is turned off by the user the
1435d76a60baSAnirudh Venkataramanan 		 * stack sets the protocol to 8021q so that the driver
1436d76a60baSAnirudh Venkataramanan 		 * can take any steps required to support the SW only
1437d76a60baSAnirudh Venkataramanan 		 * VLAN handling. In our case the driver doesn't need
1438d76a60baSAnirudh Venkataramanan 		 * to take any further steps so just set the protocol
1439d76a60baSAnirudh Venkataramanan 		 * to the encapsulated ethertype.
1440d76a60baSAnirudh Venkataramanan 		 */
1441d76a60baSAnirudh Venkataramanan 		skb->protocol = vlan_get_protocol(skb);
1442d76a60baSAnirudh Venkataramanan 		goto out;
1443d76a60baSAnirudh Venkataramanan 	}
1444d76a60baSAnirudh Venkataramanan 
1445d76a60baSAnirudh Venkataramanan 	/* if we have a HW VLAN tag being added, default to the HW one */
1446d76a60baSAnirudh Venkataramanan 	if (skb_vlan_tag_present(skb)) {
1447d76a60baSAnirudh Venkataramanan 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1448d76a60baSAnirudh Venkataramanan 		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1449d76a60baSAnirudh Venkataramanan 	} else if (protocol == htons(ETH_P_8021Q)) {
1450d76a60baSAnirudh Venkataramanan 		struct vlan_hdr *vhdr, _vhdr;
1451d76a60baSAnirudh Venkataramanan 
1452d76a60baSAnirudh Venkataramanan 		/* for SW VLAN, check the next protocol and store the tag */
1453d76a60baSAnirudh Venkataramanan 		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1454d76a60baSAnirudh Venkataramanan 							     sizeof(_vhdr),
1455d76a60baSAnirudh Venkataramanan 							     &_vhdr);
1456d76a60baSAnirudh Venkataramanan 		if (!vhdr)
1457d76a60baSAnirudh Venkataramanan 			return -EINVAL;
1458d76a60baSAnirudh Venkataramanan 
1459d76a60baSAnirudh Venkataramanan 		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1460d76a60baSAnirudh Venkataramanan 				   ICE_TX_FLAGS_VLAN_S;
1461d76a60baSAnirudh Venkataramanan 		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1462d76a60baSAnirudh Venkataramanan 	}
1463d76a60baSAnirudh Venkataramanan 
1464d76a60baSAnirudh Venkataramanan out:
1465d76a60baSAnirudh Venkataramanan 	return 0;
1466d76a60baSAnirudh Venkataramanan }
1467d76a60baSAnirudh Venkataramanan 
1468d76a60baSAnirudh Venkataramanan /**
1469d76a60baSAnirudh Venkataramanan  * ice_tso - computes mss and TSO length to prepare for TSO
1470d76a60baSAnirudh Venkataramanan  * @first: pointer to struct ice_tx_buf
1471d76a60baSAnirudh Venkataramanan  * @off: pointer to struct that holds offload parameters
1472d76a60baSAnirudh Venkataramanan  *
1473d76a60baSAnirudh Venkataramanan  * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1474d76a60baSAnirudh Venkataramanan  */
1475d76a60baSAnirudh Venkataramanan static
1476d76a60baSAnirudh Venkataramanan int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1477d76a60baSAnirudh Venkataramanan {
1478d76a60baSAnirudh Venkataramanan 	struct sk_buff *skb = first->skb;
1479d76a60baSAnirudh Venkataramanan 	union {
1480d76a60baSAnirudh Venkataramanan 		struct iphdr *v4;
1481d76a60baSAnirudh Venkataramanan 		struct ipv6hdr *v6;
1482d76a60baSAnirudh Venkataramanan 		unsigned char *hdr;
1483d76a60baSAnirudh Venkataramanan 	} ip;
1484d76a60baSAnirudh Venkataramanan 	union {
1485d76a60baSAnirudh Venkataramanan 		struct tcphdr *tcp;
1486d76a60baSAnirudh Venkataramanan 		unsigned char *hdr;
1487d76a60baSAnirudh Venkataramanan 	} l4;
1488d76a60baSAnirudh Venkataramanan 	u64 cd_mss, cd_tso_len;
1489d76a60baSAnirudh Venkataramanan 	u32 paylen, l4_start;
1490d76a60baSAnirudh Venkataramanan 	int err;
1491d76a60baSAnirudh Venkataramanan 
1492d76a60baSAnirudh Venkataramanan 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1493d76a60baSAnirudh Venkataramanan 		return 0;
1494d76a60baSAnirudh Venkataramanan 
1495d76a60baSAnirudh Venkataramanan 	if (!skb_is_gso(skb))
1496d76a60baSAnirudh Venkataramanan 		return 0;
1497d76a60baSAnirudh Venkataramanan 
1498d76a60baSAnirudh Venkataramanan 	err = skb_cow_head(skb, 0);
1499d76a60baSAnirudh Venkataramanan 	if (err < 0)
1500d76a60baSAnirudh Venkataramanan 		return err;
1501d76a60baSAnirudh Venkataramanan 
1502d76a60baSAnirudh Venkataramanan 	ip.hdr = skb_network_header(skb);
1503d76a60baSAnirudh Venkataramanan 	l4.hdr = skb_transport_header(skb);
1504d76a60baSAnirudh Venkataramanan 
1505d76a60baSAnirudh Venkataramanan 	/* initialize outer IP header fields */
1506d76a60baSAnirudh Venkataramanan 	if (ip.v4->version == 4) {
1507d76a60baSAnirudh Venkataramanan 		ip.v4->tot_len = 0;
1508d76a60baSAnirudh Venkataramanan 		ip.v4->check = 0;
1509d76a60baSAnirudh Venkataramanan 	} else {
1510d76a60baSAnirudh Venkataramanan 		ip.v6->payload_len = 0;
1511d76a60baSAnirudh Venkataramanan 	}
1512d76a60baSAnirudh Venkataramanan 
1513d76a60baSAnirudh Venkataramanan 	/* determine offset of transport header */
1514d76a60baSAnirudh Venkataramanan 	l4_start = l4.hdr - skb->data;
1515d76a60baSAnirudh Venkataramanan 
1516d76a60baSAnirudh Venkataramanan 	/* remove payload length from checksum */
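	/* the stack seeded l4.tcp->check with a pseudo-header checksum that
	 * covers the full payload length; folding paylen back out lets the
	 * HW add each segment's own length when it performs TSO
	 */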
1517d76a60baSAnirudh Venkataramanan 	paylen = skb->len - l4_start;
1518d76a60baSAnirudh Venkataramanan 	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1519d76a60baSAnirudh Venkataramanan 
1520d76a60baSAnirudh Venkataramanan 	/* compute length of segmentation header */
1521d76a60baSAnirudh Venkataramanan 	off->header_len = (l4.tcp->doff * 4) + l4_start;
1522d76a60baSAnirudh Venkataramanan 
1523d76a60baSAnirudh Venkataramanan 	/* update gso_segs and bytecount */
1524d76a60baSAnirudh Venkataramanan 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1525d944b469SBrett Creeley 	first->bytecount += (first->gso_segs - 1) * off->header_len;
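	/* e.g. a TSO skb with a 54 byte header that yields 7 segments adds
	 * 6 * 54 = 324 bytes of replicated headers to the wire bytecount
	 */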
1526d76a60baSAnirudh Venkataramanan 
1527d76a60baSAnirudh Venkataramanan 	cd_tso_len = skb->len - off->header_len;
1528d76a60baSAnirudh Venkataramanan 	cd_mss = skb_shinfo(skb)->gso_size;
1529d76a60baSAnirudh Venkataramanan 
1530d76a60baSAnirudh Venkataramanan 	/* record cdesc_qw1 with TSO parameters */
1531d76a60baSAnirudh Venkataramanan 	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
1532d76a60baSAnirudh Venkataramanan 			 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1533d76a60baSAnirudh Venkataramanan 			 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1534d76a60baSAnirudh Venkataramanan 			 (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
1535d76a60baSAnirudh Venkataramanan 	first->tx_flags |= ICE_TX_FLAGS_TSO;
1536d76a60baSAnirudh Venkataramanan 	return 1;
1537d76a60baSAnirudh Venkataramanan }
1538d76a60baSAnirudh Venkataramanan 
1539d76a60baSAnirudh Venkataramanan /**
15402b245cb2SAnirudh Venkataramanan  * ice_txd_use_count - estimate the number of descriptors needed for Tx
15412b245cb2SAnirudh Venkataramanan  * @size: transmit request size in bytes
15422b245cb2SAnirudh Venkataramanan  *
15432b245cb2SAnirudh Venkataramanan  * Due to hardware alignment restrictions (4K alignment), we need to
15442b245cb2SAnirudh Venkataramanan  * assume that we can have no more than 12K of data per descriptor, even
15452b245cb2SAnirudh Venkataramanan  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
15462b245cb2SAnirudh Venkataramanan  * Thus, we need to divide by 12K. But division is slow! Instead,
15472b245cb2SAnirudh Venkataramanan  * we decompose the operation into shifts and one relatively cheap
15482b245cb2SAnirudh Venkataramanan  * multiply operation.
15492b245cb2SAnirudh Venkataramanan  *
15502b245cb2SAnirudh Venkataramanan  * To divide by 12K, we first divide by 4K, then divide by 3:
15512b245cb2SAnirudh Venkataramanan  *     To divide by 4K, shift right by 12 bits
15522b245cb2SAnirudh Venkataramanan  *     To divide by 3, multiply by 85, then divide by 256
15532b245cb2SAnirudh Venkataramanan  *     (Divide by 256 is done by shifting right by 8 bits)
15542b245cb2SAnirudh Venkataramanan  * Finally, we add one to round up. Because 256 isn't an exact multiple of
15552b245cb2SAnirudh Venkataramanan  * 3, we'll underestimate near each multiple of 12K. This is actually more
15562b245cb2SAnirudh Venkataramanan  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
15572b245cb2SAnirudh Venkataramanan  * segment. For our purposes this is accurate out to 1M which is orders of
15582b245cb2SAnirudh Venkataramanan  * magnitude greater than our largest possible GSO size.
15592b245cb2SAnirudh Venkataramanan  *
15602b245cb2SAnirudh Venkataramanan  * This would then be implemented as:
1561c585ea42SBrett Creeley  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
15622b245cb2SAnirudh Venkataramanan  *
15632b245cb2SAnirudh Venkataramanan  * Since multiplication and division are commutative, we can reorder
15642b245cb2SAnirudh Venkataramanan  * operations into:
1565c585ea42SBrett Creeley  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
15662b245cb2SAnirudh Venkataramanan  */
15672b245cb2SAnirudh Venkataramanan static unsigned int ice_txd_use_count(unsigned int size)
15682b245cb2SAnirudh Venkataramanan {
1569c585ea42SBrett Creeley 	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
15702b245cb2SAnirudh Venkataramanan }
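
/* Quick check of the approximation above: for a 60000 byte request,
 * (60000 * 85) >> 20 = 4, and adding ICE_DESCS_FOR_SKB_DATA_PTR gives 5,
 * matching the 60000 / 12288 = 4.88 -> 5 descriptors actually required.
 */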
15712b245cb2SAnirudh Venkataramanan 
15722b245cb2SAnirudh Venkataramanan /**
1573d337f2afSAnirudh Venkataramanan  * ice_xmit_desc_count - calculate number of Tx descriptors needed
15742b245cb2SAnirudh Venkataramanan  * @skb: send buffer
15752b245cb2SAnirudh Venkataramanan  *
15762b245cb2SAnirudh Venkataramanan  * Returns number of data descriptors needed for this skb.
15772b245cb2SAnirudh Venkataramanan  */
15782b245cb2SAnirudh Venkataramanan static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
15792b245cb2SAnirudh Venkataramanan {
15802b245cb2SAnirudh Venkataramanan 	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
15812b245cb2SAnirudh Venkataramanan 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
15822b245cb2SAnirudh Venkataramanan 	unsigned int count = 0, size = skb_headlen(skb);
15832b245cb2SAnirudh Venkataramanan 
15842b245cb2SAnirudh Venkataramanan 	for (;;) {
15852b245cb2SAnirudh Venkataramanan 		count += ice_txd_use_count(size);
15862b245cb2SAnirudh Venkataramanan 
15872b245cb2SAnirudh Venkataramanan 		if (!nr_frags--)
15882b245cb2SAnirudh Venkataramanan 			break;
15892b245cb2SAnirudh Venkataramanan 
15902b245cb2SAnirudh Venkataramanan 		size = skb_frag_size(frag++);
15912b245cb2SAnirudh Venkataramanan 	}
15922b245cb2SAnirudh Venkataramanan 
15932b245cb2SAnirudh Venkataramanan 	return count;
15942b245cb2SAnirudh Venkataramanan }
15952b245cb2SAnirudh Venkataramanan 
15962b245cb2SAnirudh Venkataramanan /**
15972b245cb2SAnirudh Venkataramanan  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
15982b245cb2SAnirudh Venkataramanan  * @skb: send buffer
15992b245cb2SAnirudh Venkataramanan  *
16002b245cb2SAnirudh Venkataramanan  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
16012b245cb2SAnirudh Venkataramanan  * and so we need to figure out the cases where we need to linearize the skb.
16022b245cb2SAnirudh Venkataramanan  *
16032b245cb2SAnirudh Venkataramanan  * For TSO we need to count the TSO header and segment payload separately.
16042b245cb2SAnirudh Venkataramanan  * As such we need to check cases where we have 7 fragments or more as we
16052b245cb2SAnirudh Venkataramanan  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
16062b245cb2SAnirudh Venkataramanan  * the segment payload in the first descriptor, and another 7 for the
16072b245cb2SAnirudh Venkataramanan  * fragments.
16082b245cb2SAnirudh Venkataramanan  */
16092b245cb2SAnirudh Venkataramanan static bool __ice_chk_linearize(struct sk_buff *skb)
16102b245cb2SAnirudh Venkataramanan {
16112b245cb2SAnirudh Venkataramanan 	const struct skb_frag_struct *frag, *stale;
16122b245cb2SAnirudh Venkataramanan 	int nr_frags, sum;
16132b245cb2SAnirudh Venkataramanan 
16142b245cb2SAnirudh Venkataramanan 	/* no need to check if number of frags is less than 7 */
16152b245cb2SAnirudh Venkataramanan 	nr_frags = skb_shinfo(skb)->nr_frags;
16162b245cb2SAnirudh Venkataramanan 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
16172b245cb2SAnirudh Venkataramanan 		return false;
16182b245cb2SAnirudh Venkataramanan 
16192b245cb2SAnirudh Venkataramanan 	/* We need to walk through the list and validate that each group
16202b245cb2SAnirudh Venkataramanan 	 * of 6 fragments totals at least gso_size.
16212b245cb2SAnirudh Venkataramanan 	 */
16222b245cb2SAnirudh Venkataramanan 	nr_frags -= ICE_MAX_BUF_TXD - 2;
16232b245cb2SAnirudh Venkataramanan 	frag = &skb_shinfo(skb)->frags[0];
16242b245cb2SAnirudh Venkataramanan 
16252b245cb2SAnirudh Venkataramanan 	/* Initialize size to the negative value of gso_size minus 1. We
16262b245cb2SAnirudh Venkataramanan 	 * use this as the worst case scenario in which the frag ahead
16272b245cb2SAnirudh Venkataramanan 	 * of us only provides one byte which is why we are limited to 6
16282b245cb2SAnirudh Venkataramanan 	 * descriptors for a single transmit as the header and previous
16292b245cb2SAnirudh Venkataramanan 	 * fragment are already consuming 2 descriptors.
16302b245cb2SAnirudh Venkataramanan 	 */
16312b245cb2SAnirudh Venkataramanan 	sum = 1 - skb_shinfo(skb)->gso_size;
16322b245cb2SAnirudh Venkataramanan 
16332b245cb2SAnirudh Venkataramanan 	/* Add size of frags 0 through 4 to create our initial sum */
16342b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
16352b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
16362b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
16372b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
16382b245cb2SAnirudh Venkataramanan 	sum += skb_frag_size(frag++);
16392b245cb2SAnirudh Venkataramanan 
16402b245cb2SAnirudh Venkataramanan 	/* Walk through fragments adding latest fragment, testing it, and
16412b245cb2SAnirudh Venkataramanan 	 * then removing stale fragments from the sum.
16422b245cb2SAnirudh Venkataramanan 	 */
16432b245cb2SAnirudh Venkataramanan 	stale = &skb_shinfo(skb)->frags[0];
16442b245cb2SAnirudh Venkataramanan 	for (;;) {
16452b245cb2SAnirudh Venkataramanan 		sum += skb_frag_size(frag++);
16462b245cb2SAnirudh Venkataramanan 
16472b245cb2SAnirudh Venkataramanan 		/* if sum is negative we failed to make sufficient progress */
16482b245cb2SAnirudh Venkataramanan 		if (sum < 0)
16492b245cb2SAnirudh Venkataramanan 			return true;
16502b245cb2SAnirudh Venkataramanan 
16512b245cb2SAnirudh Venkataramanan 		if (!nr_frags--)
16522b245cb2SAnirudh Venkataramanan 			break;
16532b245cb2SAnirudh Venkataramanan 
16542b245cb2SAnirudh Venkataramanan 		sum -= skb_frag_size(stale++);
16552b245cb2SAnirudh Venkataramanan 	}
16562b245cb2SAnirudh Venkataramanan 
16572b245cb2SAnirudh Venkataramanan 	return false;
16582b245cb2SAnirudh Venkataramanan }
16592b245cb2SAnirudh Venkataramanan 
16602b245cb2SAnirudh Venkataramanan /**
16612b245cb2SAnirudh Venkataramanan  * ice_chk_linearize - Check if there are more than 8 fragments per packet
16622b245cb2SAnirudh Venkataramanan  * @skb:      send buffer
16632b245cb2SAnirudh Venkataramanan  * @count:    number of buffers used
16642b245cb2SAnirudh Venkataramanan  *
16652b245cb2SAnirudh Venkataramanan  * Note: Our HW can't scatter-gather more than 8 fragments to build
16662b245cb2SAnirudh Venkataramanan  * a packet on the wire and so we need to figure out the cases where we
16672b245cb2SAnirudh Venkataramanan  * need to linearize the skb.
16682b245cb2SAnirudh Venkataramanan  */
16692b245cb2SAnirudh Venkataramanan static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
16702b245cb2SAnirudh Venkataramanan {
16712b245cb2SAnirudh Venkataramanan 	/* Both TSO and single send will work if count is less than 8 */
16722b245cb2SAnirudh Venkataramanan 	if (likely(count < ICE_MAX_BUF_TXD))
16732b245cb2SAnirudh Venkataramanan 		return false;
16742b245cb2SAnirudh Venkataramanan 
16752b245cb2SAnirudh Venkataramanan 	if (skb_is_gso(skb))
16762b245cb2SAnirudh Venkataramanan 		return __ice_chk_linearize(skb);
16772b245cb2SAnirudh Venkataramanan 
16782b245cb2SAnirudh Venkataramanan 	/* count is at least 8 here, so only exactly 8 data buffers still fit */
16792b245cb2SAnirudh Venkataramanan 	return count != ICE_MAX_BUF_TXD;
16802b245cb2SAnirudh Venkataramanan }
16812b245cb2SAnirudh Venkataramanan 
16822b245cb2SAnirudh Venkataramanan /**
16832b245cb2SAnirudh Venkataramanan  * ice_xmit_frame_ring - Sends buffer on Tx ring
16842b245cb2SAnirudh Venkataramanan  * @skb: send buffer
16852b245cb2SAnirudh Venkataramanan  * @tx_ring: ring to send buffer on
16862b245cb2SAnirudh Venkataramanan  *
16872b245cb2SAnirudh Venkataramanan  * Returns NETDEV_TX_OK if sent, else an error code
16882b245cb2SAnirudh Venkataramanan  */
16892b245cb2SAnirudh Venkataramanan static netdev_tx_t
16902b245cb2SAnirudh Venkataramanan ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
16912b245cb2SAnirudh Venkataramanan {
1692d76a60baSAnirudh Venkataramanan 	struct ice_tx_offload_params offload = { 0 };
16932b245cb2SAnirudh Venkataramanan 	struct ice_tx_buf *first;
16942b245cb2SAnirudh Venkataramanan 	unsigned int count;
1695d76a60baSAnirudh Venkataramanan 	int tso, csum;
16962b245cb2SAnirudh Venkataramanan 
16972b245cb2SAnirudh Venkataramanan 	count = ice_xmit_desc_count(skb);
16982b245cb2SAnirudh Venkataramanan 	if (ice_chk_linearize(skb, count)) {
16992b245cb2SAnirudh Venkataramanan 		if (__skb_linearize(skb))
17002b245cb2SAnirudh Venkataramanan 			goto out_drop;
17012b245cb2SAnirudh Venkataramanan 		count = ice_txd_use_count(skb->len);
17022b245cb2SAnirudh Venkataramanan 		tx_ring->tx_stats.tx_linearize++;
17032b245cb2SAnirudh Venkataramanan 	}
17042b245cb2SAnirudh Venkataramanan 
17052b245cb2SAnirudh Venkataramanan 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
17062b245cb2SAnirudh Venkataramanan 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
17072b245cb2SAnirudh Venkataramanan 	 *       + 4 desc gap to avoid the cache line where head is,
17082b245cb2SAnirudh Venkataramanan 	 *       + 1 desc for context descriptor,
17092b245cb2SAnirudh Venkataramanan 	 * otherwise try next time
17102b245cb2SAnirudh Venkataramanan 	 */
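	/* per the needs listed above, e.g. a linear skb needing count = 2
	 * stops the queue unless 2 + 4 (cache line gap) + 1 (context
	 * descriptor) = 7 descriptors are free
	 */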
1711c585ea42SBrett Creeley 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
1712c585ea42SBrett Creeley 			      ICE_DESCS_FOR_CTX_DESC)) {
17132b245cb2SAnirudh Venkataramanan 		tx_ring->tx_stats.tx_busy++;
17142b245cb2SAnirudh Venkataramanan 		return NETDEV_TX_BUSY;
17152b245cb2SAnirudh Venkataramanan 	}
17162b245cb2SAnirudh Venkataramanan 
1717d76a60baSAnirudh Venkataramanan 	offload.tx_ring = tx_ring;
1718d76a60baSAnirudh Venkataramanan 
17192b245cb2SAnirudh Venkataramanan 	/* record the location of the first descriptor for this packet */
17202b245cb2SAnirudh Venkataramanan 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
17212b245cb2SAnirudh Venkataramanan 	first->skb = skb;
17222b245cb2SAnirudh Venkataramanan 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
17232b245cb2SAnirudh Venkataramanan 	first->gso_segs = 1;
1724d76a60baSAnirudh Venkataramanan 	first->tx_flags = 0;
17252b245cb2SAnirudh Venkataramanan 
1726d76a60baSAnirudh Venkataramanan 	/* prepare the VLAN tagging flags for Tx */
1727d76a60baSAnirudh Venkataramanan 	if (ice_tx_prepare_vlan_flags(tx_ring, first))
1728d76a60baSAnirudh Venkataramanan 		goto out_drop;
1729d76a60baSAnirudh Venkataramanan 
1730d76a60baSAnirudh Venkataramanan 	/* set up TSO offload */
1731d76a60baSAnirudh Venkataramanan 	tso = ice_tso(first, &offload);
1732d76a60baSAnirudh Venkataramanan 	if (tso < 0)
1733d76a60baSAnirudh Venkataramanan 		goto out_drop;
1734d76a60baSAnirudh Venkataramanan 
1735d76a60baSAnirudh Venkataramanan 	/* always set up Tx checksum offload */
1736d76a60baSAnirudh Venkataramanan 	csum = ice_tx_csum(first, &offload);
1737d76a60baSAnirudh Venkataramanan 	if (csum < 0)
1738d76a60baSAnirudh Venkataramanan 		goto out_drop;
1739d76a60baSAnirudh Venkataramanan 
1740d76a60baSAnirudh Venkataramanan 	if (tso || offload.cd_tunnel_params) {
1741d76a60baSAnirudh Venkataramanan 		struct ice_tx_ctx_desc *cdesc;
1742d76a60baSAnirudh Venkataramanan 		int i = tx_ring->next_to_use;
1743d76a60baSAnirudh Venkataramanan 
1744d76a60baSAnirudh Venkataramanan 		/* grab the next descriptor */
1745d76a60baSAnirudh Venkataramanan 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
1746d76a60baSAnirudh Venkataramanan 		i++;
1747d76a60baSAnirudh Venkataramanan 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1748d76a60baSAnirudh Venkataramanan 
1749d76a60baSAnirudh Venkataramanan 		/* setup context descriptor */
1750d76a60baSAnirudh Venkataramanan 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
1751d76a60baSAnirudh Venkataramanan 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
1752d76a60baSAnirudh Venkataramanan 		cdesc->rsvd = cpu_to_le16(0);
1753d76a60baSAnirudh Venkataramanan 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
1754d76a60baSAnirudh Venkataramanan 	}
1755d76a60baSAnirudh Venkataramanan 
1756d76a60baSAnirudh Venkataramanan 	ice_tx_map(tx_ring, first, &offload);
17572b245cb2SAnirudh Venkataramanan 	return NETDEV_TX_OK;
17582b245cb2SAnirudh Venkataramanan 
17592b245cb2SAnirudh Venkataramanan out_drop:
17602b245cb2SAnirudh Venkataramanan 	dev_kfree_skb_any(skb);
17612b245cb2SAnirudh Venkataramanan 	return NETDEV_TX_OK;
17622b245cb2SAnirudh Venkataramanan }
17632b245cb2SAnirudh Venkataramanan 
17642b245cb2SAnirudh Venkataramanan /**
17652b245cb2SAnirudh Venkataramanan  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
17662b245cb2SAnirudh Venkataramanan  * @skb: send buffer
17672b245cb2SAnirudh Venkataramanan  * @netdev: network interface device structure
17682b245cb2SAnirudh Venkataramanan  *
17692b245cb2SAnirudh Venkataramanan  * Returns NETDEV_TX_OK if sent, else an error code
17702b245cb2SAnirudh Venkataramanan  */
17712b245cb2SAnirudh Venkataramanan netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
17722b245cb2SAnirudh Venkataramanan {
17732b245cb2SAnirudh Venkataramanan 	struct ice_netdev_priv *np = netdev_priv(netdev);
17742b245cb2SAnirudh Venkataramanan 	struct ice_vsi *vsi = np->vsi;
17752b245cb2SAnirudh Venkataramanan 	struct ice_ring *tx_ring;
17762b245cb2SAnirudh Venkataramanan 
17772b245cb2SAnirudh Venkataramanan 	tx_ring = vsi->tx_rings[skb->queue_mapping];
17782b245cb2SAnirudh Venkataramanan 
17792b245cb2SAnirudh Venkataramanan 	/* hardware can't handle really short frames, hardware padding works
17802b245cb2SAnirudh Venkataramanan 	 * beyond this point
17812b245cb2SAnirudh Venkataramanan 	 */
17822b245cb2SAnirudh Venkataramanan 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
17832b245cb2SAnirudh Venkataramanan 		return NETDEV_TX_OK;
17842b245cb2SAnirudh Venkataramanan 
17852b245cb2SAnirudh Venkataramanan 	return ice_xmit_frame_ring(skb, tx_ring);
17862b245cb2SAnirudh Venkataramanan }
1787