// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>

#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"

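/**
 * build_ctob - build the cmd_type_offset_bsz quadword of a Tx data descriptor
 * @td_cmd:    Tx descriptor command bits
 * @td_offset: Tx descriptor header offsets
 * @size:      size of the data buffer in bytes
 * @td_tag:    L2 tag 1 (VLAN tag) value
 *
 * Packs the command, offsets, buffer size and tag into the little-endian
 * layout expected by the hardware.
 **/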
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size  << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag  << IAVF_TXD_QW1_L2TAG1_SHIFT));
}

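/* Command bits set on the last descriptor of every packet: end of packet
 * (EOP) plus report status (RS), which requests a descriptor write-back.
 */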
#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)

/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
					    struct iavf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * iavf_clean_tx_ring - Free all Tx buffers in a ring
 * @tx_ring: ring to be cleaned
 **/
void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
	iavf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * iavf_get_tx_pending - how many Tx descriptors are not yet processed
 * @ring: the ring of descriptors
 * @in_sw: whether tx_pending is being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/**
 * iavf_detect_recover_hung - detect hung Tx queues and recover them
 * @vsi:  pointer to the VSI struct that owns the Tx queues
 *
 * The VSI's netdev owns the Tx queues. Check each of those queues and,
 * if one appears hung, trigger recovery by issuing a SW interrupt.
 **/
void iavf_detect_recover_hung(struct iavf_vsi *vsi)
{
	struct iavf_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__IAVF_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->back->num_active_queues; i++) {
		tx_ring = &vsi->back->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				iavf_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to iavf_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
			  iavf_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

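/* Tx write-back stride: when fewer than WB_STRIDE descriptors remain
 * pending after a clean, arm a hardware write-back (see the WB_ON_ITR
 * handling in iavf_clean_tx_irq below).
 */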
#define WB_STRIDE 4

/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
			      struct iavf_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct iavf_tx_buffer *tx_buf;
	struct iavf_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = IAVF_TX_DESC(tx_ring, i);
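	/* Keep i offset by -count so the ring wrap checks below reduce to
	 * testing for i reaching zero; the count is added back before
	 * next_to_clean is stored at the end of the loop.
	 */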
	i -= tx_ring->count;

	do {
		struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			iavf_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = IAVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable interrupts.
		 */
		unsigned int j = iavf_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
		    (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

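	/* Only wake a stopped queue once at least TX_WAKE_THRESHOLD
	 * descriptors are free again, to avoid bouncing the queue between
	 * stopped and running.
	 */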
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		   !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb; interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
				  struct iavf_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
	      IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
	q_vector->arm_wb_state = true;
}

/**
 * iavf_force_wb - Issue a SW interrupt so HW does a writeback
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
{
	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
		  /* allow 00 to be written to the index */;

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
	     val);
}

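/**
 * iavf_container_is_rx - returns true if the container is the Rx container
 * @q_vector: the vector the container belongs to
 * @rc: the ring container to check
 **/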
static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
					struct iavf_ring_container *rc)
{
	return &q_vector->rx == rc;
}

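/* Per-link-speed multipliers used to scale the adaptive ITR by the average
 * wire size; see iavf_itr_divisor().
 */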
#define IAVF_AIM_MULTIPLIER_100G	2560
#define IAVF_AIM_MULTIPLIER_50G		1280
#define IAVF_AIM_MULTIPLIER_40G		1024
#define IAVF_AIM_MULTIPLIER_20G		512
#define IAVF_AIM_MULTIPLIER_10G		256
#define IAVF_AIM_MULTIPLIER_1G		32

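/**
 * iavf_mbps_itr_multiplier - map a link speed in Mbps to an AIM multiplier
 * @speed_mbps: link speed reported in Mbps
 *
 * Unknown speeds fall through to the 10G multiplier.
 **/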
static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
	switch (speed_mbps) {
	case SPEED_100000:
		return IAVF_AIM_MULTIPLIER_100G;
	case SPEED_50000:
		return IAVF_AIM_MULTIPLIER_50G;
	case SPEED_40000:
		return IAVF_AIM_MULTIPLIER_40G;
	case SPEED_25000:
	case SPEED_20000:
		return IAVF_AIM_MULTIPLIER_20G;
	case SPEED_10000:
	default:
		return IAVF_AIM_MULTIPLIER_10G;
	case SPEED_1000:
	case SPEED_100:
		return IAVF_AIM_MULTIPLIER_1G;
	}
}

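/**
 * iavf_virtchnl_itr_multiplier - map a virtchnl link speed to an AIM multiplier
 * @speed_virtchnl: link speed reported via the virtchnl enum
 *
 * Unknown speeds fall through to the 10G multiplier.
 **/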
static unsigned int
iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
{
	switch (speed_virtchnl) {
	case VIRTCHNL_LINK_SPEED_40GB:
		return IAVF_AIM_MULTIPLIER_40G;
	case VIRTCHNL_LINK_SPEED_25GB:
	case VIRTCHNL_LINK_SPEED_20GB:
		return IAVF_AIM_MULTIPLIER_20G;
	case VIRTCHNL_LINK_SPEED_10GB:
	default:
		return IAVF_AIM_MULTIPLIER_10G;
	case VIRTCHNL_LINK_SPEED_1GB:
	case VIRTCHNL_LINK_SPEED_100MB:
		return IAVF_AIM_MULTIPLIER_1G;
	}
}

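/**
 * iavf_itr_divisor - return the divisor used to scale the adaptive ITR
 * @adapter: adapter to fetch the link speed from
 *
 * Uses the Mbps link speed when the PF advertises ADV_LINK_SUPPORT,
 * otherwise falls back to the coarser virtchnl link speed enum.
 **/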
static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
{
	if (ADV_LINK_SUPPORT(adapter))
		return IAVF_ITR_ADAPTIVE_MIN_INC *
			iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
	else
		return IAVF_ITR_ADAPTIVE_MIN_INC *
			iavf_virtchnl_itr_multiplier(adapter->link_speed);
}

/**
 * iavf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void iavf_update_itr(struct iavf_q_vector *q_vector,
			    struct iavf_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * For Tx we want to pull the delay down and default to high latency.
	 */
	itr = iavf_container_is_rx(q_vector, rc) ?
	      IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
	      IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;

	/* If we haven't updated within the last 1-2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (iavf_container_is_rx(q_vector, rc)) {
		/* If there are 1 to 4 Rx packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
			itr = IAVF_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
		     IAVF_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
		if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
			itr &= IAVF_ITR_ADAPTIVE_LATENCY;
			itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= IAVF_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= IAVF_ITR_MASK;
		if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
			itr = IAVF_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = IAVF_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to
	 * fine tune it just use the formula below to try and dial in an
	 * exact value given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K and 16K ints/sec
	 */
	if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size,
			    iavf_itr_divisor(q_vector->adapter)) *
		IAVF_ITR_ADAPTIVE_MIN_INC;

	if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
		itr &= IAVF_ITR_ADAPTIVE_LATENCY;
		itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     iavf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IAVF_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
	iavf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_release_rx_desc - Store the new next_to_use value and bump the tail
 * @rx_ring: ring to bump
 * @val: new next_to_use index, also written to the tail register
 **/
static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * iavf_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}

/**
 * iavf_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
				   struct iavf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 iavf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IAVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, iavf_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = iavf_rx_offset(rx_ring);

	/* initialize pagecnt_bias to 1 representing we fully own page */
	bi->pagecnt_bias = 1;

	return true;
}

/**
 * iavf_receive_skb - Send a completed packet up the stack
 * @rx_ring:  rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void iavf_receive_skb(struct iavf_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct iavf_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
		 vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union iavf_rx_desc *rx_desc;
	struct iavf_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = IAVF_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!iavf_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = IAVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

9715ec8b7d1SJesse Brandeburg 
9725ec8b7d1SJesse Brandeburg /**
97356184e01SJesse Brandeburg  * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
9745ec8b7d1SJesse Brandeburg  * @vsi: the VSI we care about
9755ec8b7d1SJesse Brandeburg  * @skb: skb currently being received and modified
9765ec8b7d1SJesse Brandeburg  * @rx_desc: the receive descriptor
9775ec8b7d1SJesse Brandeburg  **/
97856184e01SJesse Brandeburg static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
9795ec8b7d1SJesse Brandeburg 				    struct sk_buff *skb,
98056184e01SJesse Brandeburg 				    union iavf_rx_desc *rx_desc)
9815ec8b7d1SJesse Brandeburg {
98256184e01SJesse Brandeburg 	struct iavf_rx_ptype_decoded decoded;
9835ec8b7d1SJesse Brandeburg 	u32 rx_error, rx_status;
9845ec8b7d1SJesse Brandeburg 	bool ipv4, ipv6;
9855ec8b7d1SJesse Brandeburg 	u8 ptype;
9865ec8b7d1SJesse Brandeburg 	u64 qword;
9875ec8b7d1SJesse Brandeburg 
9885ec8b7d1SJesse Brandeburg 	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
98956184e01SJesse Brandeburg 	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
99056184e01SJesse Brandeburg 	rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
99156184e01SJesse Brandeburg 		   IAVF_RXD_QW1_ERROR_SHIFT;
99256184e01SJesse Brandeburg 	rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
99356184e01SJesse Brandeburg 		    IAVF_RXD_QW1_STATUS_SHIFT;
9945ec8b7d1SJesse Brandeburg 	decoded = decode_rx_desc_ptype(ptype);
9955ec8b7d1SJesse Brandeburg 
9965ec8b7d1SJesse Brandeburg 	skb->ip_summed = CHECKSUM_NONE;
9975ec8b7d1SJesse Brandeburg 
9985ec8b7d1SJesse Brandeburg 	skb_checksum_none_assert(skb);
9995ec8b7d1SJesse Brandeburg 
10005ec8b7d1SJesse Brandeburg 	/* Rx csum enabled and ip headers found? */
10015ec8b7d1SJesse Brandeburg 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
10025ec8b7d1SJesse Brandeburg 		return;
10035ec8b7d1SJesse Brandeburg 
10045ec8b7d1SJesse Brandeburg 	/* did the hardware decode the packet and checksum? */
1005f1cad2ceSJesse Brandeburg 	if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
10065ec8b7d1SJesse Brandeburg 		return;
10075ec8b7d1SJesse Brandeburg 
10085ec8b7d1SJesse Brandeburg 	/* both known and outer_ip must be set for the below code to work */
10095ec8b7d1SJesse Brandeburg 	if (!(decoded.known && decoded.outer_ip))
10105ec8b7d1SJesse Brandeburg 		return;
10115ec8b7d1SJesse Brandeburg 
101256184e01SJesse Brandeburg 	ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
101356184e01SJesse Brandeburg 	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
101456184e01SJesse Brandeburg 	ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
101556184e01SJesse Brandeburg 	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
10165ec8b7d1SJesse Brandeburg 
10175ec8b7d1SJesse Brandeburg 	if (ipv4 &&
1018f1cad2ceSJesse Brandeburg 	    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
1019f1cad2ceSJesse Brandeburg 			 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
10205ec8b7d1SJesse Brandeburg 		goto checksum_fail;
10215ec8b7d1SJesse Brandeburg 
10225ec8b7d1SJesse Brandeburg 	/* likely incorrect csum if alternate IP extension headers found */
10235ec8b7d1SJesse Brandeburg 	if (ipv6 &&
1024f1cad2ceSJesse Brandeburg 	    rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
10255ec8b7d1SJesse Brandeburg 		/* don't increment checksum err here, non-fatal err */
10265ec8b7d1SJesse Brandeburg 		return;
10275ec8b7d1SJesse Brandeburg 
10285ec8b7d1SJesse Brandeburg 	/* there was some L4 error, count error and punt packet to the stack */
1029f1cad2ceSJesse Brandeburg 	if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
10305ec8b7d1SJesse Brandeburg 		goto checksum_fail;
10315ec8b7d1SJesse Brandeburg 
10325ec8b7d1SJesse Brandeburg 	/* handle packets that were not able to be checksummed due
10335ec8b7d1SJesse Brandeburg 	 * to arrival speed, in this case the stack can compute
10345ec8b7d1SJesse Brandeburg 	 * the csum.
10355ec8b7d1SJesse Brandeburg 	 */
1036f1cad2ceSJesse Brandeburg 	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
10375ec8b7d1SJesse Brandeburg 		return;
10385ec8b7d1SJesse Brandeburg 
10395ec8b7d1SJesse Brandeburg 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
10405ec8b7d1SJesse Brandeburg 	switch (decoded.inner_prot) {
104156184e01SJesse Brandeburg 	case IAVF_RX_PTYPE_INNER_PROT_TCP:
104256184e01SJesse Brandeburg 	case IAVF_RX_PTYPE_INNER_PROT_UDP:
104356184e01SJesse Brandeburg 	case IAVF_RX_PTYPE_INNER_PROT_SCTP:
10445ec8b7d1SJesse Brandeburg 		skb->ip_summed = CHECKSUM_UNNECESSARY;
10455463fce6SJeff Kirsher 		fallthrough;
10465ec8b7d1SJesse Brandeburg 	default:
10475ec8b7d1SJesse Brandeburg 		break;
10485ec8b7d1SJesse Brandeburg 	}
10495ec8b7d1SJesse Brandeburg 
10505ec8b7d1SJesse Brandeburg 	return;
10515ec8b7d1SJesse Brandeburg 
10525ec8b7d1SJesse Brandeburg checksum_fail:
10535ec8b7d1SJesse Brandeburg 	vsi->back->hw_csum_rx_error++;
10545ec8b7d1SJesse Brandeburg }
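/* Worked example (illustrative): a plain TCP/IPv4 frame decodes to
 * outer_ip == IPV4 and inner_prot == TCP; with L3L4P set and none of the
 * IPE/EIPE/L4E error bits raised, control reaches the switch above and the
 * skb is marked CHECKSUM_UNNECESSARY.  Any of those error bits instead
 * jumps to checksum_fail and increments hw_csum_rx_error.
 */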
10555ec8b7d1SJesse Brandeburg 
10565ec8b7d1SJesse Brandeburg /**
105756184e01SJesse Brandeburg  * iavf_ptype_to_htype - get a hash type
10585ec8b7d1SJesse Brandeburg  * @ptype: the ptype value from the descriptor
10595ec8b7d1SJesse Brandeburg  *
10605ec8b7d1SJesse Brandeburg  * Returns a hash type to be used by skb_set_hash
10615ec8b7d1SJesse Brandeburg  **/
106256184e01SJesse Brandeburg static inline int iavf_ptype_to_htype(u8 ptype)
10635ec8b7d1SJesse Brandeburg {
106456184e01SJesse Brandeburg 	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
10655ec8b7d1SJesse Brandeburg 
10665ec8b7d1SJesse Brandeburg 	if (!decoded.known)
10675ec8b7d1SJesse Brandeburg 		return PKT_HASH_TYPE_NONE;
10685ec8b7d1SJesse Brandeburg 
106956184e01SJesse Brandeburg 	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
107056184e01SJesse Brandeburg 	    decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
10715ec8b7d1SJesse Brandeburg 		return PKT_HASH_TYPE_L4;
107256184e01SJesse Brandeburg 	else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
107356184e01SJesse Brandeburg 		 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
10745ec8b7d1SJesse Brandeburg 		return PKT_HASH_TYPE_L3;
10755ec8b7d1SJesse Brandeburg 	else
10765ec8b7d1SJesse Brandeburg 		return PKT_HASH_TYPE_L2;
10775ec8b7d1SJesse Brandeburg }
10785ec8b7d1SJesse Brandeburg 
10795ec8b7d1SJesse Brandeburg /**
108056184e01SJesse Brandeburg  * iavf_rx_hash - set the hash value in the skb
10815ec8b7d1SJesse Brandeburg  * @ring: descriptor ring
10825ec8b7d1SJesse Brandeburg  * @rx_desc: specific descriptor
10835ec8b7d1SJesse Brandeburg  * @skb: skb currently being received and modified
10845ec8b7d1SJesse Brandeburg  * @rx_ptype: Rx packet type
10855ec8b7d1SJesse Brandeburg  **/
108656184e01SJesse Brandeburg static inline void iavf_rx_hash(struct iavf_ring *ring,
108756184e01SJesse Brandeburg 				union iavf_rx_desc *rx_desc,
10885ec8b7d1SJesse Brandeburg 				struct sk_buff *skb,
10895ec8b7d1SJesse Brandeburg 				u8 rx_ptype)
10905ec8b7d1SJesse Brandeburg {
10915ec8b7d1SJesse Brandeburg 	u32 hash;
10925ec8b7d1SJesse Brandeburg 	const __le64 rss_mask =
1093f1cad2ceSJesse Brandeburg 		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
1094f1cad2ceSJesse Brandeburg 			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
10955ec8b7d1SJesse Brandeburg 
10965ec8b7d1SJesse Brandeburg 	if (!(ring->netdev->features & NETIF_F_RXHASH))
10975ec8b7d1SJesse Brandeburg 		return;
10985ec8b7d1SJesse Brandeburg 
10995ec8b7d1SJesse Brandeburg 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
11005ec8b7d1SJesse Brandeburg 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
110156184e01SJesse Brandeburg 		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
11025ec8b7d1SJesse Brandeburg 	}
11035ec8b7d1SJesse Brandeburg }
11045ec8b7d1SJesse Brandeburg 
11055ec8b7d1SJesse Brandeburg /**
11065ec8b7d1SJesse Brandeburg  * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
11075ec8b7d1SJesse Brandeburg  * @rx_ring: rx descriptor ring packet is being transacted on
11085ec8b7d1SJesse Brandeburg  * @rx_desc: pointer to the EOP Rx descriptor
11095ec8b7d1SJesse Brandeburg  * @skb: pointer to current skb being populated
11105ec8b7d1SJesse Brandeburg  * @rx_ptype: the packet type decoded by hardware
11115ec8b7d1SJesse Brandeburg  *
11125ec8b7d1SJesse Brandeburg  * This function checks the ring, descriptor, and packet information in
11135ec8b7d1SJesse Brandeburg  * order to populate the hash, checksum, VLAN, protocol, and
11145ec8b7d1SJesse Brandeburg  * other fields within the skb.
11155ec8b7d1SJesse Brandeburg  **/
11165ec8b7d1SJesse Brandeburg static inline
111756184e01SJesse Brandeburg void iavf_process_skb_fields(struct iavf_ring *rx_ring,
111856184e01SJesse Brandeburg 			     union iavf_rx_desc *rx_desc, struct sk_buff *skb,
11195ec8b7d1SJesse Brandeburg 			     u8 rx_ptype)
11205ec8b7d1SJesse Brandeburg {
112156184e01SJesse Brandeburg 	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
11225ec8b7d1SJesse Brandeburg 
112356184e01SJesse Brandeburg 	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
11245ec8b7d1SJesse Brandeburg 
11255ec8b7d1SJesse Brandeburg 	skb_record_rx_queue(skb, rx_ring->queue_index);
11265ec8b7d1SJesse Brandeburg 
11275ec8b7d1SJesse Brandeburg 	/* modifies the skb - consumes the enet header */
11285ec8b7d1SJesse Brandeburg 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
11295ec8b7d1SJesse Brandeburg }
11305ec8b7d1SJesse Brandeburg 
11315ec8b7d1SJesse Brandeburg /**
113256184e01SJesse Brandeburg  * iavf_cleanup_headers - Correct empty headers
11335ec8b7d1SJesse Brandeburg  * @rx_ring: rx descriptor ring packet is being transacted on
11345ec8b7d1SJesse Brandeburg  * @skb: pointer to current skb being fixed
11355ec8b7d1SJesse Brandeburg  *
11365ec8b7d1SJesse Brandeburg  * Also address the case where we are pulling data in on pages only
11375ec8b7d1SJesse Brandeburg  * and as such no data is present in the skb header.
11385ec8b7d1SJesse Brandeburg  *
11395ec8b7d1SJesse Brandeburg  * In addition if skb is not at least 60 bytes we need to pad it so that
11405ec8b7d1SJesse Brandeburg  * it is large enough to qualify as a valid Ethernet frame.
11415ec8b7d1SJesse Brandeburg  *
11425ec8b7d1SJesse Brandeburg  * Returns true if an error was encountered and skb was freed.
11435ec8b7d1SJesse Brandeburg  **/
114456184e01SJesse Brandeburg static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
11455ec8b7d1SJesse Brandeburg {
11465ec8b7d1SJesse Brandeburg 	/* if eth_skb_pad returns an error the skb was freed */
11475ec8b7d1SJesse Brandeburg 	if (eth_skb_pad(skb))
11485ec8b7d1SJesse Brandeburg 		return true;
11495ec8b7d1SJesse Brandeburg 
11505ec8b7d1SJesse Brandeburg 	return false;
11515ec8b7d1SJesse Brandeburg }
11525ec8b7d1SJesse Brandeburg 
11535ec8b7d1SJesse Brandeburg /**
115456184e01SJesse Brandeburg  * iavf_reuse_rx_page - page flip buffer and store it back on the ring
11555ec8b7d1SJesse Brandeburg  * @rx_ring: rx descriptor ring to store buffers on
11565ec8b7d1SJesse Brandeburg  * @old_buff: donor buffer to have page reused
11575ec8b7d1SJesse Brandeburg  *
11585ec8b7d1SJesse Brandeburg  * Synchronizes page for reuse by the adapter
11595ec8b7d1SJesse Brandeburg  **/
116056184e01SJesse Brandeburg static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
116156184e01SJesse Brandeburg 			       struct iavf_rx_buffer *old_buff)
11625ec8b7d1SJesse Brandeburg {
116356184e01SJesse Brandeburg 	struct iavf_rx_buffer *new_buff;
11645ec8b7d1SJesse Brandeburg 	u16 nta = rx_ring->next_to_alloc;
11655ec8b7d1SJesse Brandeburg 
11665ec8b7d1SJesse Brandeburg 	new_buff = &rx_ring->rx_bi[nta];
11675ec8b7d1SJesse Brandeburg 
11685ec8b7d1SJesse Brandeburg 	/* update, and store next to alloc */
11695ec8b7d1SJesse Brandeburg 	nta++;
11705ec8b7d1SJesse Brandeburg 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
11715ec8b7d1SJesse Brandeburg 
11725ec8b7d1SJesse Brandeburg 	/* transfer page from old buffer to new buffer */
11735ec8b7d1SJesse Brandeburg 	new_buff->dma		= old_buff->dma;
11745ec8b7d1SJesse Brandeburg 	new_buff->page		= old_buff->page;
11755ec8b7d1SJesse Brandeburg 	new_buff->page_offset	= old_buff->page_offset;
11765ec8b7d1SJesse Brandeburg 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
11775ec8b7d1SJesse Brandeburg }
11785ec8b7d1SJesse Brandeburg 
11795ec8b7d1SJesse Brandeburg /**
118056184e01SJesse Brandeburg  * iavf_can_reuse_rx_page - Determine if this page can be reused by
11815ec8b7d1SJesse Brandeburg  * the adapter for another receive
11825ec8b7d1SJesse Brandeburg  *
11835ec8b7d1SJesse Brandeburg  * @rx_buffer: buffer containing the page
11845ec8b7d1SJesse Brandeburg  *
11855ec8b7d1SJesse Brandeburg  * If page is reusable, rx_buffer->page_offset is adjusted to point to
11865ec8b7d1SJesse Brandeburg  * an unused region in the page.
11875ec8b7d1SJesse Brandeburg  *
11885ec8b7d1SJesse Brandeburg  * For small pages, the buffer truesize is a constant value, half the size
11895ec8b7d1SJesse Brandeburg  * of the memory at page.  We'll attempt to alternate between high and
11905ec8b7d1SJesse Brandeburg  * low halves of the page, with one half ready for use by the hardware
11915ec8b7d1SJesse Brandeburg  * and the other half being consumed by the stack.  We use the page
11925ec8b7d1SJesse Brandeburg  * ref count to determine whether the stack has finished consuming the
11935ec8b7d1SJesse Brandeburg  * portion of this page that was passed up with a previous packet.  If
11945ec8b7d1SJesse Brandeburg  * the page ref count is >1, we'll assume the "other" half page is
11955ec8b7d1SJesse Brandeburg  * still busy, and this page cannot be reused.
11965ec8b7d1SJesse Brandeburg  *
11975ec8b7d1SJesse Brandeburg  * For larger pages, the truesize is the actual space used by the
11985ec8b7d1SJesse Brandeburg  * received packet (adjusted upward to an even multiple of the cache
11995ec8b7d1SJesse Brandeburg  * line size).  This will advance through the page by the amount
12005ec8b7d1SJesse Brandeburg  * actually consumed by the received packets while there is still
12015ec8b7d1SJesse Brandeburg  * space for a buffer.  Each region of larger pages will be used at
12025ec8b7d1SJesse Brandeburg  * most once, after which the page will not be reused.
12035ec8b7d1SJesse Brandeburg  *
12045ec8b7d1SJesse Brandeburg  * In either case, if the page is reusable its refcount is increased.
12055ec8b7d1SJesse Brandeburg  **/
120656184e01SJesse Brandeburg static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
12075ec8b7d1SJesse Brandeburg {
12085ec8b7d1SJesse Brandeburg 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
12095ec8b7d1SJesse Brandeburg 	struct page *page = rx_buffer->page;
12105ec8b7d1SJesse Brandeburg 
12115ec8b7d1SJesse Brandeburg 	/* Is any reuse possible? */
1212a79afa78SAlexander Lobakin 	if (!dev_page_is_reusable(page))
12135ec8b7d1SJesse Brandeburg 		return false;
12145ec8b7d1SJesse Brandeburg 
12155ec8b7d1SJesse Brandeburg #if (PAGE_SIZE < 8192)
12165ec8b7d1SJesse Brandeburg 	/* if we are the only owner of the page we can reuse it */
12175ec8b7d1SJesse Brandeburg 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
12185ec8b7d1SJesse Brandeburg 		return false;
12195ec8b7d1SJesse Brandeburg #else
122056184e01SJesse Brandeburg #define IAVF_LAST_OFFSET \
122156184e01SJesse Brandeburg 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
122256184e01SJesse Brandeburg 	if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
12235ec8b7d1SJesse Brandeburg 		return false;
12245ec8b7d1SJesse Brandeburg #endif
12255ec8b7d1SJesse Brandeburg 
12265ec8b7d1SJesse Brandeburg 	/* If we have drained the page fragment pool we need to update
12275ec8b7d1SJesse Brandeburg 	 * the pagecnt_bias and page count so that we fully restock the
12285ec8b7d1SJesse Brandeburg 	 * number of references the driver holds.
12295ec8b7d1SJesse Brandeburg 	 */
12305ec8b7d1SJesse Brandeburg 	if (unlikely(!pagecnt_bias)) {
12315ec8b7d1SJesse Brandeburg 		page_ref_add(page, USHRT_MAX);
12325ec8b7d1SJesse Brandeburg 		rx_buffer->pagecnt_bias = USHRT_MAX;
12335ec8b7d1SJesse Brandeburg 	}
12345ec8b7d1SJesse Brandeburg 
12355ec8b7d1SJesse Brandeburg 	return true;
12365ec8b7d1SJesse Brandeburg }
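/* Sketch of the pagecnt_bias accounting (initial values depend on the
 * allocation path, which is not shown here): each buffer handed up to the
 * stack decrements the driver-private bias instead of the atomic page
 * refcount, so page_count(page) - pagecnt_bias approximates the references
 * the stack still holds.  On small-page systems a difference greater than
 * one means the other half-buffer is still in flight, so the page is not
 * flipped.  Once the bias drains to zero, USHRT_MAX references are
 * restocked with a single page_ref_add(), keeping per-packet atomics off
 * the hot path.
 */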
12375ec8b7d1SJesse Brandeburg 
12385ec8b7d1SJesse Brandeburg /**
123956184e01SJesse Brandeburg  * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
12405ec8b7d1SJesse Brandeburg  * @rx_ring: rx descriptor ring to transact packets on
12415ec8b7d1SJesse Brandeburg  * @rx_buffer: buffer containing page to add
12425ec8b7d1SJesse Brandeburg  * @skb: sk_buff to place the data into
12435ec8b7d1SJesse Brandeburg  * @size: packet length from rx_desc
12445ec8b7d1SJesse Brandeburg  *
12455ec8b7d1SJesse Brandeburg  * This function will add the data contained in rx_buffer->page to the skb.
12465ec8b7d1SJesse Brandeburg  * It will just attach the page as a frag to the skb.
12475ec8b7d1SJesse Brandeburg  *
12485ec8b7d1SJesse Brandeburg  * The function will then update the page offset.
12495ec8b7d1SJesse Brandeburg  **/
125056184e01SJesse Brandeburg static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
125156184e01SJesse Brandeburg 			     struct iavf_rx_buffer *rx_buffer,
12525ec8b7d1SJesse Brandeburg 			     struct sk_buff *skb,
12535ec8b7d1SJesse Brandeburg 			     unsigned int size)
12545ec8b7d1SJesse Brandeburg {
12555ec8b7d1SJesse Brandeburg #if (PAGE_SIZE < 8192)
125656184e01SJesse Brandeburg 	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
12575ec8b7d1SJesse Brandeburg #else
125856184e01SJesse Brandeburg 	unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
12595ec8b7d1SJesse Brandeburg #endif
12605ec8b7d1SJesse Brandeburg 
1261efa14c39SMitch Williams 	if (!size)
1262efa14c39SMitch Williams 		return;
1263efa14c39SMitch Williams 
12645ec8b7d1SJesse Brandeburg 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
12655ec8b7d1SJesse Brandeburg 			rx_buffer->page_offset, size, truesize);
12665ec8b7d1SJesse Brandeburg 
12675ec8b7d1SJesse Brandeburg 	/* page is being used so we must update the page offset */
12685ec8b7d1SJesse Brandeburg #if (PAGE_SIZE < 8192)
12695ec8b7d1SJesse Brandeburg 	rx_buffer->page_offset ^= truesize;
12705ec8b7d1SJesse Brandeburg #else
12715ec8b7d1SJesse Brandeburg 	rx_buffer->page_offset += truesize;
12725ec8b7d1SJesse Brandeburg #endif
12735ec8b7d1SJesse Brandeburg }
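/* Example of the offset update above, assuming 4K pages and 2K buffers:
 * the XOR toggles page_offset between 0 and 2048, so successive packets
 * alternate halves of one page.  On PAGE_SIZE >= 8192 builds the offset
 * instead advances linearly by truesize until IAVF_LAST_OFFSET is crossed
 * and iavf_can_reuse_rx_page() retires the page.
 */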
12745ec8b7d1SJesse Brandeburg 
12755ec8b7d1SJesse Brandeburg /**
127656184e01SJesse Brandeburg  * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
12775ec8b7d1SJesse Brandeburg  * @rx_ring: rx descriptor ring to transact packets on
12785ec8b7d1SJesse Brandeburg  * @size: size of buffer to add to skb
12795ec8b7d1SJesse Brandeburg  *
12805ec8b7d1SJesse Brandeburg  * This function will pull an Rx buffer from the ring and synchronize it
12815ec8b7d1SJesse Brandeburg  * for use by the CPU.
12825ec8b7d1SJesse Brandeburg  */
128356184e01SJesse Brandeburg static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
12845ec8b7d1SJesse Brandeburg 						 const unsigned int size)
12855ec8b7d1SJesse Brandeburg {
128656184e01SJesse Brandeburg 	struct iavf_rx_buffer *rx_buffer;
12875ec8b7d1SJesse Brandeburg 
1288efa14c39SMitch Williams 	if (!size)
1289efa14c39SMitch Williams 		return NULL;
1290efa14c39SMitch Williams 
12915ec8b7d1SJesse Brandeburg 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
12925ec8b7d1SJesse Brandeburg 	prefetchw(rx_buffer->page);
12935ec8b7d1SJesse Brandeburg 
12945ec8b7d1SJesse Brandeburg 	/* we are reusing so sync this buffer for CPU use */
12955ec8b7d1SJesse Brandeburg 	dma_sync_single_range_for_cpu(rx_ring->dev,
12965ec8b7d1SJesse Brandeburg 				      rx_buffer->dma,
12975ec8b7d1SJesse Brandeburg 				      rx_buffer->page_offset,
12985ec8b7d1SJesse Brandeburg 				      size,
12995ec8b7d1SJesse Brandeburg 				      DMA_FROM_DEVICE);
13005ec8b7d1SJesse Brandeburg 
13015ec8b7d1SJesse Brandeburg 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
13025ec8b7d1SJesse Brandeburg 	rx_buffer->pagecnt_bias--;
13035ec8b7d1SJesse Brandeburg 
13045ec8b7d1SJesse Brandeburg 	return rx_buffer;
13055ec8b7d1SJesse Brandeburg }
13065ec8b7d1SJesse Brandeburg 
13075ec8b7d1SJesse Brandeburg /**
130856184e01SJesse Brandeburg  * iavf_construct_skb - Allocate skb and populate it
13095ec8b7d1SJesse Brandeburg  * @rx_ring: rx descriptor ring to transact packets on
13105ec8b7d1SJesse Brandeburg  * @rx_buffer: rx buffer to pull data from
13115ec8b7d1SJesse Brandeburg  * @size: size of buffer to add to skb
13125ec8b7d1SJesse Brandeburg  *
13135ec8b7d1SJesse Brandeburg  * This function allocates an skb.  It then populates it with the page
13145ec8b7d1SJesse Brandeburg  * data from the current receive descriptor, taking care to set up the
13155ec8b7d1SJesse Brandeburg  * skb correctly.
13165ec8b7d1SJesse Brandeburg  */
131756184e01SJesse Brandeburg static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
131856184e01SJesse Brandeburg 					  struct iavf_rx_buffer *rx_buffer,
13195ec8b7d1SJesse Brandeburg 					  unsigned int size)
13205ec8b7d1SJesse Brandeburg {
13219fe06a51SColin Ian King 	void *va;
13225ec8b7d1SJesse Brandeburg #if (PAGE_SIZE < 8192)
132356184e01SJesse Brandeburg 	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
13245ec8b7d1SJesse Brandeburg #else
13255ec8b7d1SJesse Brandeburg 	unsigned int truesize = SKB_DATA_ALIGN(size);
13265ec8b7d1SJesse Brandeburg #endif
13275ec8b7d1SJesse Brandeburg 	unsigned int headlen;
13285ec8b7d1SJesse Brandeburg 	struct sk_buff *skb;
13295ec8b7d1SJesse Brandeburg 
1330efa14c39SMitch Williams 	if (!rx_buffer)
1331efa14c39SMitch Williams 		return NULL;
13325ec8b7d1SJesse Brandeburg 	/* prefetch first cache line of first page */
13339fe06a51SColin Ian King 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1334f468f21bSTariq Toukan 	net_prefetch(va);
13355ec8b7d1SJesse Brandeburg 
13365ec8b7d1SJesse Brandeburg 	/* allocate a skb to store the frags */
13375ec8b7d1SJesse Brandeburg 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
133856184e01SJesse Brandeburg 			       IAVF_RX_HDR_SIZE,
13395ec8b7d1SJesse Brandeburg 			       GFP_ATOMIC | __GFP_NOWARN);
13405ec8b7d1SJesse Brandeburg 	if (unlikely(!skb))
13415ec8b7d1SJesse Brandeburg 		return NULL;
13425ec8b7d1SJesse Brandeburg 
13435ec8b7d1SJesse Brandeburg 	/* Determine available headroom for copy */
13445ec8b7d1SJesse Brandeburg 	headlen = size;
134556184e01SJesse Brandeburg 	if (headlen > IAVF_RX_HDR_SIZE)
1346c43f1255SStanislav Fomichev 		headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
13475ec8b7d1SJesse Brandeburg 
13485ec8b7d1SJesse Brandeburg 	/* align pull length to size of long to optimize memcpy performance */
13495ec8b7d1SJesse Brandeburg 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
13505ec8b7d1SJesse Brandeburg 
13515ec8b7d1SJesse Brandeburg 	/* update all of the pointers */
13525ec8b7d1SJesse Brandeburg 	size -= headlen;
13535ec8b7d1SJesse Brandeburg 	if (size) {
13545ec8b7d1SJesse Brandeburg 		skb_add_rx_frag(skb, 0, rx_buffer->page,
13555ec8b7d1SJesse Brandeburg 				rx_buffer->page_offset + headlen,
13565ec8b7d1SJesse Brandeburg 				size, truesize);
13575ec8b7d1SJesse Brandeburg 
13585ec8b7d1SJesse Brandeburg 		/* buffer is used by skb, update page_offset */
13595ec8b7d1SJesse Brandeburg #if (PAGE_SIZE < 8192)
13605ec8b7d1SJesse Brandeburg 		rx_buffer->page_offset ^= truesize;
13615ec8b7d1SJesse Brandeburg #else
13625ec8b7d1SJesse Brandeburg 		rx_buffer->page_offset += truesize;
13635ec8b7d1SJesse Brandeburg #endif
13645ec8b7d1SJesse Brandeburg 	} else {
13655ec8b7d1SJesse Brandeburg 		/* buffer is unused, reset bias back to rx_buffer */
13665ec8b7d1SJesse Brandeburg 		rx_buffer->pagecnt_bias++;
13675ec8b7d1SJesse Brandeburg 	}
13685ec8b7d1SJesse Brandeburg 
13695ec8b7d1SJesse Brandeburg 	return skb;
13705ec8b7d1SJesse Brandeburg }
13715ec8b7d1SJesse Brandeburg 
13725ec8b7d1SJesse Brandeburg /**
137356184e01SJesse Brandeburg  * iavf_build_skb - Build skb around an existing buffer
13745ec8b7d1SJesse Brandeburg  * @rx_ring: Rx descriptor ring to transact packets on
13755ec8b7d1SJesse Brandeburg  * @rx_buffer: Rx buffer to pull data from
13765ec8b7d1SJesse Brandeburg  * @size: size of buffer to add to skb
13775ec8b7d1SJesse Brandeburg  *
13785ec8b7d1SJesse Brandeburg  * This function builds an skb around an existing Rx buffer, taking care
13795ec8b7d1SJesse Brandeburg  * to set up the skb correctly and avoid any memcpy overhead.
13805ec8b7d1SJesse Brandeburg  */
138156184e01SJesse Brandeburg static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
138256184e01SJesse Brandeburg 				      struct iavf_rx_buffer *rx_buffer,
13835ec8b7d1SJesse Brandeburg 				      unsigned int size)
13845ec8b7d1SJesse Brandeburg {
13859fe06a51SColin Ian King 	void *va;
13865ec8b7d1SJesse Brandeburg #if (PAGE_SIZE < 8192)
138756184e01SJesse Brandeburg 	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
13885ec8b7d1SJesse Brandeburg #else
13895ec8b7d1SJesse Brandeburg 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
139056184e01SJesse Brandeburg 				SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
13915ec8b7d1SJesse Brandeburg #endif
13925ec8b7d1SJesse Brandeburg 	struct sk_buff *skb;
13935ec8b7d1SJesse Brandeburg 
1394efa14c39SMitch Williams 	if (!rx_buffer)
1395efa14c39SMitch Williams 		return NULL;
13965ec8b7d1SJesse Brandeburg 	/* prefetch first cache line of first page */
13979fe06a51SColin Ian King 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1398f468f21bSTariq Toukan 	net_prefetch(va);
1399f468f21bSTariq Toukan 
14005ec8b7d1SJesse Brandeburg 	/* build an skb around the page buffer */
1401ef687d61SAlexander Lobakin 	skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
14025ec8b7d1SJesse Brandeburg 	if (unlikely(!skb))
14035ec8b7d1SJesse Brandeburg 		return NULL;
14045ec8b7d1SJesse Brandeburg 
14055ec8b7d1SJesse Brandeburg 	/* update pointers within the skb to store the data */
140656184e01SJesse Brandeburg 	skb_reserve(skb, IAVF_SKB_PAD);
14075ec8b7d1SJesse Brandeburg 	__skb_put(skb, size);
14085ec8b7d1SJesse Brandeburg 
14095ec8b7d1SJesse Brandeburg 	/* buffer is used by skb, update page_offset */
14105ec8b7d1SJesse Brandeburg #if (PAGE_SIZE < 8192)
14115ec8b7d1SJesse Brandeburg 	rx_buffer->page_offset ^= truesize;
14125ec8b7d1SJesse Brandeburg #else
14135ec8b7d1SJesse Brandeburg 	rx_buffer->page_offset += truesize;
14145ec8b7d1SJesse Brandeburg #endif
14155ec8b7d1SJesse Brandeburg 
14165ec8b7d1SJesse Brandeburg 	return skb;
14175ec8b7d1SJesse Brandeburg }
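/* Layout sketch for the zero-copy path above, assuming a 2K half-page
 * buffer: napi_build_skb() wraps the region starting at va - IAVF_SKB_PAD,
 * with truesize covering headroom, packet data and the trailing
 * skb_shared_info; skb_reserve() then skips the IAVF_SKB_PAD headroom and
 * __skb_put() exposes exactly the received bytes.  Unlike
 * iavf_construct_skb(), no packet data is copied.
 */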
14185ec8b7d1SJesse Brandeburg 
14195ec8b7d1SJesse Brandeburg /**
142056184e01SJesse Brandeburg  * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
14215ec8b7d1SJesse Brandeburg  * @rx_ring: rx descriptor ring to transact packets on
14225ec8b7d1SJesse Brandeburg  * @rx_buffer: rx buffer to pull data from
14235ec8b7d1SJesse Brandeburg  *
14245ec8b7d1SJesse Brandeburg  * This function will clean up the contents of the rx_buffer.  It will
14255ec8b7d1SJesse Brandeburg  * either recycle the buffer or unmap it and free the associated resources.
14265ec8b7d1SJesse Brandeburg  */
142756184e01SJesse Brandeburg static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
142856184e01SJesse Brandeburg 			       struct iavf_rx_buffer *rx_buffer)
14295ec8b7d1SJesse Brandeburg {
1430efa14c39SMitch Williams 	if (!rx_buffer)
1431efa14c39SMitch Williams 		return;
1432efa14c39SMitch Williams 
143356184e01SJesse Brandeburg 	if (iavf_can_reuse_rx_page(rx_buffer)) {
14345ec8b7d1SJesse Brandeburg 		/* hand second half of page back to the ring */
143556184e01SJesse Brandeburg 		iavf_reuse_rx_page(rx_ring, rx_buffer);
14365ec8b7d1SJesse Brandeburg 		rx_ring->rx_stats.page_reuse_count++;
14375ec8b7d1SJesse Brandeburg 	} else {
14385ec8b7d1SJesse Brandeburg 		/* we are not reusing the buffer so unmap it */
14395ec8b7d1SJesse Brandeburg 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
144056184e01SJesse Brandeburg 				     iavf_rx_pg_size(rx_ring),
144156184e01SJesse Brandeburg 				     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
14425ec8b7d1SJesse Brandeburg 		__page_frag_cache_drain(rx_buffer->page,
14435ec8b7d1SJesse Brandeburg 					rx_buffer->pagecnt_bias);
14445ec8b7d1SJesse Brandeburg 	}
14455ec8b7d1SJesse Brandeburg 
14465ec8b7d1SJesse Brandeburg 	/* clear contents of buffer_info */
14475ec8b7d1SJesse Brandeburg 	rx_buffer->page = NULL;
14485ec8b7d1SJesse Brandeburg }
14495ec8b7d1SJesse Brandeburg 
14505ec8b7d1SJesse Brandeburg /**
145156184e01SJesse Brandeburg  * iavf_is_non_eop - process handling of non-EOP buffers
14525ec8b7d1SJesse Brandeburg  * @rx_ring: Rx ring being processed
14535ec8b7d1SJesse Brandeburg  * @rx_desc: Rx descriptor for current buffer
14545ec8b7d1SJesse Brandeburg  * @skb: Current socket buffer containing buffer in progress
14555ec8b7d1SJesse Brandeburg  *
14565ec8b7d1SJesse Brandeburg  * This function updates next to clean.  If the buffer is an EOP buffer
14575ec8b7d1SJesse Brandeburg  * this function exits returning false, otherwise it will place the
14585ec8b7d1SJesse Brandeburg  * sk_buff in the next buffer to be chained and return true indicating
14595ec8b7d1SJesse Brandeburg  * that this is in fact a non-EOP buffer.
14605ec8b7d1SJesse Brandeburg  **/
146156184e01SJesse Brandeburg static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
146256184e01SJesse Brandeburg 			    union iavf_rx_desc *rx_desc,
14635ec8b7d1SJesse Brandeburg 			    struct sk_buff *skb)
14645ec8b7d1SJesse Brandeburg {
14655ec8b7d1SJesse Brandeburg 	u32 ntc = rx_ring->next_to_clean + 1;
14665ec8b7d1SJesse Brandeburg 
14675ec8b7d1SJesse Brandeburg 	/* fetch, update, and store next to clean */
14685ec8b7d1SJesse Brandeburg 	ntc = (ntc < rx_ring->count) ? ntc : 0;
14695ec8b7d1SJesse Brandeburg 	rx_ring->next_to_clean = ntc;
14705ec8b7d1SJesse Brandeburg 
1471f1cad2ceSJesse Brandeburg 	prefetch(IAVF_RX_DESC(rx_ring, ntc));
14725ec8b7d1SJesse Brandeburg 
14735ec8b7d1SJesse Brandeburg 	/* if we are the last buffer then there is nothing else to do */
147456184e01SJesse Brandeburg #define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
147556184e01SJesse Brandeburg 	if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
14765ec8b7d1SJesse Brandeburg 		return false;
14775ec8b7d1SJesse Brandeburg 
14785ec8b7d1SJesse Brandeburg 	rx_ring->rx_stats.non_eop_descs++;
14795ec8b7d1SJesse Brandeburg 
14805ec8b7d1SJesse Brandeburg 	return true;
14815ec8b7d1SJesse Brandeburg }
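/* Example (illustrative): a 9000-byte frame landing in 2K buffers spans
 * five descriptors; the first four lack IAVF_RXD_EOF, so this returns true
 * and iavf_clean_rx_irq() keeps adding frags to the same skb until the EOP
 * descriptor completes the packet.
 */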
14825ec8b7d1SJesse Brandeburg 
14835ec8b7d1SJesse Brandeburg /**
148456184e01SJesse Brandeburg  * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
14855ec8b7d1SJesse Brandeburg  * @rx_ring: rx descriptor ring to transact packets on
14865ec8b7d1SJesse Brandeburg  * @budget: Total limit on number of packets to process
14875ec8b7d1SJesse Brandeburg  *
14885ec8b7d1SJesse Brandeburg  * This function provides a "bounce buffer" approach to Rx interrupt
14895ec8b7d1SJesse Brandeburg  * processing.  The advantage to this is that on systems that have
14905ec8b7d1SJesse Brandeburg  * expensive overhead for IOMMU access this provides a means of avoiding
14915ec8b7d1SJesse Brandeburg  * it by maintaining the mapping of the page to the system.
14925ec8b7d1SJesse Brandeburg  *
14935ec8b7d1SJesse Brandeburg  * Returns amount of work completed
14945ec8b7d1SJesse Brandeburg  **/
149556184e01SJesse Brandeburg static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
14965ec8b7d1SJesse Brandeburg {
14975ec8b7d1SJesse Brandeburg 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
14985ec8b7d1SJesse Brandeburg 	struct sk_buff *skb = rx_ring->skb;
149956184e01SJesse Brandeburg 	u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
15005ec8b7d1SJesse Brandeburg 	bool failure = false;
15015ec8b7d1SJesse Brandeburg 
15025ec8b7d1SJesse Brandeburg 	while (likely(total_rx_packets < (unsigned int)budget)) {
150356184e01SJesse Brandeburg 		struct iavf_rx_buffer *rx_buffer;
150456184e01SJesse Brandeburg 		union iavf_rx_desc *rx_desc;
15055ec8b7d1SJesse Brandeburg 		unsigned int size;
1506ccd219d2SBrett Creeley 		u16 vlan_tag = 0;
15075ec8b7d1SJesse Brandeburg 		u8 rx_ptype;
15085ec8b7d1SJesse Brandeburg 		u64 qword;
15095ec8b7d1SJesse Brandeburg 
15105ec8b7d1SJesse Brandeburg 		/* return some buffers to hardware, one at a time is too slow */
151156184e01SJesse Brandeburg 		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
15125ec8b7d1SJesse Brandeburg 			failure = failure ||
15135ec8b7d1SJesse Brandeburg 				  iavf_alloc_rx_buffers(rx_ring, cleaned_count);
15145ec8b7d1SJesse Brandeburg 			cleaned_count = 0;
15155ec8b7d1SJesse Brandeburg 		}
15165ec8b7d1SJesse Brandeburg 
1517f1cad2ceSJesse Brandeburg 		rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
15185ec8b7d1SJesse Brandeburg 
15195ec8b7d1SJesse Brandeburg 		/* status_error_len will always be zero for unused descriptors
15205ec8b7d1SJesse Brandeburg 		 * because it's cleared in cleanup, and overlaps with hdr_addr
15215ec8b7d1SJesse Brandeburg 		 * which is always zero because packet split isn't used, if the
15225ec8b7d1SJesse Brandeburg 		 * hardware wrote DD then the length will be non-zero
15235ec8b7d1SJesse Brandeburg 		 */
15245ec8b7d1SJesse Brandeburg 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
15255ec8b7d1SJesse Brandeburg 
15265ec8b7d1SJesse Brandeburg 		/* This memory barrier is needed to keep us from reading
15275ec8b7d1SJesse Brandeburg 		 * any other fields out of the rx_desc until we have
15285ec8b7d1SJesse Brandeburg 		 * verified the descriptor has been written back.
15295ec8b7d1SJesse Brandeburg 		 */
15305ec8b7d1SJesse Brandeburg 		dma_rmb();
1531efa14c39SMitch Williams #define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
1532efa14c39SMitch Williams 		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
1533efa14c39SMitch Williams 			break;
15345ec8b7d1SJesse Brandeburg 
153556184e01SJesse Brandeburg 		size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
153656184e01SJesse Brandeburg 		       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
15375ec8b7d1SJesse Brandeburg 
1538ad64ed8bSJesse Brandeburg 		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
153956184e01SJesse Brandeburg 		rx_buffer = iavf_get_rx_buffer(rx_ring, size);
15405ec8b7d1SJesse Brandeburg 
15415ec8b7d1SJesse Brandeburg 		/* retrieve a buffer from the ring */
15425ec8b7d1SJesse Brandeburg 		if (skb)
154356184e01SJesse Brandeburg 			iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
15445ec8b7d1SJesse Brandeburg 		else if (ring_uses_build_skb(rx_ring))
154556184e01SJesse Brandeburg 			skb = iavf_build_skb(rx_ring, rx_buffer, size);
15465ec8b7d1SJesse Brandeburg 		else
154756184e01SJesse Brandeburg 			skb = iavf_construct_skb(rx_ring, rx_buffer, size);
15485ec8b7d1SJesse Brandeburg 
15495ec8b7d1SJesse Brandeburg 		/* exit if we failed to retrieve a buffer */
15505ec8b7d1SJesse Brandeburg 		if (!skb) {
15515ec8b7d1SJesse Brandeburg 			rx_ring->rx_stats.alloc_buff_failed++;
1552efa14c39SMitch Williams 			if (rx_buffer)
15535ec8b7d1SJesse Brandeburg 				rx_buffer->pagecnt_bias++;
15545ec8b7d1SJesse Brandeburg 			break;
15555ec8b7d1SJesse Brandeburg 		}
15565ec8b7d1SJesse Brandeburg 
155756184e01SJesse Brandeburg 		iavf_put_rx_buffer(rx_ring, rx_buffer);
15585ec8b7d1SJesse Brandeburg 		cleaned_count++;
15595ec8b7d1SJesse Brandeburg 
156056184e01SJesse Brandeburg 		if (iavf_is_non_eop(rx_ring, rx_desc, skb))
15615ec8b7d1SJesse Brandeburg 			continue;
15625ec8b7d1SJesse Brandeburg 
15635ec8b7d1SJesse Brandeburg 		/* ERR_MASK will only have valid bits if EOP set, and
15645ec8b7d1SJesse Brandeburg 		 * what we are doing here is actually checking
1565f1cad2ceSJesse Brandeburg 		 * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
15665ec8b7d1SJesse Brandeburg 		 * the error field
15675ec8b7d1SJesse Brandeburg 		 */
156856184e01SJesse Brandeburg 		if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
15695ec8b7d1SJesse Brandeburg 			dev_kfree_skb_any(skb);
15705ec8b7d1SJesse Brandeburg 			skb = NULL;
15715ec8b7d1SJesse Brandeburg 			continue;
15725ec8b7d1SJesse Brandeburg 		}
15735ec8b7d1SJesse Brandeburg 
157456184e01SJesse Brandeburg 		if (iavf_cleanup_headers(rx_ring, skb)) {
15755ec8b7d1SJesse Brandeburg 			skb = NULL;
15765ec8b7d1SJesse Brandeburg 			continue;
15775ec8b7d1SJesse Brandeburg 		}
15785ec8b7d1SJesse Brandeburg 
15795ec8b7d1SJesse Brandeburg 		/* probably a little skewed due to removing CRC */
15805ec8b7d1SJesse Brandeburg 		total_rx_bytes += skb->len;
15815ec8b7d1SJesse Brandeburg 
15825ec8b7d1SJesse Brandeburg 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
158356184e01SJesse Brandeburg 		rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
158456184e01SJesse Brandeburg 			   IAVF_RXD_QW1_PTYPE_SHIFT;
15855ec8b7d1SJesse Brandeburg 
15865ec8b7d1SJesse Brandeburg 		/* populate checksum, VLAN, and protocol */
15875ec8b7d1SJesse Brandeburg 		iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
15885ec8b7d1SJesse Brandeburg 
1589ccd219d2SBrett Creeley 		if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
1590ccd219d2SBrett Creeley 		    rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
1591ccd219d2SBrett Creeley 			vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
1592ccd219d2SBrett Creeley 		if (rx_desc->wb.qword2.ext_status &
1593ccd219d2SBrett Creeley 		    cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
1594ccd219d2SBrett Creeley 		    rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
1595ccd219d2SBrett Creeley 			vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
15965ec8b7d1SJesse Brandeburg 
1597ad64ed8bSJesse Brandeburg 		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
159856184e01SJesse Brandeburg 		iavf_receive_skb(rx_ring, skb, vlan_tag);
15995ec8b7d1SJesse Brandeburg 		skb = NULL;
16005ec8b7d1SJesse Brandeburg 
16015ec8b7d1SJesse Brandeburg 		/* update budget accounting */
16025ec8b7d1SJesse Brandeburg 		total_rx_packets++;
16035ec8b7d1SJesse Brandeburg 	}
16045ec8b7d1SJesse Brandeburg 
16055ec8b7d1SJesse Brandeburg 	rx_ring->skb = skb;
16065ec8b7d1SJesse Brandeburg 
16075ec8b7d1SJesse Brandeburg 	u64_stats_update_begin(&rx_ring->syncp);
16085ec8b7d1SJesse Brandeburg 	rx_ring->stats.packets += total_rx_packets;
16095ec8b7d1SJesse Brandeburg 	rx_ring->stats.bytes += total_rx_bytes;
16105ec8b7d1SJesse Brandeburg 	u64_stats_update_end(&rx_ring->syncp);
16115ec8b7d1SJesse Brandeburg 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
16125ec8b7d1SJesse Brandeburg 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
16135ec8b7d1SJesse Brandeburg 
16145ec8b7d1SJesse Brandeburg 	/* guarantee a trip back through this routine if there was a failure */
16155ec8b7d1SJesse Brandeburg 	return failure ? budget : (int)total_rx_packets;
16165ec8b7d1SJesse Brandeburg }
16175ec8b7d1SJesse Brandeburg 
161856184e01SJesse Brandeburg static inline u32 iavf_buildreg_itr(const int type, u16 itr)
16195ec8b7d1SJesse Brandeburg {
16205ec8b7d1SJesse Brandeburg 	u32 val;
16215ec8b7d1SJesse Brandeburg 
16225ec8b7d1SJesse Brandeburg 	/* We don't bother with setting the CLEARPBA bit as the data sheet
16235ec8b7d1SJesse Brandeburg 	 * points out doing so is "meaningless since it was already
16245ec8b7d1SJesse Brandeburg 	 * auto-cleared". The auto-clearing happens when the interrupt is
16255ec8b7d1SJesse Brandeburg 	 * asserted.
16265ec8b7d1SJesse Brandeburg 	 *
16275ec8b7d1SJesse Brandeburg 	 * Hardware errata 28 also indicates that writing to a
16285ec8b7d1SJesse Brandeburg 	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
16295ec8b7d1SJesse Brandeburg 	 * an event in the PBA anyway so we need to rely on the automask
16305ec8b7d1SJesse Brandeburg 	 * to hold pending events for us until the interrupt is re-enabled
16315ec8b7d1SJesse Brandeburg 	 *
16325ec8b7d1SJesse Brandeburg 	 * The itr value is reported in microseconds, and the register
16335ec8b7d1SJesse Brandeburg 	 * value is recorded in 2 microsecond units. For this reason we
16345ec8b7d1SJesse Brandeburg 	 * only need to shift by the interval shift - 1 instead of the
16355ec8b7d1SJesse Brandeburg 	 * full value.
16365ec8b7d1SJesse Brandeburg 	 */
163756184e01SJesse Brandeburg 	itr &= IAVF_ITR_MASK;
16385ec8b7d1SJesse Brandeburg 
1639f1cad2ceSJesse Brandeburg 	val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1640f1cad2ceSJesse Brandeburg 	      (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1641f1cad2ceSJesse Brandeburg 	      (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
16425ec8b7d1SJesse Brandeburg 
16435ec8b7d1SJesse Brandeburg 	return val;
16445ec8b7d1SJesse Brandeburg }
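/* Worked example of the shift trick described above: for itr == 50 (in
 * microseconds) the INTERVAL field must hold 25 two-microsecond ticks, and
 * (50 >> 1) << IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT equals
 * 50 << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1), so one shift performs
 * both the divide-by-two and the field placement.
 */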
16455ec8b7d1SJesse Brandeburg 
16465ec8b7d1SJesse Brandeburg /* a small macro to shorten up some long lines */
1647f1cad2ceSJesse Brandeburg #define INTREG IAVF_VFINT_DYN_CTLN1
16485ec8b7d1SJesse Brandeburg 
16495ec8b7d1SJesse Brandeburg /* The act of updating the ITR will cause it to immediately trigger. In order
16505ec8b7d1SJesse Brandeburg  * to prevent this from throwing off adaptive update statistics we defer the
16515ec8b7d1SJesse Brandeburg  * update so that it can only happen so often. So after either Tx or Rx are
16525ec8b7d1SJesse Brandeburg  * updated we make the adaptive scheme wait until either the ITR completely
16535ec8b7d1SJesse Brandeburg  * expires via the next_update expiration or we have been through at least
16545ec8b7d1SJesse Brandeburg  * 3 interrupts.
16555ec8b7d1SJesse Brandeburg  */
16565ec8b7d1SJesse Brandeburg #define ITR_COUNTDOWN_START 3
16575ec8b7d1SJesse Brandeburg 
16585ec8b7d1SJesse Brandeburg /**
165956184e01SJesse Brandeburg  * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
16605ec8b7d1SJesse Brandeburg  * @vsi: the VSI we care about
16615ec8b7d1SJesse Brandeburg  * @q_vector: q_vector for which itr is being updated and interrupt enabled
16625ec8b7d1SJesse Brandeburg  *
16635ec8b7d1SJesse Brandeburg  **/
166456184e01SJesse Brandeburg static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
166556184e01SJesse Brandeburg 					  struct iavf_q_vector *q_vector)
16665ec8b7d1SJesse Brandeburg {
1667f349daa5SJesse Brandeburg 	struct iavf_hw *hw = &vsi->back->hw;
16685ec8b7d1SJesse Brandeburg 	u32 intval;
16695ec8b7d1SJesse Brandeburg 
16705ec8b7d1SJesse Brandeburg 	/* These will do nothing if dynamic updates are not enabled */
167156184e01SJesse Brandeburg 	iavf_update_itr(q_vector, &q_vector->tx);
167256184e01SJesse Brandeburg 	iavf_update_itr(q_vector, &q_vector->rx);
16735ec8b7d1SJesse Brandeburg 
16745ec8b7d1SJesse Brandeburg 	/* This block of logic allows us to get away with only updating
16755ec8b7d1SJesse Brandeburg 	 * one ITR value with each interrupt. The idea is to perform a
16765ec8b7d1SJesse Brandeburg 	 * pseudo-lazy update with the following criteria.
16775ec8b7d1SJesse Brandeburg 	 *
16785ec8b7d1SJesse Brandeburg 	 * 1. Rx is given higher priority than Tx if both are in same state
16795ec8b7d1SJesse Brandeburg 	 * 2. If we must reduce an ITR, that reduction is given highest priority.
16805ec8b7d1SJesse Brandeburg 	 * 3. We then give priority to increasing ITR based on amount.
16815ec8b7d1SJesse Brandeburg 	 */
16825ec8b7d1SJesse Brandeburg 	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
16835ec8b7d1SJesse Brandeburg 		/* Rx ITR needs to be reduced, this is highest priority */
168456184e01SJesse Brandeburg 		intval = iavf_buildreg_itr(IAVF_RX_ITR,
16855ec8b7d1SJesse Brandeburg 					   q_vector->rx.target_itr);
16865ec8b7d1SJesse Brandeburg 		q_vector->rx.current_itr = q_vector->rx.target_itr;
16875ec8b7d1SJesse Brandeburg 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
16885ec8b7d1SJesse Brandeburg 	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
16895ec8b7d1SJesse Brandeburg 		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
16905ec8b7d1SJesse Brandeburg 		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
16915ec8b7d1SJesse Brandeburg 		/* Tx ITR needs to be reduced, this is second priority
16925ec8b7d1SJesse Brandeburg 		 * Tx ITR needs to be increased more than Rx, fourth priority
16935ec8b7d1SJesse Brandeburg 		 */
169456184e01SJesse Brandeburg 		intval = iavf_buildreg_itr(IAVF_TX_ITR,
16955ec8b7d1SJesse Brandeburg 					   q_vector->tx.target_itr);
16965ec8b7d1SJesse Brandeburg 		q_vector->tx.current_itr = q_vector->tx.target_itr;
16975ec8b7d1SJesse Brandeburg 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
16985ec8b7d1SJesse Brandeburg 	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
16995ec8b7d1SJesse Brandeburg 		/* Rx ITR needs to be increased, third priority */
170056184e01SJesse Brandeburg 		intval = iavf_buildreg_itr(IAVF_RX_ITR,
17015ec8b7d1SJesse Brandeburg 					   q_vector->rx.target_itr);
17025ec8b7d1SJesse Brandeburg 		q_vector->rx.current_itr = q_vector->rx.target_itr;
17035ec8b7d1SJesse Brandeburg 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
17045ec8b7d1SJesse Brandeburg 	} else {
17055ec8b7d1SJesse Brandeburg 		/* No ITR update, lowest priority */
170656184e01SJesse Brandeburg 		intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
17075ec8b7d1SJesse Brandeburg 		if (q_vector->itr_countdown)
17085ec8b7d1SJesse Brandeburg 			q_vector->itr_countdown--;
17095ec8b7d1SJesse Brandeburg 	}
17105ec8b7d1SJesse Brandeburg 
171156184e01SJesse Brandeburg 	if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
17125ec8b7d1SJesse Brandeburg 		wr32(hw, INTREG(q_vector->reg_idx), intval);
17135ec8b7d1SJesse Brandeburg }
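/* Illustrative walk of the priority scheme: with rx.target_itr == 8us and
 * rx.current_itr == 20us, the first branch wins and only the Rx ITR is
 * written on this interrupt; a pending Tx increase waits for a later pass.
 * That is the "one ITR value per interrupt" compromise described above.
 */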
17145ec8b7d1SJesse Brandeburg 
17155ec8b7d1SJesse Brandeburg /**
17165ec8b7d1SJesse Brandeburg  * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
17175ec8b7d1SJesse Brandeburg  * @napi: napi struct with our devices info in it
17185ec8b7d1SJesse Brandeburg  * @budget: amount of work driver is allowed to do this pass, in packets
17195ec8b7d1SJesse Brandeburg  *
17205ec8b7d1SJesse Brandeburg  * This function will clean all queues associated with a q_vector.
17215ec8b7d1SJesse Brandeburg  *
17225ec8b7d1SJesse Brandeburg  * Returns the amount of work done
17235ec8b7d1SJesse Brandeburg  **/
17245ec8b7d1SJesse Brandeburg int iavf_napi_poll(struct napi_struct *napi, int budget)
17255ec8b7d1SJesse Brandeburg {
172656184e01SJesse Brandeburg 	struct iavf_q_vector *q_vector =
172756184e01SJesse Brandeburg 			       container_of(napi, struct iavf_q_vector, napi);
172856184e01SJesse Brandeburg 	struct iavf_vsi *vsi = q_vector->vsi;
172956184e01SJesse Brandeburg 	struct iavf_ring *ring;
17305ec8b7d1SJesse Brandeburg 	bool clean_complete = true;
17315ec8b7d1SJesse Brandeburg 	bool arm_wb = false;
17325ec8b7d1SJesse Brandeburg 	int budget_per_ring;
17335ec8b7d1SJesse Brandeburg 	int work_done = 0;
17345ec8b7d1SJesse Brandeburg 
173556184e01SJesse Brandeburg 	if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
17365ec8b7d1SJesse Brandeburg 		napi_complete(napi);
17375ec8b7d1SJesse Brandeburg 		return 0;
17385ec8b7d1SJesse Brandeburg 	}
17395ec8b7d1SJesse Brandeburg 
17405ec8b7d1SJesse Brandeburg 	/* Since the actual Tx work is minimal, we can give the Tx a larger
17415ec8b7d1SJesse Brandeburg 	 * budget and be more aggressive about cleaning up the Tx descriptors.
17425ec8b7d1SJesse Brandeburg 	 */
174356184e01SJesse Brandeburg 	iavf_for_each_ring(ring, q_vector->tx) {
174456184e01SJesse Brandeburg 		if (!iavf_clean_tx_irq(vsi, ring, budget)) {
17455ec8b7d1SJesse Brandeburg 			clean_complete = false;
17465ec8b7d1SJesse Brandeburg 			continue;
17475ec8b7d1SJesse Brandeburg 		}
17485ec8b7d1SJesse Brandeburg 		arm_wb |= ring->arm_wb;
17495ec8b7d1SJesse Brandeburg 		ring->arm_wb = false;
17505ec8b7d1SJesse Brandeburg 	}
17515ec8b7d1SJesse Brandeburg 
17525ec8b7d1SJesse Brandeburg 	/* Handle case where we are called by netpoll with a budget of 0 */
17535ec8b7d1SJesse Brandeburg 	if (budget <= 0)
17545ec8b7d1SJesse Brandeburg 		goto tx_only;
17555ec8b7d1SJesse Brandeburg 
17565ec8b7d1SJesse Brandeburg 	/* We attempt to distribute budget to each Rx queue fairly, but don't
17575ec8b7d1SJesse Brandeburg 	 * allow the budget to go below 1 because that would exit polling early.
17585ec8b7d1SJesse Brandeburg 	 */
17595ec8b7d1SJesse Brandeburg 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
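	/* e.g. with a typical NAPI budget of 64 and 4 ring pairs, each Rx
	 * ring may clean up to 16 packets this poll; the max() above keeps
	 * integer division from ever handing a ring a budget of zero.
	 */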
17605ec8b7d1SJesse Brandeburg 
176156184e01SJesse Brandeburg 	iavf_for_each_ring(ring, q_vector->rx) {
176256184e01SJesse Brandeburg 		int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
17635ec8b7d1SJesse Brandeburg 
17645ec8b7d1SJesse Brandeburg 		work_done += cleaned;
17655ec8b7d1SJesse Brandeburg 		/* if we clean as many as budgeted, we must not be done */
17665ec8b7d1SJesse Brandeburg 		if (cleaned >= budget_per_ring)
17675ec8b7d1SJesse Brandeburg 			clean_complete = false;
17685ec8b7d1SJesse Brandeburg 	}
17695ec8b7d1SJesse Brandeburg 
17705ec8b7d1SJesse Brandeburg 	/* If work not completed, return budget and polling will return */
17715ec8b7d1SJesse Brandeburg 	if (!clean_complete) {
17725ec8b7d1SJesse Brandeburg 		int cpu_id = smp_processor_id();
17735ec8b7d1SJesse Brandeburg 
17745ec8b7d1SJesse Brandeburg 		/* It is possible that the interrupt affinity has changed but,
17755ec8b7d1SJesse Brandeburg 		 * if the cpu is pegged at 100%, polling will never exit while
17765ec8b7d1SJesse Brandeburg 		 * traffic continues and the interrupt will be stuck on this
17775ec8b7d1SJesse Brandeburg 		 * cpu.  We check to make sure affinity is correct before we
17785ec8b7d1SJesse Brandeburg 		 * continue to poll, otherwise we must stop polling so the
17795ec8b7d1SJesse Brandeburg 		 * interrupt can move to the correct cpu.
17805ec8b7d1SJesse Brandeburg 		 */
17815ec8b7d1SJesse Brandeburg 		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
17825ec8b7d1SJesse Brandeburg 			/* Tell napi that we are done polling */
17835ec8b7d1SJesse Brandeburg 			napi_complete_done(napi, work_done);
17845ec8b7d1SJesse Brandeburg 
17855ec8b7d1SJesse Brandeburg 			/* Force an interrupt */
17865ec8b7d1SJesse Brandeburg 			iavf_force_wb(vsi, q_vector);
17875ec8b7d1SJesse Brandeburg 
17885ec8b7d1SJesse Brandeburg 			/* Return budget-1 so that polling stops */
17895ec8b7d1SJesse Brandeburg 			return budget - 1;
17905ec8b7d1SJesse Brandeburg 		}
17915ec8b7d1SJesse Brandeburg tx_only:
17925ec8b7d1SJesse Brandeburg 		if (arm_wb) {
17935ec8b7d1SJesse Brandeburg 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
179456184e01SJesse Brandeburg 			iavf_enable_wb_on_itr(vsi, q_vector);
17955ec8b7d1SJesse Brandeburg 		}
17965ec8b7d1SJesse Brandeburg 		return budget;
17975ec8b7d1SJesse Brandeburg 	}
17985ec8b7d1SJesse Brandeburg 
179956184e01SJesse Brandeburg 	if (vsi->back->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
18005ec8b7d1SJesse Brandeburg 		q_vector->arm_wb_state = false;
18015ec8b7d1SJesse Brandeburg 
18020bcd952fSJesse Brandeburg 	/* Exit the polling mode, but don't re-enable interrupts if stack might
18030bcd952fSJesse Brandeburg 	 * poll us due to busy-polling
18040bcd952fSJesse Brandeburg 	 */
18050bcd952fSJesse Brandeburg 	if (likely(napi_complete_done(napi, work_done)))
180656184e01SJesse Brandeburg 		iavf_update_enable_itr(vsi, q_vector);
18075ec8b7d1SJesse Brandeburg 
1808349181b7SKaren Sornek 	return min_t(int, work_done, budget - 1);
18095ec8b7d1SJesse Brandeburg }
18105ec8b7d1SJesse Brandeburg 
18115ec8b7d1SJesse Brandeburg /**
18125ec8b7d1SJesse Brandeburg  * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
18135ec8b7d1SJesse Brandeburg  * @skb:     send buffer
18145ec8b7d1SJesse Brandeburg  * @tx_ring: ring to send buffer on
18155ec8b7d1SJesse Brandeburg  * @flags:   the tx flags to be set
18165ec8b7d1SJesse Brandeburg  *
18175ec8b7d1SJesse Brandeburg  * Checks the skb and sets up the corresponding generic transmit flags
18185ec8b7d1SJesse Brandeburg  * related to VLAN tagging for the HW, such as VLAN and DCB.
18195ec8b7d1SJesse Brandeburg  *
18205ec8b7d1SJesse Brandeburg  * If the tag cannot be placed in the location the ring is configured for,
18215ec8b7d1SJesse Brandeburg  * the frame is sent untagged and a debug message is logged.
18225ec8b7d1SJesse Brandeburg  **/
1823ccd219d2SBrett Creeley static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1824ccd219d2SBrett Creeley 				       struct iavf_ring *tx_ring, u32 *flags)
18255ec8b7d1SJesse Brandeburg {
18265ec8b7d1SJesse Brandeburg 	u32  tx_flags = 0;
18275ec8b7d1SJesse Brandeburg 
1829ccd219d2SBrett Creeley 	/* stack will only request hardware VLAN insertion offload for protocols
1830ccd219d2SBrett Creeley 	 * that the driver supports and has enabled
18315ec8b7d1SJesse Brandeburg 	 */
1832ccd219d2SBrett Creeley 	if (!skb_vlan_tag_present(skb))
1833ccd219d2SBrett Creeley 		return;
18345ec8b7d1SJesse Brandeburg 
183556184e01SJesse Brandeburg 	tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1836ccd219d2SBrett Creeley 	if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
1837ccd219d2SBrett Creeley 		tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1838ccd219d2SBrett Creeley 	} else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
183956184e01SJesse Brandeburg 		tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1840ccd219d2SBrett Creeley 	} else {
1841ccd219d2SBrett Creeley 		dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
1842ccd219d2SBrett Creeley 		return;
18435ec8b7d1SJesse Brandeburg 	}
18445ec8b7d1SJesse Brandeburg 
18455ec8b7d1SJesse Brandeburg 	*flags = tx_flags;
18465ec8b7d1SJesse Brandeburg }
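/* Illustrative flag packing, assuming the TCI occupies the bits selected
 * by IAVF_TX_FLAGS_VLAN_SHIFT: for tag 100 on a ring using L2TAG1, *flags
 * becomes (100 << IAVF_TX_FLAGS_VLAN_SHIFT) | IAVF_TX_FLAGS_HW_VLAN, and
 * the transmit path later recovers the TCI by shifting back down.
 */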
18475ec8b7d1SJesse Brandeburg 
18485ec8b7d1SJesse Brandeburg /**
184956184e01SJesse Brandeburg  * iavf_tso - set up the tso context descriptor
18505ec8b7d1SJesse Brandeburg  * @first:    pointer to first Tx buffer for xmit
18515ec8b7d1SJesse Brandeburg  * @hdr_len:  ptr to the size of the packet header
18525ec8b7d1SJesse Brandeburg  * @cd_type_cmd_tso_mss: Quad Word 1
18535ec8b7d1SJesse Brandeburg  *
18545ec8b7d1SJesse Brandeburg  * Returns 0 if no TSO can happen, 1 if tso is going, or error
18555ec8b7d1SJesse Brandeburg  **/
185656184e01SJesse Brandeburg static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
18575ec8b7d1SJesse Brandeburg 		    u64 *cd_type_cmd_tso_mss)
18585ec8b7d1SJesse Brandeburg {
18595ec8b7d1SJesse Brandeburg 	struct sk_buff *skb = first->skb;
18605ec8b7d1SJesse Brandeburg 	u64 cd_cmd, cd_tso_len, cd_mss;
18615ec8b7d1SJesse Brandeburg 	union {
18625ec8b7d1SJesse Brandeburg 		struct iphdr *v4;
18635ec8b7d1SJesse Brandeburg 		struct ipv6hdr *v6;
18645ec8b7d1SJesse Brandeburg 		unsigned char *hdr;
18655ec8b7d1SJesse Brandeburg 	} ip;
18665ec8b7d1SJesse Brandeburg 	union {
18675ec8b7d1SJesse Brandeburg 		struct tcphdr *tcp;
18685ec8b7d1SJesse Brandeburg 		struct udphdr *udp;
18695ec8b7d1SJesse Brandeburg 		unsigned char *hdr;
18705ec8b7d1SJesse Brandeburg 	} l4;
18715ec8b7d1SJesse Brandeburg 	u32 paylen, l4_offset;
18725ec8b7d1SJesse Brandeburg 	u16 gso_segs, gso_size;
18735ec8b7d1SJesse Brandeburg 	int err;
18745ec8b7d1SJesse Brandeburg 
18755ec8b7d1SJesse Brandeburg 	if (skb->ip_summed != CHECKSUM_PARTIAL)
18765ec8b7d1SJesse Brandeburg 		return 0;
18775ec8b7d1SJesse Brandeburg 
18785ec8b7d1SJesse Brandeburg 	if (!skb_is_gso(skb))
18795ec8b7d1SJesse Brandeburg 		return 0;
18805ec8b7d1SJesse Brandeburg 
18815ec8b7d1SJesse Brandeburg 	err = skb_cow_head(skb, 0);
18825ec8b7d1SJesse Brandeburg 	if (err < 0)
18835ec8b7d1SJesse Brandeburg 		return err;
18845ec8b7d1SJesse Brandeburg 
18855ec8b7d1SJesse Brandeburg 	ip.hdr = skb_network_header(skb);
18865ec8b7d1SJesse Brandeburg 	l4.hdr = skb_transport_header(skb);
18875ec8b7d1SJesse Brandeburg 
18885ec8b7d1SJesse Brandeburg 	/* initialize outer IP header fields */
18895ec8b7d1SJesse Brandeburg 	if (ip.v4->version == 4) {
18905ec8b7d1SJesse Brandeburg 		ip.v4->tot_len = 0;
18915ec8b7d1SJesse Brandeburg 		ip.v4->check = 0;
18925ec8b7d1SJesse Brandeburg 	} else {
18935ec8b7d1SJesse Brandeburg 		ip.v6->payload_len = 0;
18945ec8b7d1SJesse Brandeburg 	}
18955ec8b7d1SJesse Brandeburg 
18965ec8b7d1SJesse Brandeburg 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
18975ec8b7d1SJesse Brandeburg 					 SKB_GSO_GRE_CSUM |
18985ec8b7d1SJesse Brandeburg 					 SKB_GSO_IPXIP4 |
18995ec8b7d1SJesse Brandeburg 					 SKB_GSO_IPXIP6 |
19005ec8b7d1SJesse Brandeburg 					 SKB_GSO_UDP_TUNNEL |
19015ec8b7d1SJesse Brandeburg 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
19025ec8b7d1SJesse Brandeburg 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
19035ec8b7d1SJesse Brandeburg 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
19045ec8b7d1SJesse Brandeburg 			l4.udp->len = 0;
19055ec8b7d1SJesse Brandeburg 
19065ec8b7d1SJesse Brandeburg 			/* determine offset of outer transport header */
19075ec8b7d1SJesse Brandeburg 			l4_offset = l4.hdr - skb->data;
19085ec8b7d1SJesse Brandeburg 
19095ec8b7d1SJesse Brandeburg 			/* remove payload length from outer checksum */
19105ec8b7d1SJesse Brandeburg 			paylen = skb->len - l4_offset;
19115ec8b7d1SJesse Brandeburg 			csum_replace_by_diff(&l4.udp->check,
19125ec8b7d1SJesse Brandeburg 					     (__force __wsum)htonl(paylen));
19135ec8b7d1SJesse Brandeburg 		}
19145ec8b7d1SJesse Brandeburg 
19155ec8b7d1SJesse Brandeburg 		/* reset pointers to inner headers */
19165ec8b7d1SJesse Brandeburg 		ip.hdr = skb_inner_network_header(skb);
19175ec8b7d1SJesse Brandeburg 		l4.hdr = skb_inner_transport_header(skb);
19185ec8b7d1SJesse Brandeburg 
19195ec8b7d1SJesse Brandeburg 		/* initialize inner IP header fields */
19205ec8b7d1SJesse Brandeburg 		if (ip.v4->version == 4) {
19215ec8b7d1SJesse Brandeburg 			ip.v4->tot_len = 0;
19225ec8b7d1SJesse Brandeburg 			ip.v4->check = 0;
19235ec8b7d1SJesse Brandeburg 		} else {
19245ec8b7d1SJesse Brandeburg 			ip.v6->payload_len = 0;
19255ec8b7d1SJesse Brandeburg 		}
19265ec8b7d1SJesse Brandeburg 	}
19275ec8b7d1SJesse Brandeburg 
19285ec8b7d1SJesse Brandeburg 	/* determine offset of inner transport header */
19295ec8b7d1SJesse Brandeburg 	l4_offset = l4.hdr - skb->data;
19305ec8b7d1SJesse Brandeburg 	/* remove payload length from inner checksum */
19315ec8b7d1SJesse Brandeburg 	paylen = skb->len - l4_offset;
19325ec8b7d1SJesse Brandeburg 
1933c91a4f9fSBrett Creeley 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1934c91a4f9fSBrett Creeley 		csum_replace_by_diff(&l4.udp->check,
1935c91a4f9fSBrett Creeley 				     (__force __wsum)htonl(paylen));
1936c91a4f9fSBrett Creeley 		/* compute length of UDP segmentation header */
1937c91a4f9fSBrett Creeley 		*hdr_len = (u8)(sizeof(*l4.udp) + l4_offset);
1938c91a4f9fSBrett Creeley 	} else {
1939c91a4f9fSBrett Creeley 		csum_replace_by_diff(&l4.tcp->check,
1940c91a4f9fSBrett Creeley 				     (__force __wsum)htonl(paylen));
1941c91a4f9fSBrett Creeley 		/* compute length of TCP segmentation header */
1942c91a4f9fSBrett Creeley 		*hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
1943c91a4f9fSBrett Creeley 	}
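	/* Worked example (illustrative): TSO over TCP with a 20-byte TCP
	 * header (doff == 5) at l4_offset == 34 yields *hdr_len == 54, so
	 * cd_tso_len below excludes the Ethernet/IP/TCP headers from the
	 * payload length programmed into the context descriptor.
	 */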
19445ec8b7d1SJesse Brandeburg 
19455ec8b7d1SJesse Brandeburg 	/* pull values out of skb_shinfo */
19465ec8b7d1SJesse Brandeburg 	gso_size = skb_shinfo(skb)->gso_size;
19475ec8b7d1SJesse Brandeburg 	gso_segs = skb_shinfo(skb)->gso_segs;
19485ec8b7d1SJesse Brandeburg 
19495ec8b7d1SJesse Brandeburg 	/* update gso_segs and bytecount with header size */
19505ec8b7d1SJesse Brandeburg 	first->gso_segs = gso_segs;
19515ec8b7d1SJesse Brandeburg 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
19525ec8b7d1SJesse Brandeburg 
19535ec8b7d1SJesse Brandeburg 	/* find the field values */
195456184e01SJesse Brandeburg 	cd_cmd = IAVF_TX_CTX_DESC_TSO;
19555ec8b7d1SJesse Brandeburg 	cd_tso_len = skb->len - *hdr_len;
19565ec8b7d1SJesse Brandeburg 	cd_mss = gso_size;
195756184e01SJesse Brandeburg 	*cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
195856184e01SJesse Brandeburg 				(cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
195956184e01SJesse Brandeburg 				(cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
19605ec8b7d1SJesse Brandeburg 	return 1;
19615ec8b7d1SJesse Brandeburg }
19625ec8b7d1SJesse Brandeburg 
19635ec8b7d1SJesse Brandeburg /**
196456184e01SJesse Brandeburg  * iavf_tx_enable_csum - Enable Tx checksum offloads
19655ec8b7d1SJesse Brandeburg  * @skb: send buffer
19665ec8b7d1SJesse Brandeburg  * @tx_flags: pointer to Tx flags currently set
19675ec8b7d1SJesse Brandeburg  * @td_cmd: Tx descriptor command bits to set
19685ec8b7d1SJesse Brandeburg  * @td_offset: Tx descriptor header offsets to set
19695ec8b7d1SJesse Brandeburg  * @tx_ring: Tx descriptor ring
19705ec8b7d1SJesse Brandeburg  * @cd_tunneling: ptr to context desc bits
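 *
 * Returns 1 if a checksum offload was programmed, 0 if the packet needs
 * no offload (falling back to skb_checksum_help() where required), or -1
 * if an unsupported protocol is hit while TSO is requested.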
19715ec8b7d1SJesse Brandeburg  **/
197256184e01SJesse Brandeburg static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
19735ec8b7d1SJesse Brandeburg 			       u32 *td_cmd, u32 *td_offset,
197456184e01SJesse Brandeburg 			       struct iavf_ring *tx_ring,
19755ec8b7d1SJesse Brandeburg 			       u32 *cd_tunneling)
19765ec8b7d1SJesse Brandeburg {
19775ec8b7d1SJesse Brandeburg 	union {
19785ec8b7d1SJesse Brandeburg 		struct iphdr *v4;
19795ec8b7d1SJesse Brandeburg 		struct ipv6hdr *v6;
19805ec8b7d1SJesse Brandeburg 		unsigned char *hdr;
19815ec8b7d1SJesse Brandeburg 	} ip;
19825ec8b7d1SJesse Brandeburg 	union {
19835ec8b7d1SJesse Brandeburg 		struct tcphdr *tcp;
19845ec8b7d1SJesse Brandeburg 		struct udphdr *udp;
19855ec8b7d1SJesse Brandeburg 		unsigned char *hdr;
19865ec8b7d1SJesse Brandeburg 	} l4;
19875ec8b7d1SJesse Brandeburg 	unsigned char *exthdr;
19885ec8b7d1SJesse Brandeburg 	u32 offset, cmd = 0;
19895ec8b7d1SJesse Brandeburg 	__be16 frag_off;
19905ec8b7d1SJesse Brandeburg 	u8 l4_proto = 0;
19915ec8b7d1SJesse Brandeburg 
19925ec8b7d1SJesse Brandeburg 	if (skb->ip_summed != CHECKSUM_PARTIAL)
19935ec8b7d1SJesse Brandeburg 		return 0;
19945ec8b7d1SJesse Brandeburg 
19955ec8b7d1SJesse Brandeburg 	ip.hdr = skb_network_header(skb);
19965ec8b7d1SJesse Brandeburg 	l4.hdr = skb_transport_header(skb);
19975ec8b7d1SJesse Brandeburg 
19985ec8b7d1SJesse Brandeburg 	/* compute outer L2 header size */
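	/* the MACLEN descriptor field counts 2-byte words, hence the
	 * divide by two; the IP and L4 length fields below count 4-byte
	 * words
	 */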
1999f1cad2ceSJesse Brandeburg 	offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
20005ec8b7d1SJesse Brandeburg 
20015ec8b7d1SJesse Brandeburg 	if (skb->encapsulation) {
20025ec8b7d1SJesse Brandeburg 		u32 tunnel = 0;
20035ec8b7d1SJesse Brandeburg 		/* define outer network header type */
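		/* with TSO every segment gets a freshly built outer IPv4
		 * header, so its checksum must be offloaded as well; without
		 * TSO the stack-computed outer checksum is left untouched
		 */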
200456184e01SJesse Brandeburg 		if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
200556184e01SJesse Brandeburg 			tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
200656184e01SJesse Brandeburg 				  IAVF_TX_CTX_EXT_IP_IPV4 :
200756184e01SJesse Brandeburg 				  IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
20085ec8b7d1SJesse Brandeburg 
20095ec8b7d1SJesse Brandeburg 			l4_proto = ip.v4->protocol;
201056184e01SJesse Brandeburg 		} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
201156184e01SJesse Brandeburg 			tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
20125ec8b7d1SJesse Brandeburg 
20135ec8b7d1SJesse Brandeburg 			exthdr = ip.hdr + sizeof(*ip.v6);
20145ec8b7d1SJesse Brandeburg 			l4_proto = ip.v6->nexthdr;
20155ec8b7d1SJesse Brandeburg 			if (l4.hdr != exthdr)
20165ec8b7d1SJesse Brandeburg 				ipv6_skip_exthdr(skb, exthdr - skb->data,
20175ec8b7d1SJesse Brandeburg 						 &l4_proto, &frag_off);
20185ec8b7d1SJesse Brandeburg 		}
20195ec8b7d1SJesse Brandeburg 
20205ec8b7d1SJesse Brandeburg 		/* define outer transport */
20215ec8b7d1SJesse Brandeburg 		switch (l4_proto) {
20225ec8b7d1SJesse Brandeburg 		case IPPROTO_UDP:
202356184e01SJesse Brandeburg 			tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
202456184e01SJesse Brandeburg 			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
20255ec8b7d1SJesse Brandeburg 			break;
20265ec8b7d1SJesse Brandeburg 		case IPPROTO_GRE:
202756184e01SJesse Brandeburg 			tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
202856184e01SJesse Brandeburg 			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
20295ec8b7d1SJesse Brandeburg 			break;
20305ec8b7d1SJesse Brandeburg 		case IPPROTO_IPIP:
20315ec8b7d1SJesse Brandeburg 		case IPPROTO_IPV6:
203256184e01SJesse Brandeburg 			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
20335ec8b7d1SJesse Brandeburg 			l4.hdr = skb_inner_network_header(skb);
20345ec8b7d1SJesse Brandeburg 			break;
20355ec8b7d1SJesse Brandeburg 		default:
203656184e01SJesse Brandeburg 			if (*tx_flags & IAVF_TX_FLAGS_TSO)
20375ec8b7d1SJesse Brandeburg 				return -1;
20385ec8b7d1SJesse Brandeburg 
20395ec8b7d1SJesse Brandeburg 			skb_checksum_help(skb);
20405ec8b7d1SJesse Brandeburg 			return 0;
20415ec8b7d1SJesse Brandeburg 		}
20425ec8b7d1SJesse Brandeburg 
20435ec8b7d1SJesse Brandeburg 		/* compute outer L3 header size */
20445ec8b7d1SJesse Brandeburg 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
204556184e01SJesse Brandeburg 			  IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
20465ec8b7d1SJesse Brandeburg 
20475ec8b7d1SJesse Brandeburg 		/* switch IP header pointer from outer to inner header */
20485ec8b7d1SJesse Brandeburg 		ip.hdr = skb_inner_network_header(skb);
20495ec8b7d1SJesse Brandeburg 
20505ec8b7d1SJesse Brandeburg 		/* compute tunnel header size */
20515ec8b7d1SJesse Brandeburg 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
205256184e01SJesse Brandeburg 			  IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
20535ec8b7d1SJesse Brandeburg 
20545ec8b7d1SJesse Brandeburg 		/* indicate if we need to offload outer UDP header */
205556184e01SJesse Brandeburg 		if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
20565ec8b7d1SJesse Brandeburg 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
20575ec8b7d1SJesse Brandeburg 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
205856184e01SJesse Brandeburg 			tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
20595ec8b7d1SJesse Brandeburg 
20605ec8b7d1SJesse Brandeburg 		/* record tunnel offload values */
20615ec8b7d1SJesse Brandeburg 		*cd_tunneling |= tunnel;
20625ec8b7d1SJesse Brandeburg 
20635ec8b7d1SJesse Brandeburg 		/* switch L4 header pointer from outer to inner */
20645ec8b7d1SJesse Brandeburg 		l4.hdr = skb_inner_transport_header(skb);
20655ec8b7d1SJesse Brandeburg 		l4_proto = 0;
20665ec8b7d1SJesse Brandeburg 
20675ec8b7d1SJesse Brandeburg 		/* reset type as we transition from outer to inner headers */
206856184e01SJesse Brandeburg 		*tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
20695ec8b7d1SJesse Brandeburg 		if (ip.v4->version == 4)
207056184e01SJesse Brandeburg 			*tx_flags |= IAVF_TX_FLAGS_IPV4;
20715ec8b7d1SJesse Brandeburg 		if (ip.v6->version == 6)
207256184e01SJesse Brandeburg 			*tx_flags |= IAVF_TX_FLAGS_IPV6;
20735ec8b7d1SJesse Brandeburg 	}
20745ec8b7d1SJesse Brandeburg 
20755ec8b7d1SJesse Brandeburg 	/* Enable IP checksum offloads */
207656184e01SJesse Brandeburg 	if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
20775ec8b7d1SJesse Brandeburg 		l4_proto = ip.v4->protocol;
20785ec8b7d1SJesse Brandeburg 		/* the stack has already computed the IP header checksum; the
20795ec8b7d1SJesse Brandeburg 		 * only time we need the hardware to recompute it is for TSO
20805ec8b7d1SJesse Brandeburg 		 */
208156184e01SJesse Brandeburg 		cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2082f1cad2ceSJesse Brandeburg 		       IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2083f1cad2ceSJesse Brandeburg 		       IAVF_TX_DESC_CMD_IIPT_IPV4;
208456184e01SJesse Brandeburg 	} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2085f1cad2ceSJesse Brandeburg 		cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
20865ec8b7d1SJesse Brandeburg 
20875ec8b7d1SJesse Brandeburg 		exthdr = ip.hdr + sizeof(*ip.v6);
20885ec8b7d1SJesse Brandeburg 		l4_proto = ip.v6->nexthdr;
20895ec8b7d1SJesse Brandeburg 		if (l4.hdr != exthdr)
20905ec8b7d1SJesse Brandeburg 			ipv6_skip_exthdr(skb, exthdr - skb->data,
20915ec8b7d1SJesse Brandeburg 					 &l4_proto, &frag_off);
20925ec8b7d1SJesse Brandeburg 	}
20935ec8b7d1SJesse Brandeburg 
20945ec8b7d1SJesse Brandeburg 	/* compute inner L3 header size */
2095f1cad2ceSJesse Brandeburg 	offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
20965ec8b7d1SJesse Brandeburg 
20975ec8b7d1SJesse Brandeburg 	/* Enable L4 checksum offloads */
20985ec8b7d1SJesse Brandeburg 	switch (l4_proto) {
20995ec8b7d1SJesse Brandeburg 	case IPPROTO_TCP:
21005ec8b7d1SJesse Brandeburg 		/* enable TCP checksum offload */
2101f1cad2ceSJesse Brandeburg 		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2102f1cad2ceSJesse Brandeburg 		offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
21035ec8b7d1SJesse Brandeburg 		break;
21045ec8b7d1SJesse Brandeburg 	case IPPROTO_SCTP:
21055ec8b7d1SJesse Brandeburg 		/* enable SCTP checksum offload */
2106f1cad2ceSJesse Brandeburg 		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
21075ec8b7d1SJesse Brandeburg 		offset |= (sizeof(struct sctphdr) >> 2) <<
2108f1cad2ceSJesse Brandeburg 			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
21095ec8b7d1SJesse Brandeburg 		break;
21105ec8b7d1SJesse Brandeburg 	case IPPROTO_UDP:
21115ec8b7d1SJesse Brandeburg 		/* enable UDP checksum offload */
2112f1cad2ceSJesse Brandeburg 		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
21135ec8b7d1SJesse Brandeburg 		offset |= (sizeof(struct udphdr) >> 2) <<
2114f1cad2ceSJesse Brandeburg 			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
21155ec8b7d1SJesse Brandeburg 		break;
21165ec8b7d1SJesse Brandeburg 	default:
211756184e01SJesse Brandeburg 		if (*tx_flags & IAVF_TX_FLAGS_TSO)
21185ec8b7d1SJesse Brandeburg 			return -1;
21195ec8b7d1SJesse Brandeburg 		skb_checksum_help(skb);
21205ec8b7d1SJesse Brandeburg 		return 0;
21215ec8b7d1SJesse Brandeburg 	}
21225ec8b7d1SJesse Brandeburg 
21235ec8b7d1SJesse Brandeburg 	*td_cmd |= cmd;
21245ec8b7d1SJesse Brandeburg 	*td_offset |= offset;
21255ec8b7d1SJesse Brandeburg 
21265ec8b7d1SJesse Brandeburg 	return 1;
21275ec8b7d1SJesse Brandeburg }
21285ec8b7d1SJesse Brandeburg 
21295ec8b7d1SJesse Brandeburg /**
2130262de08fSJesse Brandeburg  * iavf_create_tx_ctx - Build the Tx context descriptor
21315ec8b7d1SJesse Brandeburg  * @tx_ring:  ring to create the descriptor on
21325ec8b7d1SJesse Brandeburg  * @cd_type_cmd_tso_mss: Quad Word 1
21335ec8b7d1SJesse Brandeburg  * @cd_tunneling: Quad Word 0 - bits 0-31
21345ec8b7d1SJesse Brandeburg  * @cd_l2tag2: Quad Word 0 - bits 32-63
21355ec8b7d1SJesse Brandeburg  **/
213656184e01SJesse Brandeburg static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
21375ec8b7d1SJesse Brandeburg 			       const u64 cd_type_cmd_tso_mss,
21385ec8b7d1SJesse Brandeburg 			       const u32 cd_tunneling, const u32 cd_l2tag2)
21395ec8b7d1SJesse Brandeburg {
214056184e01SJesse Brandeburg 	struct iavf_tx_context_desc *context_desc;
21415ec8b7d1SJesse Brandeburg 	int i = tx_ring->next_to_use;
21425ec8b7d1SJesse Brandeburg 
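	/* skip writing a context descriptor when it would carry nothing
	 * beyond the bare CONTEXT dtype; the ring slot is better spent on
	 * data descriptors
	 */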
2143f1cad2ceSJesse Brandeburg 	if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
21445ec8b7d1SJesse Brandeburg 	    !cd_tunneling && !cd_l2tag2)
21455ec8b7d1SJesse Brandeburg 		return;
21465ec8b7d1SJesse Brandeburg 
21475ec8b7d1SJesse Brandeburg 	/* grab the next descriptor */
2148f1cad2ceSJesse Brandeburg 	context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
21495ec8b7d1SJesse Brandeburg 
21505ec8b7d1SJesse Brandeburg 	i++;
21515ec8b7d1SJesse Brandeburg 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
21525ec8b7d1SJesse Brandeburg 
21535ec8b7d1SJesse Brandeburg 	/* cpu_to_le32 and assign to struct fields */
21545ec8b7d1SJesse Brandeburg 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
21555ec8b7d1SJesse Brandeburg 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
21565ec8b7d1SJesse Brandeburg 	context_desc->rsvd = cpu_to_le16(0);
21575ec8b7d1SJesse Brandeburg 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
21585ec8b7d1SJesse Brandeburg }
21595ec8b7d1SJesse Brandeburg 
21605ec8b7d1SJesse Brandeburg /**
21615ec8b7d1SJesse Brandeburg  * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
21625ec8b7d1SJesse Brandeburg  * @skb:      send buffer
21635ec8b7d1SJesse Brandeburg  *
21645ec8b7d1SJesse Brandeburg  * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
21655ec8b7d1SJesse Brandeburg  * and so we need to figure out the cases where we need to linearize the skb.
21665ec8b7d1SJesse Brandeburg  *
21675ec8b7d1SJesse Brandeburg  * For TSO we need to count the TSO header and segment payload separately.
21685ec8b7d1SJesse Brandeburg  * As such we need to check cases where we have 7 fragments or more as we
21695ec8b7d1SJesse Brandeburg  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
21705ec8b7d1SJesse Brandeburg  * the segment payload in the first descriptor, and another 7 for the
21715ec8b7d1SJesse Brandeburg  * fragments.
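 *
 * For example (illustrative numbers): with gso_size = 4000 and seven
 * 500 byte fragments, any six consecutive fragments cover only 3000
 * bytes, so a single segment could span more than 8 buffers and the
 * skb must be linearized first.
 *
 * Returns true if the skb needs to be linearized, false otherwise.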
21725ec8b7d1SJesse Brandeburg  **/
21735ec8b7d1SJesse Brandeburg bool __iavf_chk_linearize(struct sk_buff *skb)
21745ec8b7d1SJesse Brandeburg {
2175d7840976SMatthew Wilcox (Oracle) 	const skb_frag_t *frag, *stale;
21765ec8b7d1SJesse Brandeburg 	int nr_frags, sum;
21775ec8b7d1SJesse Brandeburg 
21785ec8b7d1SJesse Brandeburg 	/* no need to check if number of frags is less than 7 */
21795ec8b7d1SJesse Brandeburg 	nr_frags = skb_shinfo(skb)->nr_frags;
218056184e01SJesse Brandeburg 	if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
21815ec8b7d1SJesse Brandeburg 		return false;
21825ec8b7d1SJesse Brandeburg 
21835ec8b7d1SJesse Brandeburg 	/* We need to walk through the list and validate that each group
21845ec8b7d1SJesse Brandeburg 	 * of 6 fragments totals at least gso_size.
21855ec8b7d1SJesse Brandeburg 	 */
218656184e01SJesse Brandeburg 	nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
21875ec8b7d1SJesse Brandeburg 	frag = &skb_shinfo(skb)->frags[0];
21885ec8b7d1SJesse Brandeburg 
21895ec8b7d1SJesse Brandeburg 	/* Initialize sum to the negative value of gso_size minus 1.  We
21905ec8b7d1SJesse Brandeburg 	 * use this as the worst case scenario in which the frag ahead
21915ec8b7d1SJesse Brandeburg 	 * of us only provides one byte which is why we are limited to 6
21925ec8b7d1SJesse Brandeburg 	 * descriptors for a single transmit as the header and previous
21935ec8b7d1SJesse Brandeburg 	 * fragment are already consuming 2 descriptors.
21945ec8b7d1SJesse Brandeburg 	 */
21955ec8b7d1SJesse Brandeburg 	sum = 1 - skb_shinfo(skb)->gso_size;
21965ec8b7d1SJesse Brandeburg 
21975ec8b7d1SJesse Brandeburg 	/* Add size of frags 0 through 4 to create our initial sum */
21985ec8b7d1SJesse Brandeburg 	sum += skb_frag_size(frag++);
21995ec8b7d1SJesse Brandeburg 	sum += skb_frag_size(frag++);
22005ec8b7d1SJesse Brandeburg 	sum += skb_frag_size(frag++);
22015ec8b7d1SJesse Brandeburg 	sum += skb_frag_size(frag++);
22025ec8b7d1SJesse Brandeburg 	sum += skb_frag_size(frag++);
22035ec8b7d1SJesse Brandeburg 
22045ec8b7d1SJesse Brandeburg 	/* Walk through fragments adding latest fragment, testing it, and
22055ec8b7d1SJesse Brandeburg 	 * then removing stale fragments from the sum.
22065ec8b7d1SJesse Brandeburg 	 */
22075ec8b7d1SJesse Brandeburg 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
22085ec8b7d1SJesse Brandeburg 		int stale_size = skb_frag_size(stale);
22095ec8b7d1SJesse Brandeburg 
22105ec8b7d1SJesse Brandeburg 		sum += skb_frag_size(frag++);
22115ec8b7d1SJesse Brandeburg 
22125ec8b7d1SJesse Brandeburg 		/* The stale fragment may present us with a smaller
22135ec8b7d1SJesse Brandeburg 		 * descriptor than the actual fragment size. To account
22145ec8b7d1SJesse Brandeburg 		 * for that we need to remove all the data on the front and
22155ec8b7d1SJesse Brandeburg 		 * figure out what the remainder would be in the last
22165ec8b7d1SJesse Brandeburg 		 * descriptor associated with the fragment.
22175ec8b7d1SJesse Brandeburg 		 */
221856184e01SJesse Brandeburg 		if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2219b54c9d5bSJonathan Lemon 			int align_pad = -(skb_frag_off(stale)) &
222056184e01SJesse Brandeburg 					(IAVF_MAX_READ_REQ_SIZE - 1);
22215ec8b7d1SJesse Brandeburg 
22225ec8b7d1SJesse Brandeburg 			sum -= align_pad;
22235ec8b7d1SJesse Brandeburg 			stale_size -= align_pad;
22245ec8b7d1SJesse Brandeburg 
22255ec8b7d1SJesse Brandeburg 			do {
222656184e01SJesse Brandeburg 				sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
222756184e01SJesse Brandeburg 				stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
222856184e01SJesse Brandeburg 			} while (stale_size > IAVF_MAX_DATA_PER_TXD);
22295ec8b7d1SJesse Brandeburg 		}
22305ec8b7d1SJesse Brandeburg 
22315ec8b7d1SJesse Brandeburg 		/* if sum is negative we failed to make sufficient progress */
22325ec8b7d1SJesse Brandeburg 		if (sum < 0)
22335ec8b7d1SJesse Brandeburg 			return true;
22345ec8b7d1SJesse Brandeburg 
22355ec8b7d1SJesse Brandeburg 		if (!nr_frags--)
22365ec8b7d1SJesse Brandeburg 			break;
22375ec8b7d1SJesse Brandeburg 
22385ec8b7d1SJesse Brandeburg 		sum -= stale_size;
22395ec8b7d1SJesse Brandeburg 	}
22405ec8b7d1SJesse Brandeburg 
22415ec8b7d1SJesse Brandeburg 	return false;
22425ec8b7d1SJesse Brandeburg }
22435ec8b7d1SJesse Brandeburg 
22445ec8b7d1SJesse Brandeburg /**
22455ec8b7d1SJesse Brandeburg  * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
22465ec8b7d1SJesse Brandeburg  * @tx_ring: the ring to be checked
22475ec8b7d1SJesse Brandeburg  * @size:    number of descriptors we want to assure are available
22485ec8b7d1SJesse Brandeburg  *
22495ec8b7d1SJesse Brandeburg  * Returns -EBUSY if a stop is needed, else 0
22505ec8b7d1SJesse Brandeburg  **/
225156184e01SJesse Brandeburg int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
22525ec8b7d1SJesse Brandeburg {
22535ec8b7d1SJesse Brandeburg 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
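	/* stop first, then re-check: if the clean-up path freed descriptors
	 * between our caller's check and the stop above, restart the queue
	 * now rather than waiting for the next Tx completion
	 */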
22545ec8b7d1SJesse Brandeburg 	/* Memory barrier before checking head and tail */
22555ec8b7d1SJesse Brandeburg 	smp_mb();
22565ec8b7d1SJesse Brandeburg 
22575ec8b7d1SJesse Brandeburg 	/* Check again in case another CPU has just made room available. */
225856184e01SJesse Brandeburg 	if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
22595ec8b7d1SJesse Brandeburg 		return -EBUSY;
22605ec8b7d1SJesse Brandeburg 
22615ec8b7d1SJesse Brandeburg 	/* A reprieve! - use start_queue because it doesn't call schedule */
22625ec8b7d1SJesse Brandeburg 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
22635ec8b7d1SJesse Brandeburg 	++tx_ring->tx_stats.restart_queue;
22645ec8b7d1SJesse Brandeburg 	return 0;
22655ec8b7d1SJesse Brandeburg }
22665ec8b7d1SJesse Brandeburg 
22675ec8b7d1SJesse Brandeburg /**
22685ec8b7d1SJesse Brandeburg  * iavf_tx_map - Build the Tx descriptor
22695ec8b7d1SJesse Brandeburg  * @tx_ring:  ring to send buffer on
22705ec8b7d1SJesse Brandeburg  * @skb:      send buffer
22715ec8b7d1SJesse Brandeburg  * @first:    first buffer info buffer to use
22725ec8b7d1SJesse Brandeburg  * @tx_flags: collected send information
22735ec8b7d1SJesse Brandeburg  * @hdr_len:  size of the packet header
22745ec8b7d1SJesse Brandeburg  * @td_cmd:   the command field in the descriptor
22755ec8b7d1SJesse Brandeburg  * @td_offset: offset for checksum or crc
22765ec8b7d1SJesse Brandeburg  **/
227756184e01SJesse Brandeburg static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
227856184e01SJesse Brandeburg 			       struct iavf_tx_buffer *first, u32 tx_flags,
22795ec8b7d1SJesse Brandeburg 			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
22805ec8b7d1SJesse Brandeburg {
22815ec8b7d1SJesse Brandeburg 	unsigned int data_len = skb->data_len;
22825ec8b7d1SJesse Brandeburg 	unsigned int size = skb_headlen(skb);
2283d7840976SMatthew Wilcox (Oracle) 	skb_frag_t *frag;
228456184e01SJesse Brandeburg 	struct iavf_tx_buffer *tx_bi;
228556184e01SJesse Brandeburg 	struct iavf_tx_desc *tx_desc;
22865ec8b7d1SJesse Brandeburg 	u16 i = tx_ring->next_to_use;
22875ec8b7d1SJesse Brandeburg 	u32 td_tag = 0;
22885ec8b7d1SJesse Brandeburg 	dma_addr_t dma;
22895ec8b7d1SJesse Brandeburg 
229056184e01SJesse Brandeburg 	if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2291f1cad2ceSJesse Brandeburg 		td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
229256184e01SJesse Brandeburg 		td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
229356184e01SJesse Brandeburg 			 IAVF_TX_FLAGS_VLAN_SHIFT;
22945ec8b7d1SJesse Brandeburg 	}
22955ec8b7d1SJesse Brandeburg 
22965ec8b7d1SJesse Brandeburg 	first->tx_flags = tx_flags;
22975ec8b7d1SJesse Brandeburg 
22985ec8b7d1SJesse Brandeburg 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
22995ec8b7d1SJesse Brandeburg 
2300f1cad2ceSJesse Brandeburg 	tx_desc = IAVF_TX_DESC(tx_ring, i);
23015ec8b7d1SJesse Brandeburg 	tx_bi = first;
23025ec8b7d1SJesse Brandeburg 
23035ec8b7d1SJesse Brandeburg 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
230456184e01SJesse Brandeburg 		unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
23055ec8b7d1SJesse Brandeburg 
23065ec8b7d1SJesse Brandeburg 		if (dma_mapping_error(tx_ring->dev, dma))
23075ec8b7d1SJesse Brandeburg 			goto dma_error;
23085ec8b7d1SJesse Brandeburg 
23095ec8b7d1SJesse Brandeburg 		/* record length and DMA address */
23105ec8b7d1SJesse Brandeburg 		dma_unmap_len_set(tx_bi, len, size);
23115ec8b7d1SJesse Brandeburg 		dma_unmap_addr_set(tx_bi, dma, dma);
23125ec8b7d1SJesse Brandeburg 
23135ec8b7d1SJesse Brandeburg 		/* align size to end of page */
231456184e01SJesse Brandeburg 		max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
23155ec8b7d1SJesse Brandeburg 		tx_desc->buffer_addr = cpu_to_le64(dma);
23165ec8b7d1SJesse Brandeburg 
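		/* a single descriptor can carry at most IAVF_MAX_DATA_PER_TXD
		 * bytes, so chop larger mappings across several descriptors
		 */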
231756184e01SJesse Brandeburg 		while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
23185ec8b7d1SJesse Brandeburg 			tx_desc->cmd_type_offset_bsz =
23195ec8b7d1SJesse Brandeburg 				build_ctob(td_cmd, td_offset,
23205ec8b7d1SJesse Brandeburg 					   max_data, td_tag);
23215ec8b7d1SJesse Brandeburg 
23225ec8b7d1SJesse Brandeburg 			tx_desc++;
23235ec8b7d1SJesse Brandeburg 			i++;
23245ec8b7d1SJesse Brandeburg 
23255ec8b7d1SJesse Brandeburg 			if (i == tx_ring->count) {
2326f1cad2ceSJesse Brandeburg 				tx_desc = IAVF_TX_DESC(tx_ring, 0);
23275ec8b7d1SJesse Brandeburg 				i = 0;
23285ec8b7d1SJesse Brandeburg 			}
23295ec8b7d1SJesse Brandeburg 
23305ec8b7d1SJesse Brandeburg 			dma += max_data;
23315ec8b7d1SJesse Brandeburg 			size -= max_data;
23325ec8b7d1SJesse Brandeburg 
233356184e01SJesse Brandeburg 			max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
23345ec8b7d1SJesse Brandeburg 			tx_desc->buffer_addr = cpu_to_le64(dma);
23355ec8b7d1SJesse Brandeburg 		}
23365ec8b7d1SJesse Brandeburg 
23375ec8b7d1SJesse Brandeburg 		if (likely(!data_len))
23385ec8b7d1SJesse Brandeburg 			break;
23395ec8b7d1SJesse Brandeburg 
23405ec8b7d1SJesse Brandeburg 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
23415ec8b7d1SJesse Brandeburg 							  size, td_tag);
23425ec8b7d1SJesse Brandeburg 
23435ec8b7d1SJesse Brandeburg 		tx_desc++;
23445ec8b7d1SJesse Brandeburg 		i++;
23455ec8b7d1SJesse Brandeburg 
23465ec8b7d1SJesse Brandeburg 		if (i == tx_ring->count) {
2347f1cad2ceSJesse Brandeburg 			tx_desc = IAVF_TX_DESC(tx_ring, 0);
23485ec8b7d1SJesse Brandeburg 			i = 0;
23495ec8b7d1SJesse Brandeburg 		}
23505ec8b7d1SJesse Brandeburg 
23515ec8b7d1SJesse Brandeburg 		size = skb_frag_size(frag);
23525ec8b7d1SJesse Brandeburg 		data_len -= size;
23535ec8b7d1SJesse Brandeburg 
23545ec8b7d1SJesse Brandeburg 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
23555ec8b7d1SJesse Brandeburg 				       DMA_TO_DEVICE);
23565ec8b7d1SJesse Brandeburg 
23575ec8b7d1SJesse Brandeburg 		tx_bi = &tx_ring->tx_bi[i];
23585ec8b7d1SJesse Brandeburg 	}
23595ec8b7d1SJesse Brandeburg 
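	/* report bytes to the stack for byte-queue-limit (BQL) accounting */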
23605ec8b7d1SJesse Brandeburg 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
23615ec8b7d1SJesse Brandeburg 
23625ec8b7d1SJesse Brandeburg 	i++;
23635ec8b7d1SJesse Brandeburg 	if (i == tx_ring->count)
23645ec8b7d1SJesse Brandeburg 		i = 0;
23655ec8b7d1SJesse Brandeburg 
23665ec8b7d1SJesse Brandeburg 	tx_ring->next_to_use = i;
23675ec8b7d1SJesse Brandeburg 
236856184e01SJesse Brandeburg 	iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
23695ec8b7d1SJesse Brandeburg 
23705ec8b7d1SJesse Brandeburg 	/* write last descriptor with RS and EOP bits */
237156184e01SJesse Brandeburg 	td_cmd |= IAVF_TXD_CMD;
23725ec8b7d1SJesse Brandeburg 	tx_desc->cmd_type_offset_bsz =
23735ec8b7d1SJesse Brandeburg 			build_ctob(td_cmd, td_offset, size, td_tag);
23745ec8b7d1SJesse Brandeburg 
2375a9e51058SJacob Keller 	skb_tx_timestamp(skb);
2376a9e51058SJacob Keller 
23775ec8b7d1SJesse Brandeburg 	/* Force memory writes to complete before letting h/w know there
23785ec8b7d1SJesse Brandeburg 	 * are new descriptors to fetch.
23795ec8b7d1SJesse Brandeburg 	 *
23805ec8b7d1SJesse Brandeburg 	 * We also use this memory barrier to make certain all of the
23815ec8b7d1SJesse Brandeburg 	 * status bits have been updated before next_to_watch is written.
23825ec8b7d1SJesse Brandeburg 	 */
23835ec8b7d1SJesse Brandeburg 	wmb();
23845ec8b7d1SJesse Brandeburg 
23855ec8b7d1SJesse Brandeburg 	/* set next_to_watch value indicating a packet is present */
23865ec8b7d1SJesse Brandeburg 	first->next_to_watch = tx_desc;
23875ec8b7d1SJesse Brandeburg 
23885ec8b7d1SJesse Brandeburg 	/* notify HW of packet */
23896b16f9eeSFlorian Westphal 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
23905ec8b7d1SJesse Brandeburg 		writel(i, tx_ring->tail);
23925ec8b7d1SJesse Brandeburg 
23935ec8b7d1SJesse Brandeburg 	return;
23945ec8b7d1SJesse Brandeburg 
23955ec8b7d1SJesse Brandeburg dma_error:
23965ec8b7d1SJesse Brandeburg 	dev_info(tx_ring->dev, "TX DMA map failed\n");
23975ec8b7d1SJesse Brandeburg 
23985ec8b7d1SJesse Brandeburg 	/* clear dma mappings for failed tx_bi map */
23995ec8b7d1SJesse Brandeburg 	for (;;) {
24005ec8b7d1SJesse Brandeburg 		tx_bi = &tx_ring->tx_bi[i];
240156184e01SJesse Brandeburg 		iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
24025ec8b7d1SJesse Brandeburg 		if (tx_bi == first)
24035ec8b7d1SJesse Brandeburg 			break;
24045ec8b7d1SJesse Brandeburg 		if (i == 0)
24055ec8b7d1SJesse Brandeburg 			i = tx_ring->count;
24065ec8b7d1SJesse Brandeburg 		i--;
24075ec8b7d1SJesse Brandeburg 	}
24085ec8b7d1SJesse Brandeburg 
24095ec8b7d1SJesse Brandeburg 	tx_ring->next_to_use = i;
24105ec8b7d1SJesse Brandeburg }
24115ec8b7d1SJesse Brandeburg 
24125ec8b7d1SJesse Brandeburg /**
241356184e01SJesse Brandeburg  * iavf_xmit_frame_ring - Sends buffer on Tx ring
24145ec8b7d1SJesse Brandeburg  * @skb:     send buffer
24155ec8b7d1SJesse Brandeburg  * @tx_ring: ring to send buffer on
24165ec8b7d1SJesse Brandeburg  *
24175ec8b7d1SJesse Brandeburg  * Returns NETDEV_TX_OK if sent, else an error code
24185ec8b7d1SJesse Brandeburg  **/
241956184e01SJesse Brandeburg static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
242056184e01SJesse Brandeburg 					struct iavf_ring *tx_ring)
24215ec8b7d1SJesse Brandeburg {
2422f1cad2ceSJesse Brandeburg 	u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
24235ec8b7d1SJesse Brandeburg 	u32 cd_tunneling = 0, cd_l2tag2 = 0;
242456184e01SJesse Brandeburg 	struct iavf_tx_buffer *first;
24255ec8b7d1SJesse Brandeburg 	u32 td_offset = 0;
24265ec8b7d1SJesse Brandeburg 	u32 tx_flags = 0;
24275ec8b7d1SJesse Brandeburg 	__be16 protocol;
24285ec8b7d1SJesse Brandeburg 	u32 td_cmd = 0;
24295ec8b7d1SJesse Brandeburg 	u8 hdr_len = 0;
24305ec8b7d1SJesse Brandeburg 	int tso, count;
24315ec8b7d1SJesse Brandeburg 
24325ec8b7d1SJesse Brandeburg 	/* prefetch the data, we'll need it later */
24335ec8b7d1SJesse Brandeburg 	prefetch(skb->data);
24345ec8b7d1SJesse Brandeburg 
2435ad64ed8bSJesse Brandeburg 	iavf_trace(xmit_frame_ring, skb, tx_ring);
24365ec8b7d1SJesse Brandeburg 
243756184e01SJesse Brandeburg 	count = iavf_xmit_descriptor_count(skb);
243856184e01SJesse Brandeburg 	if (iavf_chk_linearize(skb, count)) {
24395ec8b7d1SJesse Brandeburg 		if (__skb_linearize(skb)) {
24405ec8b7d1SJesse Brandeburg 			dev_kfree_skb_any(skb);
24415ec8b7d1SJesse Brandeburg 			return NETDEV_TX_OK;
24425ec8b7d1SJesse Brandeburg 		}
244356184e01SJesse Brandeburg 		count = iavf_txd_use_count(skb->len);
24445ec8b7d1SJesse Brandeburg 		tx_ring->tx_stats.tx_linearize++;
24455ec8b7d1SJesse Brandeburg 	}
24465ec8b7d1SJesse Brandeburg 
244756184e01SJesse Brandeburg 	/* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
244856184e01SJesse Brandeburg 	 *       + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
24495ec8b7d1SJesse Brandeburg 	 *       + 4 desc gap to avoid the cache line where head is,
24505ec8b7d1SJesse Brandeburg 	 *       + 1 desc for context descriptor,
24515ec8b7d1SJesse Brandeburg 	 * otherwise try next time
24525ec8b7d1SJesse Brandeburg 	 */
245356184e01SJesse Brandeburg 	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
24545ec8b7d1SJesse Brandeburg 		tx_ring->tx_stats.tx_busy++;
24555ec8b7d1SJesse Brandeburg 		return NETDEV_TX_BUSY;
24565ec8b7d1SJesse Brandeburg 	}
24575ec8b7d1SJesse Brandeburg 
24585ec8b7d1SJesse Brandeburg 	/* record the location of the first descriptor for this packet */
24595ec8b7d1SJesse Brandeburg 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
24605ec8b7d1SJesse Brandeburg 	first->skb = skb;
24615ec8b7d1SJesse Brandeburg 	first->bytecount = skb->len;
24625ec8b7d1SJesse Brandeburg 	first->gso_segs = 1;
24635ec8b7d1SJesse Brandeburg 
24645ec8b7d1SJesse Brandeburg 	/* prepare the xmit flags */
2465ccd219d2SBrett Creeley 	iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
2466ccd219d2SBrett Creeley 	if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2467ccd219d2SBrett Creeley 		cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
2468ccd219d2SBrett Creeley 			IAVF_TXD_CTX_QW1_CMD_SHIFT;
2469ccd219d2SBrett Creeley 		cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2470ccd219d2SBrett Creeley 			IAVF_TX_FLAGS_VLAN_SHIFT;
2471ccd219d2SBrett Creeley 	}
24725ec8b7d1SJesse Brandeburg 
24735ec8b7d1SJesse Brandeburg 	/* obtain protocol of skb */
24745ec8b7d1SJesse Brandeburg 	protocol = vlan_get_protocol(skb);
24755ec8b7d1SJesse Brandeburg 
24765ec8b7d1SJesse Brandeburg 	/* setup IPv4/IPv6 offloads */
24775ec8b7d1SJesse Brandeburg 	if (protocol == htons(ETH_P_IP))
247856184e01SJesse Brandeburg 		tx_flags |= IAVF_TX_FLAGS_IPV4;
24795ec8b7d1SJesse Brandeburg 	else if (protocol == htons(ETH_P_IPV6))
248056184e01SJesse Brandeburg 		tx_flags |= IAVF_TX_FLAGS_IPV6;
24815ec8b7d1SJesse Brandeburg 
248256184e01SJesse Brandeburg 	tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
24835ec8b7d1SJesse Brandeburg 
24845ec8b7d1SJesse Brandeburg 	if (tso < 0)
24855ec8b7d1SJesse Brandeburg 		goto out_drop;
24865ec8b7d1SJesse Brandeburg 	else if (tso)
248756184e01SJesse Brandeburg 		tx_flags |= IAVF_TX_FLAGS_TSO;
24885ec8b7d1SJesse Brandeburg 
24895ec8b7d1SJesse Brandeburg 	/* Always offload the checksum, since it's in the data descriptor */
249056184e01SJesse Brandeburg 	tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
24915ec8b7d1SJesse Brandeburg 				  tx_ring, &cd_tunneling);
24925ec8b7d1SJesse Brandeburg 	if (tso < 0)
24935ec8b7d1SJesse Brandeburg 		goto out_drop;
24945ec8b7d1SJesse Brandeburg 
24955ec8b7d1SJesse Brandeburg 	/* always enable CRC insertion offload */
2496f1cad2ceSJesse Brandeburg 	td_cmd |= IAVF_TX_DESC_CMD_ICRC;
24975ec8b7d1SJesse Brandeburg 
249856184e01SJesse Brandeburg 	iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
24995ec8b7d1SJesse Brandeburg 			   cd_tunneling, cd_l2tag2);
25005ec8b7d1SJesse Brandeburg 
25015ec8b7d1SJesse Brandeburg 	iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
25025ec8b7d1SJesse Brandeburg 		    td_cmd, td_offset);
25035ec8b7d1SJesse Brandeburg 
25045ec8b7d1SJesse Brandeburg 	return NETDEV_TX_OK;
25055ec8b7d1SJesse Brandeburg 
25065ec8b7d1SJesse Brandeburg out_drop:
2507ad64ed8bSJesse Brandeburg 	iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
25085ec8b7d1SJesse Brandeburg 	dev_kfree_skb_any(first->skb);
25095ec8b7d1SJesse Brandeburg 	first->skb = NULL;
25105ec8b7d1SJesse Brandeburg 	return NETDEV_TX_OK;
25115ec8b7d1SJesse Brandeburg }
25125ec8b7d1SJesse Brandeburg 
25135ec8b7d1SJesse Brandeburg /**
25145ec8b7d1SJesse Brandeburg  * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
25155ec8b7d1SJesse Brandeburg  * @skb:    send buffer
25165ec8b7d1SJesse Brandeburg  * @netdev: network interface device structure
25175ec8b7d1SJesse Brandeburg  *
25185ec8b7d1SJesse Brandeburg  * Returns NETDEV_TX_OK if sent, else an error code
25195ec8b7d1SJesse Brandeburg  **/
25205ec8b7d1SJesse Brandeburg netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
25215ec8b7d1SJesse Brandeburg {
25225ec8b7d1SJesse Brandeburg 	struct iavf_adapter *adapter = netdev_priv(netdev);
252356184e01SJesse Brandeburg 	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
25245ec8b7d1SJesse Brandeburg 
25255ec8b7d1SJesse Brandeburg 	/* hardware can't handle frames shorter than IAVF_MIN_TX_LEN, so pad
25265ec8b7d1SJesse Brandeburg 	 * them in software; hardware padding works beyond this point
25275ec8b7d1SJesse Brandeburg 	 */
252856184e01SJesse Brandeburg 	if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
252956184e01SJesse Brandeburg 		if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
25305ec8b7d1SJesse Brandeburg 			return NETDEV_TX_OK;
253156184e01SJesse Brandeburg 		skb->len = IAVF_MIN_TX_LEN;
253256184e01SJesse Brandeburg 		skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
25335ec8b7d1SJesse Brandeburg 	}
25345ec8b7d1SJesse Brandeburg 
253556184e01SJesse Brandeburg 	return iavf_xmit_frame_ring(skb, tx_ring);
25365ec8b7d1SJesse Brandeburg }