/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>
#include "i40e_type.h"

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK      256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160 usec, which is called out as 0xFF0 with a 2 usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual usec values
 * and avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define I40E_ITR_20K		    50
#define I40E_ITR_8K		   122
#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
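
/* Worked example (illustrative only): the default Rx setting above is
 * 50 usecs (20K interrupts/sec) with the dynamic flag set, so
 *     ITR_IS_DYNAMIC(I40E_ITR_RX_DEF) -> true
 *     ITR_TO_REG(I40E_ITR_RX_DEF)     -> 50 usecs (flag stripped)
 * The usec value is halved when programmed, since the hardware register
 * counts in 2 usec units.
 */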

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit, in usecs, to convert
 *
 * This function converts an interrupt rate limit in usecs to the
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
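
/* Worked example (illustrative only): an interrupt rate limit of 20 usecs
 * converts as
 *     i40e_intrl_usec_to_reg(20) == (20 >> 2) | INTRL_ENA == 0x5 | 0x40 == 0x45
 * i.e. the limit is stored at 4 usec resolution with the enable bit set.
 * Values below 4 usecs collapse to 0, which leaves rate limiting disabled.
 */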

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1
#define I40E_SW_ITR    I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to 512 bytes of extra data, meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_16byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
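
/* Worked example (illustrative only, assuming 4K pages and NET_IP_ALIGN
 * of 2): for rx_buf_len = 1536 - 2 = 1534, i40e_compute_pad() rounds the
 * buffer up to half a page:
 *     page_size = ALIGN(1534, 2048) = 2048
 *     pad_size  = SKB_WITH_OVERHEAD(2048) - 1534
 * so whatever is left in the 2K half-page after the frame data and the
 * skb_shared_info overhead becomes headroom.
 */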

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
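
/* Usage sketch (illustrative only): the Rx clean-up path typically tests
 * the descriptor-done (DD) bit this way before touching a descriptor:
 *
 *     if (!i40e_test_staterr(rx_desc,
 *			      BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;
 *
 * The mask is built in CPU byte order and converted once with
 * cpu_to_le64(), avoiding a le64_to_cpu() on every descriptor read.
 */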

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)
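
/* Usage sketch (illustrative only): bump index 'i' to the next ring
 * position, wrapping at the end of the ring, and load that descriptor
 * into 'n':
 *
 *     union i40e_rx_desc *next_rxd;
 *
 *     I40E_RX_NEXT_DESC(rx_ring, i, next_rxd);
 *
 * Both 'i' and 'n' are written in place, so callers must pass lvalues.
 */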

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
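
/* Worked example (illustrative only): a 64K GSO send,
 *     i40e_txd_use_count(65536) == ((65536 * 85) >> 20) + 1
 *                               == (5570560 >> 20) + 1
 *                               == 5 + 1 == 6
 * which matches ceil(65536 / 12K) = 6 descriptors.
 */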

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	__u32 page_offset;
	__u16 pagecnt_bias;
	__u32 page_count;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 page_alloc_count;
	u64 page_waive_count;
	u64 page_busy_count;
};

enum i40e_ring_state {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
		struct xdp_buff **rx_bi_zc;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* Storing the xdp_buff on the ring saves the state of a partially
	 * built packet when i40e_clean_rx_ring_irq() must return before it
	 * sees EOP, and lets packet building resume for this ring in the
	 * next call to i40e_clean_rx_ring_irq().
	 */
	struct xdp_buff xdp;

	/* Next descriptor to be processed; next_to_clean is updated only on
	 * processing EOP descriptor
	 */
	u16 next_to_process;
	/* High bit set means dynamic; use accessor routines to read/write.
	 * Hardware only supports 2us resolution for the ITR registers.
	 * These values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 xdp_tx_active;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;		/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;

	struct i40e_channel *ch;
	u16 rx_offset;
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
#define I40E_ITR_ADAPTIVE_BULK		0x0000

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
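
/* Usage sketch (illustrative only): iterate all rings linked into a
 * q_vector's Tx container, e.g. when summing stats:
 *
 *     struct i40e_ring *ring;
 *
 *     i40e_for_each_ring(ring, q_vector->tx)
 *             packets += ring->stats.packets;
 */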

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
			  struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
bool i40e_is_non_eop(struct i40e_ring *rx_ring,
		     union i40e_rx_desc *rx_desc);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring:  tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
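
/* Layout note (illustrative only): when head writeback is enabled, the
 * hardware writes the current head index into a slot placed immediately
 * past the last descriptor:
 *
 *     desc[0] ... desc[count - 1] | head writeback (u32)
 *
 * which is why i40e_get_head() dereferences desc + count.
 */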

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
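
/* Worked example (illustrative only): an skb with a 256-byte linear head
 * and two 4K fragments needs
 *     i40e_txd_use_count(256) + 2 * i40e_txd_use_count(4096)
 *         == 1 + 2 * 1 == 3
 * descriptors, one per buffer, since each piece fits well under 12K.
 */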

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
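
/* Worked example (illustrative only): a non-GSO skb counted at 9 buffers
 * returns true (9 != I40E_MAX_BUFFER_TXD) and must be linearized, while
 * one counted at exactly 8 still fits the hardware's scatter-gather limit
 * and returns false. GSO skbs over the limit get the more expensive
 * per-segment check in __i40e_chk_linearize().
 */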

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */