/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1664		1664
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17
#define ICE_MAX_FRAME_LEGACY_RX 8320

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
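
/* Worked example: ICE_MAX_DATA_PER_TXD is 0x3FFF (16383) and the 4K
 * mask is ~0x0FFF, so ICE_MAX_DATA_PER_TXD_ALIGNED evaluates to
 * 0x3FFF & ~0x0FFF = 0x3000, i.e. 12288 bytes per data descriptor.
 */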

#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room.  From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
			SKB_WITH_OVERHEAD(ICE_RXBUF_2048))

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Round the buffer length up to the next half-page boundary, then
 * subtract the skb_shared_info overhead and the buffer length itself;
 * what remains is the space available for padding.
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}
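
/* Illustrative only, assuming 4K pages, a NET_IP_ALIGN of 2 and the
 * common 320-byte skb_shared_info: for rx_buf_len = 1534 (ICE_RXBUF_1536
 * minus NET_IP_ALIGN), half_page_size rounds up to 2048 and
 * SKB_WITH_OVERHEAD(2048) is 1728, so ice_compute_pad() returns
 * 1728 - 1534 = 194 bytes of padding. Exact numbers vary with the
 * kernel's skb_shared_info size.
 */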

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
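
/* Illustrative arithmetic: struct ice_tx_desc is 16 bytes, so
 * ICE_DESCS_PER_CACHE_LINE is 64 / 16 = 4. Assuming the common
 * MAX_SKB_FRAGS of 17, DESC_NEEDED works out to 17 + 1 + 4 + 1 = 23
 * descriptors reserved for a worst-case frame.
 */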
#define ICE_DESC_UNUSED(R)	\
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)
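
/* Example: on a 256-descriptor ring with next_to_clean = 10 and
 * next_to_use = 250, ICE_DESC_UNUSED() yields 256 + 10 - 250 - 1 = 15.
 * One slot is always kept unused so a full ring can be told apart from
 * an empty one.
 */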

#define ICE_RING_QUARTER(R) ((R)->count >> 2)

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
 * freed instead of returned like skb packets.
 */
#define ICE_TX_FLAGS_DUMMY_PKT	BIT(3)
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(8)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16
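
/* Illustrative only: the upper 16 bits of tx_flags carry the VLAN tag,
 * with the 3-bit priority in bits 29-31, so a caller would recover
 * them along these lines:
 *
 *	u16 vlan_tci = (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;
 *	u8 vlan_prio = (tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >>
 *		       ICE_TX_FLAGS_VLAN_PR_S;
 */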

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)
#define ICE_XDP_EXIT		BIT(3)

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf; /* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	unsigned int pgcnt;
	unsigned int pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

struct ice_ring_stats {
	struct rcu_head rcu;	/* to avoid race on free */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};
};
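
/* Sketch of a reader (local variable names are illustrative): counters
 * are updated under syncp, so a consistent snapshot is taken with the
 * u64_stats seqcount API:
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring_stats->syncp);
 *		pkts = ring_stats->stats.pkts;
 *		bytes = ring_stats->stats.bytes;
 *	} while (u64_stats_fetch_retry(&ring_stats->syncp, start));
 */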

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* This enum matches hardware bits and is meant to be used by the DYN_CTLN
 * and QINT registers, or more generally anywhere the manual mentions
 * ITR_INDX. ICE_ITR_NONE cannot be used as an index 'n' into any register;
 * it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)
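
/* Example: ITR granularity is 2 usecs, so bit 0 of a usec value is
 * ignored; ITR_REG_ALIGN(37) is 37 & 0x1FFE = 36, rounding an odd
 * interval down to the next even value before it is programmed.
 */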

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
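
/* Illustrative only: a vector that stays in polling mode could arm
 * write-backs with zero latency on the Rx ITR index roughly as:
 *
 *	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR));
 */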

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
	/* CL3 - 3rd cacheline starts here */
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;
	u16 rx_offset;
	u16 rx_buf_len;

	/* stats structs */
	struct ice_ring_stats *ring_stats;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	dma_addr_t dma;			/* physical address of ring */
	u64 cached_phctime;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
	u8 flags;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_rs;
	u16 next_dd;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	/* stats structs */
	struct ice_ring_stats *ring_stats;
	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid;			/* Added Tx queue TEID */
	/* CL4 - 4th cacheline starts here */
	u16 xdp_tx_active;
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1	BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2	BIT(2)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;
	};
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
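
/* Illustrative only: a poll routine walks every Tx ring hung off its
 * vector's Tx container roughly as (tx_ring is the cursor):
 *
 *	struct ice_tx_ring *tx_ring;
 *
 *	ice_for_each_tx_ring(tx_ring, q_vector->tx)
 *		ice_clean_tx_irq(tx_ring, budget);
 */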

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
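
/* Example: with 4K pages, a 3072-byte Rx buffer exceeds half a page,
 * so ice_rx_pg_order() returns 1 and ice_rx_pg_size() is an order-1
 * (8K) allocation; a 2048-byte buffer stays on an order-0 (4K) page.
 */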

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */