/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1664		1664
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17
#define ICE_MAX_FRAME_LEGACY_RX	8320

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE		4096
#define ICE_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
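
/* Worked example of the macro arithmetic above (illustration only):
 * ICE_MAX_DATA_PER_TXD = 16383 (0x3FFF) and ~(4096 - 1) = ~0xFFF, so
 * ICE_MAX_DATA_PER_TXD_ALIGNED = 0x3FFF & ~0xFFF = 0x3000 = 12288 bytes.
 * A large buffer is thus split into at most 12K chunks per descriptor,
 * keeping each chunk a whole multiple of the 4K maximum read request.
 */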

#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 bytes to store the data for the
 * frame. This leaves us with 512 bytes of room. From that we need to deduct
 * the space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
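
/* Illustrative evaluation, assuming a 64-byte cache line (so NET_SKB_PAD is
 * 64) and a 320-byte skb_shared_info; both vary by architecture and config:
 *	NET_SKB_PAD + ICE_RXBUF_1536      = 64 + 1536  = 1600
 *	SKB_WITH_OVERHEAD(ICE_RXBUF_2048) = 2048 - 320 = 1728
 * 1600 <= 1728, so the macro is false and the 2K buffer is kept. With a
 * 256-byte cache line the overhead grows and the macro flips to true.
 */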

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Round the given buffer length up to the next half-page boundary, then
 * subtract the skb_shared_info overhead and the buffer length itself;
 * what remains is the space that is left for padding usage.
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}
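
/* Example, under the same illustrative assumptions as above (4K pages,
 * 320-byte skb_shared_info): ice_compute_pad(1534) rounds 1534 up to 2048,
 * and SKB_WITH_OVERHEAD(2048) - 1534 = 1728 - 1534 = 194 bytes of padding.
 */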

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}
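
/* Tracing the common case under the same illustrative assumptions (64-byte
 * cache line, NET_IP_ALIGN of 2): ICE_2K_TOO_SMALL_WITH_PADDING is false, so
 * rx_buf_len = 1536 - 2 = 1534 and ice_skb_pad() returns
 * ice_compute_pad(1534) = 194 bytes of headroom. On x86, where NET_IP_ALIGN
 * is 0, the result is 192 instead.
 */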

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
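
/* Worked out with the usual values (assumptions, not guarantees): a Tx
 * descriptor is 16 bytes, so ICE_DESCS_PER_CACHE_LINE = 64 / 16 = 4, and
 * with the default MAX_SKB_FRAGS of 17 this gives
 * DESC_NEEDED = 17 + 1 + 4 + 1 = 23 descriptors reserved per worst-case skb.
 */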
#define ICE_DESC_UNUSED(R)	\
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)

#define ICE_RX_DESC_UNUSED(R)	\
	((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->first_desc - (R)->next_to_use - 1)
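
/* Example of the wraparound math: with count = 512, next_to_use = 10 and
 * next_to_clean = 5, next_to_clean is not ahead of next_to_use, so
 * ICE_DESC_UNUSED = 512 + 5 - 10 - 1 = 506 free descriptors. One slot is
 * always kept unused so a full ring can be told apart from an empty one.
 */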

#define ICE_RING_QUARTER(R) ((R)->count >> 2)

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(8)

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)
#define ICE_XDP_EXIT		BIT(3)
#define ICE_SKB_CONSUMED	ICE_XDP_CONSUMED

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
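
/* Rationale (driver convention, stated here for clarity): pages are mapped
 * with DMA_ATTR_SKIP_CPU_SYNC so the driver can sync only the region a
 * received frame actually occupies instead of the whole buffer, and
 * DMA_ATTR_WEAK_ORDERING lets the IOMMU relax ordering for throughput.
 */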

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

/**
 * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
 * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
 * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
 * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
 * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
 * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
 * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
 * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
 */
enum ice_tx_buf_type {
	ICE_TX_BUF_EMPTY	= 0U,
	ICE_TX_BUF_DUMMY,
	ICE_TX_BUF_FRAG,
	ICE_TX_BUF_SKB,
	ICE_TX_BUF_XDP_TX,
	ICE_TX_BUF_XDP_XMIT,
	ICE_TX_BUF_XSK_TX,
};
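
/* The kernel-doc above doubles as the cleanup contract: a completion path
 * can dispatch on the type field. A minimal sketch (not the driver's actual
 * cleanup routine, which also handles the DMA unmap and stats updates):
 *
 *	switch (tx_buf->type) {
 *	case ICE_TX_BUF_SKB:
 *		consume_skb(tx_buf->skb);
 *		break;
 *	case ICE_TX_BUF_XDP_TX:
 *		page_frag_free(tx_buf->raw_buf);
 *		break;
 *	case ICE_TX_BUF_XDP_XMIT:
 *		xdp_return_frame(tx_buf->xdpf);
 *		break;
 *	default:
 *		break;
 *	}
 *	tx_buf->type = ICE_TX_BUF_EMPTY;
 */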

struct ice_tx_buf {
	union {
		struct ice_tx_desc *next_to_watch;
		u32 rs_idx;
	};
	union {
		void *raw_buf;		/* used for XDP_TX and FDir rules */
		struct sk_buff *skb;	/* used for .ndo_start_xmit() */
		struct xdp_frame *xdpf;	/* used for .ndo_xdp_xmit() */
		struct xdp_buff *xdp;	/* used for XDP_TX ZC */
	};
	unsigned int bytecount;
	union {
		unsigned int gso_segs;
		unsigned int nr_frags;	/* used for mbuf XDP */
	};
	u32 tx_flags:12;
	u32 type:4;			/* &ice_tx_buf_type */
	u32 vid:16;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	unsigned int pgcnt;
	unsigned int act;
	unsigned int pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

struct ice_ring_stats {
	struct rcu_head rcu;	/* to avoid race on free */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};
/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK)
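
/* Since the hardware granularity is 2 usecs, ITR_REG_ALIGN() simply drops
 * bit zero: for example ITR_REG_ALIGN(51) = 51 & 0x1FFE = 50, so an odd
 * 51-usec request is programmed as 50 usecs.
 */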

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also
 * sets the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
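
/* Note the shift trick: the interval field starts at bit
 * GLINT_DYN_CTL_INTERVAL_S and counts in 2-usec units, so shifting by
 * (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S) effectively places
 * usecs / 2 in the field (odd values round down via the mask). An
 * illustrative use is a register write such as:
 *
 *	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR));
 */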

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 next_to_alloc;
	/* CL2 - 2nd cacheline starts here */
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	struct xdp_buff xdp;
	/* CL3 - 3rd cacheline starts here */
	struct bpf_prog *xdp_prog;
	u16 rx_offset;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 first_desc;

	/* stats structs */
	struct ice_ring_stats *ring_stats;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	u32 nr_frags;
	dma_addr_t dma;			/* physical address of ring */
	u64 cached_phctime;
	u16 rx_buf_len;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
	u8 flags;
	/* CL5 - 5th cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	u16 xdp_tx_active;
	/* stats structs */
	struct ice_ring_stats *ring_stats;
	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid;			/* Added Tx queue TEID */
	/* CL4 - 4th cacheline starts here */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1	BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2	BIT(2)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;
	};
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
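
/* Illustrative use of the iterators (rc and ring are hypothetical locals):
 * given a struct ice_ring_container rc with its rx_ring list populated,
 *
 *	struct ice_rx_ring *ring;
 *
 *	ice_for_each_rx_ring(ring, rc)
 *		ice_clean_rx_ring(ring);
 *
 * walks every ring chained through ->next off the container head.
 */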

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
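
/* For example, with 4K pages a ring using ICE_RXBUF_3072 exceeds the
 * half-page limit of 2048, so ice_rx_pg_order() returns 1 and
 * ice_rx_pg_size() becomes 8192: an order-1 page backs the 3K buffer.
 * With ICE_RXBUF_2048 the order stays 0 and two buffers share one 4K page.
 */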

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */