/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_2048		2048
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
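/* Worked example (values follow directly from the definitions above):
 *   ICE_MAX_DATA_PER_TXD         = 0x3FFF (16383)
 *   ~(ICE_MAX_READ_REQ_SIZE - 1) = ~0x0FFF, i.e. low 16 bits 0xF000
 *   ICE_MAX_DATA_PER_TXD_ALIGNED = 0x3FFF & ~0x0FFF = 0x3000 (12288)
 * so each data descriptor carries at most 12K, keeping DMA reads 4K-aligned.
 */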

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* We are assuming that the cache line is always 64 bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
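/* Sketch of the worst-case arithmetic, assuming the common configuration of
 * MAX_SKB_FRAGS == 17 and a 16-byte struct ice_tx_desc (both are properties
 * of the kernel and hardware, not guaranteed by this header):
 *   DESC_NEEDED = 17 frags + 1 context descriptor
 *		 + 64/16 = 4 descriptors of cache-line headroom
 *		 + 1 for the skb linear data pointer
 *		 = 23 descriptors
 */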
#define ICE_DESC_UNUSED(R)	\
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)
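/* Example: on a 256-descriptor ring with next_to_use == 10 and
 * next_to_clean == 5, descriptors 5..9 are in flight, so the macro yields
 * 256 + 5 - 10 - 1 = 250 unused slots; if next_to_use has wrapped past the
 * end (next_to_clean == 20, next_to_use == 10), it yields 20 - 10 - 1 = 9.
 * One slot is always held back so next_to_use never catches next_to_clean.
 */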

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16
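/* Illustrative only: the VLAN TCI rides in the upper 16 bits of tx_flags,
 * so a transmit path can recover it with
 *   u16 vlan_tci = (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;
 * and the 3-bit priority alone with the _PR_ mask/shift pair.
 */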

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
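/* Sketch of the intended use (mirrors common Intel Ethernet drivers; the
 * actual call sites live in ice_txrx.c): Rx pages are mapped with these
 * attributes so the driver can skip the full-buffer CPU sync and instead
 * sync only the region hardware actually wrote, e.g.:
 *
 *	dma_addr_t dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
 *					    DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 */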

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	u16 pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};
/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160
#define ICE_DFLT_TX_ITR	(ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_DFLT_RX_ITR	(ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_ITR_DYNAMIC	0x8000  /* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting)	((setting) & ~ICE_ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~ICE_ITR_MASK)
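/* Example (follows from the macros above): the default Tx setting is
 * ICE_ITR_20K | ICE_ITR_DYNAMIC == 0x8032, so ITR_IS_DYNAMIC() is true and
 * ITR_TO_REG() strips the flag to give 50, the user-visible interval in
 * usecs (~20K interrupts/sec). Values must stay 2-usec aligned per
 * ICE_ITR_MASK before being written to a GLINT_ITR register.
 */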

#define ICE_ITR_ADAPTIVE_MIN_INC	0x0002
#define ICE_ITR_ADAPTIVE_MIN_USECS	0x0002
#define ICE_ITR_ADAPTIVE_MAX_USECS	0x00FA
#define ICE_ITR_ADAPTIVE_LATENCY	0x8000
#define ICE_ITR_ADAPTIVE_BULK		0x0000
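/* Rough sketch of how the adaptive algorithm (in ice_txrx.c) is expected to
 * combine these: the measured interval is clamped to [MIN_USECS, MAX_USECS]
 * (2..250 usecs) and tagged with either the LATENCY or BULK flag, e.g.
 *   itr = clamped_usecs | ICE_ITR_ADAPTIVE_LATENCY;
 * where the flag later steers how aggressively the value may change. This
 * illustrates intent only; it is not a copy of the algorithm.
 */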

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	u16 q_index;			/* Queue number of ring */
	u16 q_handle;			/* Queue handle per TC */

	u8 ring_active:1;		/* is ring online or not */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	/* stats structs */
	struct ice_q_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	struct rcu_head rcu;		/* to avoid race on free */
	/* CLX - the below items are only accessed infrequently and should be
	 * in their own cache line if possible
	 */
	dma_addr_t dma;			/* physical address of ring */
	unsigned int size;		/* length of descriptor ring in bytes */
	u32 txq_teid;			/* Added Tx queue TEID */
	u16 rx_buf_len;
#ifdef CONFIG_DCB
	u8 dcb_tc;			/* Traffic class of ring */
#endif /* CONFIG_DCB */
} ____cacheline_internodealigned_in_smp;

struct ice_ring_container {
	/* head of linked-list of rings */
	struct ice_ring *ring;
	unsigned long next_update;	/* jiffies value of next queue update */
	unsigned int total_bytes;	/* total bytes processed this interrupt */
	unsigned int total_pkts;	/* total packets processed this interrupt */
	u16 itr_idx;		/* index in the interrupt vector */
	u16 target_itr;		/* value in usecs divided by the hw->itr_gran */
	u16 current_itr;	/* value in usecs divided by the hw->itr_gran */
	/* high bit set means dynamic ITR; the remaining bits store the
	 * user-readable ITR value in usecs, which must be converted before
	 * being programmed into a register.
	 */
	u16 itr_setting;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)
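/* Usage example (sketch; the field name q_vector->tx assumes the driver's
 * struct ice_q_vector, which embeds one ice_ring_container per direction):
 *
 *	struct ice_ring *ring;
 *	u64 pkts = 0;
 *
 *	ice_for_each_ring(ring, q_vector->tx)
 *		pkts += ring->stats.pkts;
 */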

bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);

#endif /* _ICE_TXRX_H_ */