/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17
#define ICE_TX_THRESH		32

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
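
/* Worked example (illustrative): ICE_MAX_DATA_PER_TXD is 16383 (0x3FFF) and
 * ICE_MAX_READ_REQ_SIZE - 1 is 4095 (0xFFF), so the aligned limit is
 * 0x3FFF & ~0xFFF = 0x3000 = 12288 bytes per data descriptor.  A 32768-byte
 * fragment would therefore need DIV_ROUND_UP(32768, 12288) = 3 descriptors.
 */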

#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 bytes to store the data for the
 * frame. This leaves us with 512 bytes of room.  From that we need to deduct
 * the space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
			SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
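
/* Rough numbers (illustrative; the real values depend on the architecture and
 * kernel configuration): with a NET_SKB_PAD of 64 and an skb_shared_info
 * overhead of roughly 320 bytes, the check above compares 64 + 1536 = 1600
 * against 2048 - 320 = 1728, so a 2K buffer is normally large enough and the
 * 1536-byte layout is kept.
 */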

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Round the given buffer length up to the nearest half page, subtract the
 * space consumed by the skb_shared_info, and then subtract the buffer length
 * itself; what remains is the space available for padding.
 *
 * Return: number of bytes left for padding use.
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}
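
/* Worked example (illustrative): on 4K pages with rx_buf_len = 1536,
 * ALIGN(1536, 2048) is 2048; SKB_WITH_OVERHEAD(2048) is roughly 1728 with a
 * ~320-byte skb_shared_info, leaving about 192 bytes for padding.  The exact
 * result depends on the kernel configuration.
 */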

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and, based on that, calculate the
 * padding.
 *
 * Return: number of bytes of headroom to reserve in front of the frame.
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R)	\
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)
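
/* Worked examples (illustrative): struct ice_tx_desc is 16 bytes, so
 * ICE_DESCS_PER_CACHE_LINE is 4 and, with the usual MAX_SKB_FRAGS of 17,
 * DESC_NEEDED is 17 + 1 + 4 + 1 = 23.  For ICE_DESC_UNUSED, a ring with
 * count = 256, next_to_clean = 5 and next_to_use = 10 reports
 * 256 + 5 - 10 - 1 = 250 free descriptors.
 */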

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
 * freed instead of being returned like skb packets.
 */
#define ICE_TX_FLAGS_DUMMY_PKT	BIT(3)
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16
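
/* Layout note (illustrative): the 16-bit VLAN tag lives in the upper half of
 * tx_flags, with the 3-bit priority field in bits 31:29.  For example, the
 * tag can be recovered with:
 *
 *	u16 vlan_tag = (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;
 */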

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf; /* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	u16 pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * mentioning ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)
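
/* Worked example (illustrative): the ITR granularity is 2 usecs, so bit 0 of
 * an ITR setting is never written to hardware; ITR_REG_ALIGN(51) evaluates
 * to 50.
 */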

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
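
/* Usage sketch (illustrative, not a verbatim copy of the driver code; hw is
 * assumed to be the device's struct ice_hw): a vector that keeps interrupts
 * disabled could arm write-back-on-ITR with something like:
 *
 *	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR));
 */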

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
	/* CL3 - 3rd cacheline starts here */
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;
	u16 rx_offset;
	u16 rx_buf_len;

	/* stats structs */
	struct ice_rxq_stats rx_stats;
	struct ice_q_stats	stats;
	struct u64_stats_sync syncp;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	struct sk_buff *skb;
	dma_addr_t dma;			/* physical address of ring */
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
	u64 cached_phctime;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_rx;
	u8 flags;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_rs;
	u16 next_dd;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	/* stats structs */
	struct ice_q_stats	stats;
	struct u64_stats_sync syncp;
	struct ice_txq_stats tx_stats;

	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid;			/* Added Tx queue TEID */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}
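
/* Usage sketch (illustrative; 'use_legacy_rx' is a placeholder for the
 * driver's legacy-rx check, the real decision logic lives in the Rx
 * configuration path): the build_skb helpers are toggled per ring roughly
 * like so:
 *
 *	if (use_legacy_rx)
 *		ice_clear_ring_build_skb_ena(ring);
 *	else
 *		ice_set_ring_build_skb_ena(ring);
 */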

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	u16 itr_setting:13;
	u16 itr_reserved:2;
	u16 itr_mode:1;
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
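
/* Usage sketch (illustrative; q_vector is assumed to be the rings' struct
 * ice_q_vector, whose 'tx' member is a ring container): walking every Tx ring
 * attached to the vector, e.g. to sum statistics:
 *
 *	struct ice_tx_ring *tx_ring;
 *	u64 pkts = 0;
 *
 *	ice_for_each_tx_ring(tx_ring, q_vector->tx)
 *		pkts += tx_ring->stats.pkts;
 */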

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}
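
/* Worked example (illustrative): with 4K pages and a 3072-byte Rx buffer the
 * order is 1, so ice_rx_pg_size() below resolves to 8192.
 */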

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */