/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK      256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define I40E_ITR_100K		    10	/* all values below must be even */
#define I40E_ITR_50K		    20
#define I40E_ITR_20K		    50
#define I40E_ITR_18K		    60
#define I40E_ITR_8K		   122
#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
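
/* Illustrative sketch (not part of the driver API): how a stored ITR
 * setting decomposes into the dynamic flag and the usec value, and how it
 * would be conditioned before being programmed.  Per the comment above,
 * the register itself holds usecs / 2, so the write path halves the value
 * after this.
 *
 *	u16 setting = I40E_ITR_RX_DEF;		// 50 usecs | I40E_ITR_DYNAMIC
 *
 *	if (ITR_IS_DYNAMIC(setting))		// true: adaptive ITR requested
 *		setting = ITR_TO_REG(setting);	// drop the flag, keep 50
 *
 *	setting = ITR_REG_ALIGN(setting);	// round up to even, e.g. 51 -> 52
 */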

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
#define I40E_INTRL_8K              125     /* 8000 ints/sec */
#define I40E_INTRL_62K             16      /* 62500 ints/sec */
#define I40E_INTRL_83K             12      /* 83333 ints/sec */
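
/* Example (illustrative): round-tripping a rate limit through the register
 * encoding.  The register has 4 usec resolution, so requests under 4 usecs
 * collapse to 0, i.e. rate limiting disabled.
 *
 *	u16 reg  = i40e_intrl_usec_to_reg(I40E_INTRL_8K); // (125 >> 2) | INTRL_ENA = 0x5F
 *	int usec = INTRL_REG_TO_USEC(reg);		  // (0x1F << 2) = 124
 *
 * Note the truncation: 125 is not a multiple of 4, so one usec is lost in
 * the conversion.
 */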

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by the DYN_CTLN
 * and QINT registers, or more generally anywhere the manual mentions
 * ITR_INDX. ITR_NONE cannot be used as an index 'n' into any register;
 * instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1
#define I40E_PE_ITR    I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
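
/* Worked example of the padding math, assuming 4K pages and the 384-byte
 * skb_shared_info overhead quoted earlier (the exact figure varies by
 * kernel config):
 *
 *	rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;	// 1536 - 2 = 1534
 *	page_size  = ALIGN(1534, 4096 / 2);		// 2048
 *	pad_size   = (2048 - 384) - 1534;		// 130 bytes of headroom
 *
 * so under these assumptions I40E_SKB_PAD comes out to roughly 130 bytes,
 * well above the NET_SKB_PAD + NET_IP_ALIGN fallback used for large pages.
 */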

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
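
/* Typical use, as in the Rx clean path: test the Descriptor Done bit
 * without byte-swapping the whole qword on every poll iteration
 * (I40E_RX_DESC_STATUS_DD_SHIFT comes from i40e_type.h):
 *
 *	if (!i40e_test_staterr(rx_desc,
 *			       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;	// hardware has not written this descriptor back yet
 */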

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)
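
/* A minimal sketch of how the ring-walk helpers above fit together when
 * consuming descriptors (variable names are illustrative):
 *
 *	u16 i = rx_ring->next_to_clean;
 *	union i40e_rx_desc *next_rxd;
 *
 *	I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
 *	// 'i' has advanced (wrapping at rx_ring->count) and 'next_rxd'
 *	// is already being pulled into cache before it is dereferenced.
 */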

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
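
/* Sanity-checking the approximation above for a 32K send:
 *
 *	(32768 * 85) >> 20 = 2785280 >> 20 = 2, plus 1 gives 3 descriptors
 *
 * which matches the exact ceil(32768 / 12288) = 3.  As noted, the estimate
 * only drifts once sizes approach 1M.
 */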

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;		/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40e_clean_rx_ring_irq() is called
					 * for this ring.
					 */

	struct i40e_channel *ch;
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
#define I40E_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
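
/* Example: this is how the NAPI poll routine walks every Tx ring owned by
 * a q_vector (condensed from i40e_napi_poll() in i40e_txrx.c):
 *
 *	struct i40e_ring *ring;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		i40e_clean_tx_irq(vsi, ring, budget);
 */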

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
void i40e_xdp_flush(struct net_device *dev);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring:  tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
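
/* The write-back slot read above lives one descriptor past the end of the
 * ring; i40e_setup_tx_descriptors() reserves an extra u32 after the
 * count * sizeof(struct i40e_tx_desc) bytes of descriptors for exactly
 * this purpose.  The volatile read keeps the compiler from caching a
 * stale head value across poll iterations.
 */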

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb.  The result
 * is always at least one, since the skb's linear data consumes at least
 * one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to ensure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
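
/* Putting the helpers above together, the hot transmit path effectively
 * does the following (a condensed sketch of i40e_xmit_frame_ring() in
 * i40e_txrx.c):
 *
 *	count = i40e_xmit_descriptor_count(skb);
 *	if (i40e_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;	// illustrative label
 *		count = i40e_txd_use_count(skb->len);
 *	}
 *	// +4 descriptor gap to avoid the cache line where head sits,
 *	// +1 for a possible context descriptor
 *	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
 *		return NETDEV_TX_BUSY;
 */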

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */