/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */

#define I40E_MAX_ITR		0x0FF0	/* reg uses 2 usec resolution */
#define I40E_MIN_ITR		0x0001	/* reg uses 2 usec resolution */
#define I40E_ITR_100K		0x0005
#define I40E_ITR_50K		0x000A
#define I40E_ITR_20K		0x0019
#define I40E_ITR_18K		0x001B
#define I40E_ITR_8K		0x003E
#define I40E_ITR_4K		0x007A
#define I40E_MAX_INTRL		0x3B	/* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF		(ITR_REG_TO_USEC(I40E_ITR_20K) | \
				 I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(ITR_REG_TO_USEC(I40E_ITR_20K) | \
				 I40E_ITR_DYNAMIC)
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_MIN_INT_RATE	250	/* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE	500000	/* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK	256
#define ITR_TO_REG(setting) (((setting) & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) ((itr_reg) << 1)
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA		BIT(6)
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
#define I40E_INTRL_8K		125	/* 8000 ints/sec */
#define I40E_INTRL_62K		16	/* 62500 ints/sec */
#define I40E_INTRL_83K		12	/* 83333 ints/sec */
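
/* Worked example (illustrative): a limit of I40E_INTRL_8K = 125 usec is
 * converted by i40e_intrl_usec_to_reg(125) to (125 >> 2) | INTRL_ENA =
 * 0x1F | 0x40 = 0x5F.  Reading it back, INTRL_REG_TO_USEC(0x5F) =
 * (0x5F & ~INTRL_ENA) << 2 = 124 usec (~8000 ints/sec); the low two bits
 * are lost to the register's 4 usec resolution.
 */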

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256	256
#define I40E_RXBUFFER_1536	1536	/* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048	2048
#define I40E_RXBUFFER_3072	3072	/* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER	9728	/* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to 512 bytes of extra data, meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 * up negative.  In these cases we should fall back to the legacy
 * receive path.
 */
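/* Worked numbers for the 2K case (illustrative, reusing the ~384 byte
 * skb_shared_info figure from the NOTE above): a 2048 byte buffer minus a
 * 1536 byte frame leaves 512 bytes; after the shared info is deducted,
 * roughly 128 bytes remain for NET_SKB_PAD headroom plus the NET_IP_ALIGN
 * shift.  The I40E_2K_TOO_SMALL_WITH_PADDING check below encodes exactly
 * this comparison via SKB_WITH_OVERHEAD().
 */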
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(r)->next_to_clean = (i);	\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K.  But division is slow!  Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up.  Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K.  This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
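
/* Sample values (illustrative): for size = 1500, (1500 * 85) >> 20 = 0,
 * so a full Ethernet frame needs one descriptor; a 32K fragment yields
 * (32768 * 85) >> 20 = 2, i.e. three descriptors; a 64K TSO payload
 * yields six, matching DIV_ROUND_UP(65536, 12288) where 12288 is
 * I40E_MAX_DATA_PER_TXD_ALIGNED, i.e. (16K - 1) & ~(4K - 1).
 */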

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16
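
/* Illustrative layout: the 16-bit VLAN TCI lives in the upper half of
 * tx_flags, so a tag with priority 5 and VID 100 would be stored as
 * ((5 << VLAN_PRIO_SHIFT) | 100) << I40E_TX_FLAGS_VLAN_SHIFT, and
 * (tx_flags & I40E_TX_FLAGS_VLAN_PRIO_MASK) >> I40E_TX_FLAGS_VLAN_PRIO_SHIFT
 * recovers the 3-bit priority while the low 16 bits stay free for the
 * BIT(n) flags above.
 */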

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS	/* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT		0
#define I40E_RX_DTYPE_HEADER_SPLIT	1
#define I40E_RX_DTYPE_SPLIT_ALWAYS	2
#define I40E_RX_SPLIT_L2	0x1
#define I40E_RX_SPLIT_IP	0x2
#define I40E_RX_SPLIT_TCP_UDP	0x4
#define I40E_RX_SPLIT_SCTP	0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic; use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40e_clean_rx_ring_irq() is called
					 * for this ring.
					 */

	struct i40e_channel *ch;
} ____cacheline_internodealigned_in_smp;
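
/* Worked example for the *_itr_setting fields above (illustrative):
 * I40E_ITR_RX_DEF expands to ITR_REG_TO_USEC(I40E_ITR_20K) | I40E_ITR_DYNAMIC
 * = 50 | 0x8000 = 0x8032, so ITR_IS_DYNAMIC() is true, and ITR_TO_REG()
 * strips the flag and halves the 50 usec user value back to the 2 usec
 * resolution register value I40E_ITR_20K (0x19).
 */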

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
};

struct i40e_ring_container {
	/* array of pointers to rings */
	struct i40e_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	unsigned long last_itr_update;	/* jiffies of last ITR update */
	u16 count;
	enum i40e_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb.  Because
 * i40e_txd_use_count() always returns at least one, the result is never
 * zero.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
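
/* Illustrative count: an skb with a 1500 byte linear head and three 32K
 * fragments needs i40e_txd_use_count(1500) + 3 * i40e_txd_use_count(32768)
 * = 1 + 3 * 3 = 10 data descriptors.
 */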

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */