/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */

#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF I40E_ITR_20K
#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK 256
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero.
 */
#define INTRL_ENA BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit, in usecs, to convert
 *
 * This function converts a decimal interrupt rate limit in usecs to the
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
#define I40E_INTRL_8K 125 /* 8000 ints/sec */
#define I40E_INTRL_62K 16 /* 62500 ints/sec */
#define I40E_INTRL_83K 12 /* 83333 ints/sec */

#define I40E_QUEUE_END_OF_LIST 0x7FF
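
/* Worked example (illustrative only, not part of the driver API): the ITR
 * macros above strip the dynamic flag and convert between a usec-based
 * user setting and the register's 2 usec resolution, while the rate-limit
 * helpers use a 4 usec resolution plus the INTRL_ENA bit. Assuming a
 * 50 usec setting with the dynamic bit set, and the I40E_INTRL_8K limit:
 *
 *	u16 setting = 50 | I40E_ITR_DYNAMIC;	// 0x8032
 *	ITR_IS_DYNAMIC(setting);		// 1
 *	ITR_TO_REG(setting);			// 0x19 (25 * 2 usec = 50 usec)
 *	ITR_REG_TO_USEC(ITR_TO_REG(setting));	// back to 50
 *
 *	i40e_intrl_usec_to_reg(I40E_INTRL_8K);	// (125 >> 2) | INTRL_ENA = 0x5F
 *	INTRL_REG_TO_USEC(0x5F);		// 124 usec (4 usec granularity)
 */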

/* This enum matches the hardware bits and is meant to be used by the
 * DYN_CTLN and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
#define I40E_RXBUFFER_8192 8192
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to 512 bytes of extra data, meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask, which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
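
/* Example (illustrative): the Rx clean-up path uses i40e_test_staterr() to
 * check individual write-back status bits, e.g. descriptor-done and
 * end-of-frame (the I40E_RX_DESC_STATUS_* shifts live in i40e_type.h):
 *
 *	if (!i40e_test_staterr(rx_desc, BIT_ULL(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;	// HW has not written this descriptor back yet
 *	if (i40e_test_staterr(rx_desc, BIT_ULL(I40E_RX_DESC_STATUS_EOF_SHIFT)))
 *		;	// this buffer is the last fragment of the packet
 */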

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define I40E_RX_BUFFER_WRITE 16	/* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(r)->next_to_clean = (i);	\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K, which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
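
/* Worked example (the values follow directly from the defines above):
 *
 *	I40E_MAX_DATA_PER_TXD         = 16 * 1024 - 1     = 0x3FFF (16383)
 *	I40E_MAX_DATA_PER_TXD_ALIGNED = 0x3FFF & ~0x0FFF  = 0x3000 (12288)
 *
 * i.e. once the 4K read-request alignment is accounted for, we assume each
 * Tx descriptor carries at most 12K of data.
 */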

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M, which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since the two right shifts can be folded together and applied after the
 * multiply, we can reorder the operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
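
/* Worked example (illustrative): for a 64K TSO chunk,
 *
 *	i40e_txd_use_count(65536) = ((65536 * 85) >> 20) + 1
 *	                          = (5570560 >> 20) + 1
 *	                          = 5 + 1
 *	                          = 6
 *
 * which matches the conservative 12K-per-descriptor estimate:
 * 65536 / 12288 = 5.33..., rounded up to 6 descriptors.
 */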

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4

#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_FCCRC BIT(6)
#define I40E_TX_FLAGS_FSO BIT(7)
#define I40E_TX_FLAGS_TSYN BIT(8)
#define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
};

/* Some useful defines for the virtchannel interface, which
 * is the only remaining user of header split.
 */
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	unsigned long state;
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* High bit set means dynamic; use accessor routines to read/write.
	 * Hardware only supports 2us resolution for the ITR registers.
	 * These values always store the USER setting and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40e_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40e_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
	I40E_ULTRA_LATENCY = 3,
};

struct i40e_ring_container {
	/* head of linked list of ring(s) in this container */
	struct i40e_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	enum i40e_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
#ifdef I40E_FCOE
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
		 struct i40e_tx_buffer *first, u32 tx_flags,
		 const u8 hdr_len, u32 td_cmd, u32 td_offset);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
			       struct i40e_ring *tx_ring, u32 *flags);
#endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
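
/* Example (illustrative): the head write-back word sits immediately past
 * the last descriptor, which is why the ring allocation reserves a u32
 * beyond count * sizeof(struct i40e_tx_desc). A caller can estimate the
 * number of in-flight descriptors from it, roughly what
 * i40e_get_tx_pending() does (a sketch, not the exact upstream code):
 *
 *	u32 head = i40e_get_head(tx_ring);
 *	u32 pending = (head <= tx_ring->next_to_use) ?
 *		      tx_ring->next_to_use - head :
 *		      tx_ring->next_to_use + tx_ring->count - head;
 */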

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb. Since even
 * an empty skb consumes one descriptor, the result is always at least one.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire, so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}

/**
 * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
 * @ptype: the packet type field from Rx descriptor write-back
 **/
static inline bool i40e_rx_is_fcoe(u16 ptype)
{
	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */
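
/* Worked example (an illustrative sketch of how the Tx hot path combines
 * the helpers above; see i40e_xmit_frame_ring() in i40e_txrx.c for the
 * real flow). For an skb with a 1500 byte linear area and two 32K frags:
 *
 *	int count = i40e_xmit_descriptor_count(skb);
 *	// per-buffer i40e_txd_use_count(): 1500 -> 1, 32768 -> 3 each,
 *	// so count = 1 + 3 + 3 = 7
 *	if (i40e_chk_linearize(skb, count))	// false here: 7 < 8
 *		;	// linearize the skb and recompute count
 *	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
 *		return NETDEV_TX_BUSY;	// ring too full; queue was stopped
 */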