/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1664		1664
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17
#define ICE_MAX_FRAME_LEGACY_RX 8320

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Figure out the size of half page based on given buffer length and
 * then subtract the skb_shared_info followed by subtraction of the
 * actual buffer length; this in turn results in the actual space that
 * is left for padding usage
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
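
/* Worked example (illustrative only, not upstream documentation): assume 4K
 * pages, NET_SKB_PAD = 64, NET_IP_ALIGN = 2 and a 64-byte cache line. Then
 * ICE_2K_TOO_SMALL_WITH_PADDING is (64 + 1536) > SKB_WITH_OVERHEAD(2048),
 * which is false as long as the shared-info overhead fits in the remaining
 * 448 bytes, so ice_skb_pad() picks rx_buf_len = 1536 - 2 = 1534.
 * ice_compute_pad(1534) rounds 1534 up to the half page (2048) and returns
 *
 *	SKB_WITH_OVERHEAD(2048) - 1534
 *	= 2048 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - 1534
 *
 * which is the headroom ICE_SKB_PAD reserves in front of the frame; the
 * exact number depends on sizeof(struct skb_shared_info) for the build.
 */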

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R)	\
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)

#define ICE_RX_DESC_UNUSED(R)	\
	((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->first_desc - (R)->next_to_use - 1)

#define ICE_RING_QUARTER(R) ((R)->count >> 2)

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
 * freed instead of returned like skb packets.
 */
#define ICE_TX_FLAGS_DUMMY_PKT	BIT(3)
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(8)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)
#define ICE_XDP_EXIT		BIT(3)
#define ICE_SKB_CONSUMED	ICE_XDP_CONSUMED

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;		/* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	unsigned int pgcnt;
	unsigned int act;
	unsigned int pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt;		/* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

struct ice_ring_stats {
	struct rcu_head rcu;	/* to avoid race on free */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};
};
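
/* Illustrative sketch (not part of the upstream header): readers of the
 * pkts/bytes counters above are expected to sample them under the syncp
 * sequence counter so that 64-bit values stay consistent on 32-bit hosts,
 * along the lines of:
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring_stats->syncp);
 *		pkts = ring_stats->stats.pkts;
 *		bytes = ring_stats->stats.bytes;
 *	} while (u64_stats_fetch_retry(&ring_stats->syncp, start));
 */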

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160	/* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 next_to_alloc;
	/* CL2 - 2nd cacheline starts here */
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	struct xdp_buff xdp;
	/* CL3 - 3rd cacheline starts here */
	struct bpf_prog *xdp_prog;
	u16 rx_offset;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 first_desc;

	/* stats structs */
	struct ice_ring_stats *ring_stats;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	dma_addr_t dma;			/* physical address of ring */
	u64 cached_phctime;
	u16 rx_buf_len;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
	u8 flags;
	/* CL5 - 5th cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
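
/* Worked example (illustrative only): using the index fields above with
 * count = 512, next_to_use = 500 and next_to_clean = 10,
 * ICE_DESC_UNUSED() evaluates to 512 + 10 - 500 - 1 = 21 free slots;
 * once the indices wrap so that next_to_clean > next_to_use (say 500 and
 * 10), the count term drops out and it evaluates to 500 - 10 - 1 = 489.
 * ICE_RX_DESC_UNUSED() applies the same arithmetic with first_desc in
 * place of next_to_clean.
 */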

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_rs;
	u16 next_dd;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	/* stats structs */
	struct ice_ring_stats *ring_stats;
	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid;			/* Added Tx queue TEID */
	/* CL4 - 4th cacheline starts here */
	u16 xdp_tx_active;
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1	BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2	BIT(2)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;
	};
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
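
/* For illustration (assumption, not upstream text): on a 4K-page system a
 * ring using ICE_RXBUF_3072 buffers cannot fit two buffers per page, so
 * ice_rx_pg_order() returns 1 and ice_rx_pg_size() is 8K, while a ring
 * using ICE_RXBUF_2048 stays at order 0 with two buffers sharing one 4K
 * page. On systems with pages of 8K or larger the order is always 0.
 */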

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */