/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17
#define ICE_TX_THRESH		32

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE		4096
#define ICE_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Round the buffer length up to the next half-page boundary, then subtract
 * the skb_shared_info overhead and the buffer length itself; what remains
 * is the space available for padding.
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
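
/* Illustrative walk-through of the small-page branch above (numbers are an
 * example only, not guaranteed on every architecture): with 4 KiB pages,
 * NET_IP_ALIGN == 2 and an assumed skb_shared_info overhead of
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320 bytes, a 2K buffer
 * is big enough for a standard frame, so:
 *
 *   rx_buf_len     = ICE_RXBUF_1536 - NET_IP_ALIGN = 1534
 *   half_page_size = ALIGN(1534, 4096 / 2)         = 2048
 *   ice_skb_pad()  = (2048 - 320) - 1534           = 194
 */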

/* We are assuming that the cache line is always 64 bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R)	\
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)
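
/* Illustrative walk-through (example values only): ICE_DESC_UNUSED()
 * computes the number of free descriptors while always keeping one slot
 * unused, so that next_to_use == next_to_clean means "empty" rather than
 * "full". For a ring with count == 512:
 *
 *   next_to_clean = 200, next_to_use = 10:   0  + 200 - 10  - 1 = 189
 *   next_to_clean = 10,  next_to_use = 200: 512 + 10  - 200 - 1 = 321
 */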

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
 * freed instead of returned like skb packets.
 */
#define ICE_TX_FLAGS_DUMMY_PKT	BIT(3)
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf; /* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	u16 pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1
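
/* Illustrative walk-through (example values only): an ITR setting is kept
 * in usecs, but the hardware interval field counts in ICE_ITR_GRAN_US
 * (2 usec) units, which is why ICE_GLINT_DYN_CTL_WB_ON_ITR() shifts by
 * GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S. A setting is first aligned
 * down to an even number of usecs, then converted to register units:
 *
 *   ITR_REG_ALIGN(51)    = 51 & 0x1FFE = 50 usecs
 *   50 >> ICE_ITR_GRAN_S = 25 register units
 */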

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
	/* CL3 - 3rd cacheline starts here */
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;
	u16 rx_offset;
	u16 rx_buf_len;

	/* stats structs */
	struct ice_rxq_stats rx_stats;
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	struct sk_buff *skb;
	dma_addr_t dma;			/* physical address of ring */
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
	u64 cached_phctime;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_rx;
	u8 flags;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_rs;
	u16 next_dd;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	struct ice_txq_stats tx_stats;

	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid;			/* Added Tx queue TEID */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	u16 itr_setting:13;
	u16 itr_reserved:2;
	u16 itr_mode:1;
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
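
/* Illustrative sketch, not part of the driver: ice_example_total_tx_descs()
 * is a hypothetical helper showing how the iterators above walk the singly
 * linked list of rings hanging off a ring container.
 */
static inline u32 ice_example_total_tx_descs(struct ice_ring_container *rc)
{
	struct ice_tx_ring *ring;
	u32 total = 0;

	/* visit every Tx ring linked into this container via ->next */
	ice_for_each_tx_ring(ring, *rc)
		total += ring->count;
	return total;
}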

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */