/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_2048		2048
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R)	\
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)
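
/* Worked example for ICE_DESC_UNUSED() (illustrative values, not taken from
 * the driver): with count = 512, next_to_use = 510 and next_to_clean = 2,
 * next_to_clean is not greater than next_to_use, so count is added back in:
 * 512 + 2 - 510 - 1 = 3 descriptors still free. The trailing "- 1" keeps one
 * slot permanently unused so that next_to_use == next_to_clean unambiguously
 * means an empty ring rather than a full one.
 */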

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_S	16

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct ice_tx_offload_params {
	u8 header_len;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u16 cd_l2tag2;
	u32 cd_tunnel_params;
	u64 cd_qw1;
	struct ice_ring *tx_ring;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt;	/* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define ICE_ITR_8K	125
#define ICE_ITR_20K	50
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
/* Apply ITR granularity translation to program the register. itr_gran is
 * either 2 or 4 usecs, so the usec value is shifted right by itr_gran / 2
 * (a shift of 1 or 2, i.e. a divide by the granularity) to get the value
 * the register expects.
 */
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
				   ((itr_gran) / 2))
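
/* Worked example for ITR_TO_REG() (illustrative): with a 2 usec ITR
 * granularity, ITR_TO_REG(ICE_ITR_20K, 2) clears the ICE_ITR_DYNAMIC flag
 * bit and shifts right by one: (50 & ~0x8000) >> 1 = 25, which the hardware
 * interprets as 25 * 2 = 50 usecs between interrupts. With a 4 usec
 * granularity the shift is by two: 50 >> 2 = 12 register units.
 */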

#define ICE_DFLT_INTRL	0

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	u16 q_index;			/* Queue number of ring */
	u32 txq_teid;			/* Added Tx queue TEID */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 ring_active;			/* is ring online or not */

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */
	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;

enum ice_latency_range {
	ICE_LOWEST_LATENCY = 0,
	ICE_LOW_LATENCY = 1,
	ICE_BULK_LATENCY = 2,
	ICE_ULTRA_LATENCY = 3,
};

struct ice_ring_container {
	/* head of the linked list of rings, chained via ring->next */
	struct ice_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_pkts;	/* total packets processed this int */
	enum ice_latency_range latency_range;
	int itr_idx;	/* index in the interrupt vector */
	u16 itr;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)

bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);

#endif /* _ICE_TXRX_H_ */
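
/* Usage sketch for ice_for_each_ring() (illustrative only, not part of this
 * header): a q_vector's NAPI poll routine walks every ring linked into an
 * ice_ring_container. ice_clean_rx_irq() below is an assumed per-ring
 * cleanup helper standing in for whatever the caller actually uses; it is
 * not declared here.
 *
 *	struct ice_ring *ring;
 *
 *	ice_for_each_ring(ring, q_vector->rx)
 *		work_done += ice_clean_rx_irq(ring, budget);
 */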