/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_2048		2048
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define ICE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_S	16
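
/* Worked example for ICE_MAX_DATA_PER_TXD_ALIGNED above (pure macro
 * arithmetic, shown for reference):
 *
 *	ICE_MAX_DATA_PER_TXD	= 16 * 1024 - 1	= 0x3fff
 *	~(4096 - 1)		= ~0xfff	= ...f000
 *	0x3fff & ...f000	= 0x3000	= 12288
 *
 * i.e. each descriptor carries at most 12K of data: the largest
 * 4K-aligned length that still fits under the (16K - 1) limit.
 */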
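
/* Worked example for ICE_DESC_UNUSED() above: with count = 512,
 * next_to_clean = 10 and next_to_use = 500, descriptors 10..499 are
 * in flight (490 of them), so
 *
 *	512 + 10 - 500 - 1 = 21
 *
 * descriptors remain free. The trailing "- 1" keeps one slot
 * permanently unused so a full ring is distinguishable from an
 * empty one.
 */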
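
/* The VLAN tag rides in the upper 16 bits of tx_flags; a transmit
 * path would typically pack and unpack it like this (illustrative
 * sketch only, not a declaration from this header):
 *
 *	tx_flags |= (u32)vlan_tci << ICE_TX_FLAGS_VLAN_S;
 *	vlan_tci = (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;
 */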

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct ice_tx_offload_params {
	u8 header_len;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u16 cd_l2tag2;
	u32 cd_tunnel_params;
	u64 cd_qw1;
	struct ice_ring *tx_ring;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt;	/* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define ICE_ITR_8K	0x003E

/* apply ITR HW granularity translation to program the HW registers */
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	u16 q_index;			/* Queue number of ring */
	u32 txq_teid;			/* Added Tx queue TEID */

	/* high bit set means dynamic; use accessor routines to read/write.
	 * hardware supports 2us/1us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 ring_active;			/* is ring online or not */

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */
	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;

enum ice_latency_range {
	ICE_LOWEST_LATENCY = 0,
	ICE_LOW_LATENCY = 1,
	ICE_BULK_LATENCY = 2,
	ICE_ULTRA_LATENCY = 3,
};

struct ice_ring_container {
	/* head of linked list of rings */
	struct ice_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this interrupt */
	unsigned int total_pkts;	/* total packets processed this interrupt */
	enum ice_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)
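
/* Illustrative use of ice_for_each_ring() above: a NAPI poll routine
 * could walk every Tx ring attached to its vector like so.
 * ice_clean_tx_irq() is a hypothetical cleanup helper named only for
 * this sketch; it is not declared in this header.
 *
 *	struct ice_ring *ring;
 *
 *	ice_for_each_ring(ring, q_vector->tx)
 *		clean_complete &= ice_clean_tx_irq(ring, budget);
 */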
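
/* Expanding ITR_TO_REG() above is plain macro arithmetic; e.g. with
 * ICE_ITR_8K and itr_gran = 1:
 *
 *	(0x003E & ~0x8000) >> 1 = 0x001F
 *
 * The mask strips the ICE_ITR_DYNAMIC flag bit, and the shift scales
 * the stored USER setting down to hardware granularity units (reading
 * itr_gran = 1 as the 2us register resolution noted in struct ice_ring
 * above is an assumption made for this sketch).
 */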
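
/* Sketch only: readers of the u64_stats_sync-protected counters in
 * struct ice_ring above would use the standard retry loop so 64-bit
 * counters are read consistently on 32-bit kernels, e.g.:
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring->syncp);
 *		pkts = ring->stats.pkts;
 *		bytes = ring->stats.bytes;
 *	} while (u64_stats_fetch_retry(&ring->syncp, start));
 */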

bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);

#endif /* _ICE_TXRX_H_ */