// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
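	/* Illustrative arithmetic for the masking below (hypothetical values,
	 * not taken from a real trace): if next_to_use was previously 12,
	 * prev_ntu = 12 & ~0x7 = 8. Writing val = 15 yields 15 & ~0x7 = 8,
	 * which equals prev_ntu, so the tail write is skipped; writing
	 * val = 16 yields 16 & ~0x7 = 16 != 8, so the tail register is bumped.
	 */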
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag).
 */
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
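
/* Typical per-packet ordering in the Rx clean path (an illustrative sketch;
 * the real caller lives elsewhere in the driver and is not reproduced
 * verbatim here): the descriptor fields are copied into the skb first, then
 * the skb is handed to GRO:
 *
 *	ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 *	ice_receive_skb(rx_ring, skb, vlan_tag);
 */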

/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						  size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}

/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_CONSUMED if the conversion or transmission fails,
 * ICE_XDP_TX on success.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}
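
/* Illustrative use of the XDP helpers above from a napi Rx poll (a sketch
 * under the assumption of a standard clean-Rx loop; not a verbatim excerpt
 * from the driver's Rx clean routine): per-packet verdicts are OR-ed into a
 * mask and the batch is finalized once per poll:
 *
 *	unsigned int xdp_xmit = 0;
 *
 *	while (there are completed Rx descriptors and budget remains) {
 *		run the XDP program, yielding xdp_res;
 *		xdp_xmit |= xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR);
 *	}
 *	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
 */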