// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/filter.h>

#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns the appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be
 * used by skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline and
 * carried in the Rx descriptor.
 */
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u16 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
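
/* Illustrative sketch (not part of the upstream driver): this restates the
 * tail-bump rule from ice_release_rx_desc() in isolation.  QRX_TAIL ignores
 * the low 3 bits, so a tail write is only useful once the new value crosses
 * an 8-descriptor boundary relative to the previous next_to_use.  The helper
 * name is made up for this example.
 */
static bool __maybe_unused ice_example_tail_write_needed(u16 prev_ntu, u16 val)
{
	/* compare both indices rounded down to a multiple of 8, as HW does */
	return (prev_ntu & ~0x7) != (val & ~0x7);
}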

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}
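
/* Illustrative sketch (not part of the upstream driver): a compressed view of
 * the decision made by ice_rx_csum() above.  Given the decoded ptype and the
 * first status word, it returns true when the driver would end up marking the
 * skb CHECKSUM_UNNECESSARY.  It deliberately skips the IPv4/IPv6 split, the
 * tunnel handling and the error counter; the helper name is made up for this
 * example.
 */
static bool __maybe_unused
ice_example_csum_ok(struct ice_rx_ptype_decoded decoded, u16 rx_status0)
{
	/* HW must have parsed L3/L4 and reported no L3/L4 checksum errors */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return false;
	if (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
			  BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |
			  BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
		return false;

	/* checksum is only reported for TCP, UDP and SCTP inner protocols */
	return decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP ||
	       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_UDP ||
	       decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_SCTP;
}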

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	netdev_features_t features = rx_ring->netdev->features;
	bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buf, len, 0);
	xdp_ring->xdp_tx_active--;
	page_frag_free(tx_buf->raw_buf);
	tx_buf->raw_buf = NULL;
}

/**
 * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
 * @xdp_ring: XDP ring to clean
 *
 * Returns the number of cleaned Tx descriptors.
 */
static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
	int total_bytes = 0, total_pkts = 0;
	u32 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u32 cnt = xdp_ring->count;
	u32 ready_frames = 0;
	u32 frags;
	u32 idx;
	u32 ret;

	idx = xdp_ring->tx_buf[ntc].rs_idx;
	tx_desc = ICE_TX_DESC(xdp_ring, idx);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (idx >= ntc)
			ready_frames = idx - ntc + 1;
		else
			ready_frames = idx + cnt - ntc + 1;
	}

	if (!ready_frames)
		return 0;
	ret = ready_frames;

	while (ready_frames) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		/* bytecount holds size of head + frags */
		total_bytes += tx_buf->bytecount;
		frags = tx_buf->nr_frags;
		total_pkts++;
		/* count head + frags */
		ready_frames -= frags + 1;

		if (xdp_ring->xsk_pool)
			xsk_buff_free(tx_buf->xdp);
		else
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
		ntc++;
		if (ntc == cnt)
			ntc = 0;

		for (int i = 0; i < frags; i++) {
			tx_buf = &xdp_ring->tx_buf[ntc];

			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
			ntc++;
			if (ntc == cnt)
				ntc = 0;
		}
	}

	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean = ntc;
	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);

	return ret;
}
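
/* Illustrative sketch (not part of the upstream driver): the ready-descriptor
 * count used by ice_clean_xdp_irq() above, pulled out on its own.  Given the
 * index of the descriptor carrying the RS bit, next_to_clean and the ring
 * size, it returns how many descriptors are completed, accounting for the
 * ring wrapping around.  The helper name is made up for this example.
 */
static u32 __maybe_unused
ice_example_ready_frames(u32 rs_idx, u32 ntc, u32 ring_count)
{
	if (rs_idx >= ntc)
		return rs_idx - ntc + 1;

	/* RS descriptor sits before next_to_clean, so the batch wrapped */
	return rs_idx + ring_count - ntc + 1;
}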

/**
 * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
 * @xdp: XDP buffer to be placed onto Tx descriptors
 * @xdp_ring: XDP ring for transmission
 *
 * Returns ICE_XDP_TX on success or ICE_XDP_CONSUMED if the frame was dropped.
 */
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	struct device *dev = xdp_ring->dev;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_head;
	struct ice_tx_buf *tx_buf;
	u32 cnt = xdp_ring->count;
	void *data = xdp->data;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;

	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq(xdp_ring);

	if (unlikely(!free_space)) {
		xdp_ring->ring_stats->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
		if (free_space < nr_frags + 1) {
			xdp_ring->ring_stats->tx_stats.tx_busy++;
			return ICE_XDP_CONSUMED;
		}
	}

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_head = &xdp_ring->tx_buf[ntu];
	tx_buf = tx_head;

	for (;;) {
		dma_addr_t dma;

		dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto dma_unmap;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
		tx_buf->raw_buf = data;

		ntu++;
		if (ntu == cnt)
			ntu = 0;

		if (frag == nr_frags)
			break;

		tx_desc = ICE_TX_DESC(xdp_ring, ntu);
		tx_buf = &xdp_ring->tx_buf[ntu];

		data = skb_frag_address(&sinfo->frags[frag]);
		size = skb_frag_size(&sinfo->frags[frag]);
		frag++;
	}

	/* store info about bytecount and frag count in first desc */
	tx_head->bytecount = xdp_get_buff_len(xdp);
	tx_head->nr_frags = nr_frags;

	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

	xdp_ring->xdp_tx_active++;
	xdp_ring->next_to_use = ntu;

	return ICE_XDP_TX;

dma_unmap:
	/* unwind the descriptors mapped so far, walking back to the head */
	for (;;) {
		tx_buf = &xdp_ring->tx_buf[ntu];
		dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buf, len, 0);
		if (tx_buf == tx_head)
			break;

		if (!ntu)
			ntu += cnt;
		ntu--;
	}
	return ICE_XDP_CONSUMED;
}

/**
 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
 * @xdpf: XDP frame that will be converted to XDP buff
 * @xdp_ring: XDP ring for transmission
 */
int ice_xmit_xdp_ring(struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring)
{
	struct xdp_buff xdp;

	xdp_convert_frame_to_buff(xdpf, &xdp);
	return __ice_xmit_xdp_ring(&xdp, xdp_ring);
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @xdp_ring: XDP ring
 * @xdp_res: Result of the receive batch
 * @first_idx: index of the first ice_tx_buf in the current NAPI batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
			 u32 first_idx)
{
	struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];

	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		/* store index of descriptor with RS bit set in the first
		 * ice_tx_buf of given NAPI batch
		 */
		tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
		ice_xdp_ring_update_tail(xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
	}
}
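
/* Illustrative sketch (not part of the upstream driver): the rough call order
 * an Rx NAPI poll loop follows when it sends buffers out via the XDP Tx
 * helpers above.  Real callers live elsewhere in the driver; the function
 * name and the single-buffer flow here are made up for this example.
 */
static void __maybe_unused
ice_example_xdp_tx_flow(struct ice_tx_ring *xdp_ring, struct xdp_buff *xdp,
			u32 first_idx)
{
	unsigned int xdp_res = 0;

	/* queue one buffer; ICE_XDP_CONSUMED means it was dropped, e.g.
	 * because no Tx descriptors were free
	 */
	xdp_res |= __ice_xmit_xdp_ring(xdp, xdp_ring);

	/* once per NAPI batch: hand the accumulated result mask to
	 * ice_finalize_xdp_rx(), which sets the RS bit and bumps the ring
	 * tail when ICE_XDP_TX is set
	 */
	ice_finalize_xdp_rx(xdp_ring, xdp_res, first_idx);
}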