1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 /* The driver transmit and receive code */ 5 6 #include <linux/mm.h> 7 #include <linux/netdevice.h> 8 #include <linux/prefetch.h> 9 #include <linux/bpf_trace.h> 10 #include <net/dsfield.h> 11 #include <net/xdp.h> 12 #include "ice_txrx_lib.h" 13 #include "ice_lib.h" 14 #include "ice.h" 15 #include "ice_trace.h" 16 #include "ice_dcb_lib.h" 17 #include "ice_xsk.h" 18 #include "ice_eswitch.h" 19 20 #define ICE_RX_HDR_SIZE 256 21 22 #define FDIR_DESC_RXDID 0x40 23 #define ICE_FDIR_CLEAN_DELAY 10 24 25 /** 26 * ice_prgm_fdir_fltr - Program a Flow Director filter 27 * @vsi: VSI to send dummy packet 28 * @fdir_desc: flow director descriptor 29 * @raw_packet: allocated buffer for flow director 30 */ 31 int 32 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, 33 u8 *raw_packet) 34 { 35 struct ice_tx_buf *tx_buf, *first; 36 struct ice_fltr_desc *f_desc; 37 struct ice_tx_desc *tx_desc; 38 struct ice_tx_ring *tx_ring; 39 struct device *dev; 40 dma_addr_t dma; 41 u32 td_cmd; 42 u16 i; 43 44 /* VSI and Tx ring */ 45 if (!vsi) 46 return -ENOENT; 47 tx_ring = vsi->tx_rings[0]; 48 if (!tx_ring || !tx_ring->desc) 49 return -ENOENT; 50 dev = tx_ring->dev; 51 52 /* we are using two descriptors to add/del a filter and we can wait */ 53 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { 54 if (!i) 55 return -EAGAIN; 56 msleep_interruptible(1); 57 } 58 59 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE, 60 DMA_TO_DEVICE); 61 62 if (dma_mapping_error(dev, dma)) 63 return -EINVAL; 64 65 /* grab the next descriptor */ 66 i = tx_ring->next_to_use; 67 first = &tx_ring->tx_buf[i]; 68 f_desc = ICE_TX_FDIRDESC(tx_ring, i); 69 memcpy(f_desc, fdir_desc, sizeof(*f_desc)); 70 71 i++; 72 i = (i < tx_ring->count) ? i : 0; 73 tx_desc = ICE_TX_DESC(tx_ring, i); 74 tx_buf = &tx_ring->tx_buf[i]; 75 76 i++; 77 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 78 79 memset(tx_buf, 0, sizeof(*tx_buf)); 80 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE); 81 dma_unmap_addr_set(tx_buf, dma, dma); 82 83 tx_desc->buf_addr = cpu_to_le64(dma); 84 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY | 85 ICE_TX_DESC_CMD_RE; 86 87 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT; 88 tx_buf->raw_buf = raw_packet; 89 90 tx_desc->cmd_type_offset_bsz = 91 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0); 92 93 /* Force memory write to complete before letting h/w know 94 * there are new descriptors to fetch. 
95 */ 96 wmb(); 97 98 /* mark the data descriptor to be watched */ 99 first->next_to_watch = tx_desc; 100 101 writel(tx_ring->next_to_use, tx_ring->tail); 102 103 return 0; 104 } 105 106 /** 107 * ice_unmap_and_free_tx_buf - Release a Tx buffer 108 * @ring: the ring that owns the buffer 109 * @tx_buf: the buffer to free 110 */ 111 static void 112 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) 113 { 114 if (tx_buf->skb) { 115 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 116 devm_kfree(ring->dev, tx_buf->raw_buf); 117 else if (ice_ring_is_xdp(ring)) 118 page_frag_free(tx_buf->raw_buf); 119 else 120 dev_kfree_skb_any(tx_buf->skb); 121 if (dma_unmap_len(tx_buf, len)) 122 dma_unmap_single(ring->dev, 123 dma_unmap_addr(tx_buf, dma), 124 dma_unmap_len(tx_buf, len), 125 DMA_TO_DEVICE); 126 } else if (dma_unmap_len(tx_buf, len)) { 127 dma_unmap_page(ring->dev, 128 dma_unmap_addr(tx_buf, dma), 129 dma_unmap_len(tx_buf, len), 130 DMA_TO_DEVICE); 131 } 132 133 tx_buf->next_to_watch = NULL; 134 tx_buf->skb = NULL; 135 dma_unmap_len_set(tx_buf, len, 0); 136 /* tx_buf must be completely set up in the transmit path */ 137 } 138 139 static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring) 140 { 141 return netdev_get_tx_queue(ring->netdev, ring->q_index); 142 } 143 144 /** 145 * ice_clean_tx_ring - Free any empty Tx buffers 146 * @tx_ring: ring to be cleaned 147 */ 148 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring) 149 { 150 u32 size; 151 u16 i; 152 153 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { 154 ice_xsk_clean_xdp_ring(tx_ring); 155 goto tx_skip_free; 156 } 157 158 /* ring already cleared, nothing to do */ 159 if (!tx_ring->tx_buf) 160 return; 161 162 /* Free all the Tx ring sk_buffs */ 163 for (i = 0; i < tx_ring->count; i++) 164 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); 165 166 tx_skip_free: 167 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); 168 169 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), 170 PAGE_SIZE); 171 /* Zero out the descriptor ring */ 172 memset(tx_ring->desc, 0, size); 173 174 tx_ring->next_to_use = 0; 175 tx_ring->next_to_clean = 0; 176 177 if (!tx_ring->netdev) 178 return; 179 180 /* cleanup Tx queue statistics */ 181 netdev_tx_reset_queue(txring_txq(tx_ring)); 182 } 183 184 /** 185 * ice_free_tx_ring - Free Tx resources per queue 186 * @tx_ring: Tx descriptor ring for a specific queue 187 * 188 * Free all transmit software resources 189 */ 190 void ice_free_tx_ring(struct ice_tx_ring *tx_ring) 191 { 192 u32 size; 193 194 ice_clean_tx_ring(tx_ring); 195 devm_kfree(tx_ring->dev, tx_ring->tx_buf); 196 tx_ring->tx_buf = NULL; 197 198 if (tx_ring->desc) { 199 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), 200 PAGE_SIZE); 201 dmam_free_coherent(tx_ring->dev, size, 202 tx_ring->desc, tx_ring->dma); 203 tx_ring->desc = NULL; 204 } 205 } 206 207 /** 208 * ice_clean_tx_irq - Reclaim resources after transmit completes 209 * @tx_ring: Tx ring to clean 210 * @napi_budget: Used to determine if we are in netpoll 211 * 212 * Returns true if there's any budget left (e.g. 
the clean is finished) 213 */ 214 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) 215 { 216 unsigned int total_bytes = 0, total_pkts = 0; 217 unsigned int budget = ICE_DFLT_IRQ_WORK; 218 struct ice_vsi *vsi = tx_ring->vsi; 219 s16 i = tx_ring->next_to_clean; 220 struct ice_tx_desc *tx_desc; 221 struct ice_tx_buf *tx_buf; 222 223 /* get the bql data ready */ 224 if (!ice_ring_is_xdp(tx_ring)) 225 netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); 226 227 tx_buf = &tx_ring->tx_buf[i]; 228 tx_desc = ICE_TX_DESC(tx_ring, i); 229 i -= tx_ring->count; 230 231 prefetch(&vsi->state); 232 233 do { 234 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 235 236 /* if next_to_watch is not set then there is no work pending */ 237 if (!eop_desc) 238 break; 239 240 /* follow the guidelines of other drivers */ 241 prefetchw(&tx_buf->skb->users); 242 243 smp_rmb(); /* prevent any other reads prior to eop_desc */ 244 245 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); 246 /* if the descriptor isn't done, no work yet to do */ 247 if (!(eop_desc->cmd_type_offset_bsz & 248 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 249 break; 250 251 /* clear next_to_watch to prevent false hangs */ 252 tx_buf->next_to_watch = NULL; 253 254 /* update the statistics for this packet */ 255 total_bytes += tx_buf->bytecount; 256 total_pkts += tx_buf->gso_segs; 257 258 /* free the skb */ 259 napi_consume_skb(tx_buf->skb, napi_budget); 260 261 /* unmap skb header data */ 262 dma_unmap_single(tx_ring->dev, 263 dma_unmap_addr(tx_buf, dma), 264 dma_unmap_len(tx_buf, len), 265 DMA_TO_DEVICE); 266 267 /* clear tx_buf data */ 268 tx_buf->skb = NULL; 269 dma_unmap_len_set(tx_buf, len, 0); 270 271 /* unmap remaining buffers */ 272 while (tx_desc != eop_desc) { 273 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf); 274 tx_buf++; 275 tx_desc++; 276 i++; 277 if (unlikely(!i)) { 278 i -= tx_ring->count; 279 tx_buf = tx_ring->tx_buf; 280 tx_desc = ICE_TX_DESC(tx_ring, 0); 281 } 282 283 /* unmap any remaining paged data */ 284 if (dma_unmap_len(tx_buf, len)) { 285 dma_unmap_page(tx_ring->dev, 286 dma_unmap_addr(tx_buf, dma), 287 dma_unmap_len(tx_buf, len), 288 DMA_TO_DEVICE); 289 dma_unmap_len_set(tx_buf, len, 0); 290 } 291 } 292 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf); 293 294 /* move us one more past the eop_desc for start of next pkt */ 295 tx_buf++; 296 tx_desc++; 297 i++; 298 if (unlikely(!i)) { 299 i -= tx_ring->count; 300 tx_buf = tx_ring->tx_buf; 301 tx_desc = ICE_TX_DESC(tx_ring, 0); 302 } 303 304 prefetch(tx_desc); 305 306 /* update budget accounting */ 307 budget--; 308 } while (likely(budget)); 309 310 i += tx_ring->count; 311 tx_ring->next_to_clean = i; 312 313 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); 314 315 if (ice_ring_is_xdp(tx_ring)) 316 return !!budget; 317 318 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes); 319 320 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) 321 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && 322 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 323 /* Make sure that anybody stopping the queue after this 324 * sees the new next_to_clean. 
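	 * This barrier pairs with the smp_mb() in __ice_maybe_stop_tx() so
	 * that a CPU stopping the queue either sees the updated
	 * next_to_clean or we see the stopped queue here.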
325 */ 326 smp_mb(); 327 if (netif_tx_queue_stopped(txring_txq(tx_ring)) && 328 !test_bit(ICE_VSI_DOWN, vsi->state)) { 329 netif_tx_wake_queue(txring_txq(tx_ring)); 330 ++tx_ring->tx_stats.restart_q; 331 } 332 } 333 334 return !!budget; 335 } 336 337 /** 338 * ice_setup_tx_ring - Allocate the Tx descriptors 339 * @tx_ring: the Tx ring to set up 340 * 341 * Return 0 on success, negative on error 342 */ 343 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring) 344 { 345 struct device *dev = tx_ring->dev; 346 u32 size; 347 348 if (!dev) 349 return -ENOMEM; 350 351 /* warn if we are about to overwrite the pointer */ 352 WARN_ON(tx_ring->tx_buf); 353 tx_ring->tx_buf = 354 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count, 355 GFP_KERNEL); 356 if (!tx_ring->tx_buf) 357 return -ENOMEM; 358 359 /* round up to nearest page */ 360 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), 361 PAGE_SIZE); 362 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma, 363 GFP_KERNEL); 364 if (!tx_ring->desc) { 365 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 366 size); 367 goto err; 368 } 369 370 tx_ring->next_to_use = 0; 371 tx_ring->next_to_clean = 0; 372 tx_ring->tx_stats.prev_pkt = -1; 373 return 0; 374 375 err: 376 devm_kfree(dev, tx_ring->tx_buf); 377 tx_ring->tx_buf = NULL; 378 return -ENOMEM; 379 } 380 381 /** 382 * ice_clean_rx_ring - Free Rx buffers 383 * @rx_ring: ring to be cleaned 384 */ 385 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) 386 { 387 struct device *dev = rx_ring->dev; 388 u32 size; 389 u16 i; 390 391 /* ring already cleared, nothing to do */ 392 if (!rx_ring->rx_buf) 393 return; 394 395 if (rx_ring->skb) { 396 dev_kfree_skb(rx_ring->skb); 397 rx_ring->skb = NULL; 398 } 399 400 if (rx_ring->xsk_pool) { 401 ice_xsk_clean_rx_ring(rx_ring); 402 goto rx_skip_free; 403 } 404 405 /* Free all the Rx ring sk_buffs */ 406 for (i = 0; i < rx_ring->count; i++) { 407 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; 408 409 if (!rx_buf->page) 410 continue; 411 412 /* Invalidate cache lines that may have been written to by 413 * device so that we avoid corrupting memory. 
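		 * The sync below covers rx_buf_len bytes starting at
		 * page_offset, i.e. everything the device could have written
		 * into this buffer.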
414 */ 415 dma_sync_single_range_for_cpu(dev, rx_buf->dma, 416 rx_buf->page_offset, 417 rx_ring->rx_buf_len, 418 DMA_FROM_DEVICE); 419 420 /* free resources associated with mapping */ 421 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), 422 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); 423 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 424 425 rx_buf->page = NULL; 426 rx_buf->page_offset = 0; 427 } 428 429 rx_skip_free: 430 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); 431 432 /* Zero out the descriptor ring */ 433 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), 434 PAGE_SIZE); 435 memset(rx_ring->desc, 0, size); 436 437 rx_ring->next_to_alloc = 0; 438 rx_ring->next_to_clean = 0; 439 rx_ring->next_to_use = 0; 440 } 441 442 /** 443 * ice_free_rx_ring - Free Rx resources 444 * @rx_ring: ring to clean the resources from 445 * 446 * Free all receive software resources 447 */ 448 void ice_free_rx_ring(struct ice_rx_ring *rx_ring) 449 { 450 u32 size; 451 452 ice_clean_rx_ring(rx_ring); 453 if (rx_ring->vsi->type == ICE_VSI_PF) 454 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 455 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 456 rx_ring->xdp_prog = NULL; 457 devm_kfree(rx_ring->dev, rx_ring->rx_buf); 458 rx_ring->rx_buf = NULL; 459 460 if (rx_ring->desc) { 461 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), 462 PAGE_SIZE); 463 dmam_free_coherent(rx_ring->dev, size, 464 rx_ring->desc, rx_ring->dma); 465 rx_ring->desc = NULL; 466 } 467 } 468 469 /** 470 * ice_setup_rx_ring - Allocate the Rx descriptors 471 * @rx_ring: the Rx ring to set up 472 * 473 * Return 0 on success, negative on error 474 */ 475 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) 476 { 477 struct device *dev = rx_ring->dev; 478 u32 size; 479 480 if (!dev) 481 return -ENOMEM; 482 483 /* warn if we are about to overwrite the pointer */ 484 WARN_ON(rx_ring->rx_buf); 485 rx_ring->rx_buf = 486 devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count, 487 GFP_KERNEL); 488 if (!rx_ring->rx_buf) 489 return -ENOMEM; 490 491 /* round up to nearest page */ 492 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), 493 PAGE_SIZE); 494 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma, 495 GFP_KERNEL); 496 if (!rx_ring->desc) { 497 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 498 size); 499 goto err; 500 } 501 502 rx_ring->next_to_use = 0; 503 rx_ring->next_to_clean = 0; 504 505 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 506 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); 507 508 if (rx_ring->vsi->type == ICE_VSI_PF && 509 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 510 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 511 rx_ring->q_index, rx_ring->q_vector->napi.napi_id)) 512 goto err; 513 return 0; 514 515 err: 516 devm_kfree(dev, rx_ring->rx_buf); 517 rx_ring->rx_buf = NULL; 518 return -ENOMEM; 519 } 520 521 static unsigned int 522 ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size) 523 { 524 unsigned int truesize; 525 526 #if (PAGE_SIZE < 8192) 527 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ 528 #else 529 truesize = rx_ring->rx_offset ? 
		   SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (err == ICE_XDP_CONSUMED)
			goto out_failure;
		return err;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
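 *
 * A newly allocated page is DMA-mapped once and seeded with a large
 * page_ref bias (USHRT_MAX) so that later reuse only has to adjust
 * pagecnt_bias instead of the atomic page refcount.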
648 */ 649 static bool 650 ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) 651 { 652 struct page *page = bi->page; 653 dma_addr_t dma; 654 655 /* since we are recycling buffers we should seldom need to alloc */ 656 if (likely(page)) 657 return true; 658 659 /* alloc new page for storage */ 660 page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); 661 if (unlikely(!page)) { 662 rx_ring->rx_stats.alloc_page_failed++; 663 return false; 664 } 665 666 /* map page for use */ 667 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), 668 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); 669 670 /* if mapping failed free memory back to system since 671 * there isn't much point in holding memory we can't use 672 */ 673 if (dma_mapping_error(rx_ring->dev, dma)) { 674 __free_pages(page, ice_rx_pg_order(rx_ring)); 675 rx_ring->rx_stats.alloc_page_failed++; 676 return false; 677 } 678 679 bi->dma = dma; 680 bi->page = page; 681 bi->page_offset = rx_ring->rx_offset; 682 page_ref_add(page, USHRT_MAX - 1); 683 bi->pagecnt_bias = USHRT_MAX; 684 685 return true; 686 } 687 688 /** 689 * ice_alloc_rx_bufs - Replace used receive buffers 690 * @rx_ring: ring to place buffers on 691 * @cleaned_count: number of buffers to replace 692 * 693 * Returns false if all allocations were successful, true if any fail. Returning 694 * true signals to the caller that we didn't replace cleaned_count buffers and 695 * there is more work to do. 696 * 697 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx 698 * buffers. Then bump tail at most one time. Grouping like this lets us avoid 699 * multiple tail writes per call. 700 */ 701 bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count) 702 { 703 union ice_32b_rx_flex_desc *rx_desc; 704 u16 ntu = rx_ring->next_to_use; 705 struct ice_rx_buf *bi; 706 707 /* do nothing if no valid netdev defined */ 708 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || 709 !cleaned_count) 710 return false; 711 712 /* get the Rx descriptor and buffer based on next_to_use */ 713 rx_desc = ICE_RX_DESC(rx_ring, ntu); 714 bi = &rx_ring->rx_buf[ntu]; 715 716 do { 717 /* if we fail here, we have work remaining */ 718 if (!ice_alloc_mapped_page(rx_ring, bi)) 719 break; 720 721 /* sync the buffer for use by the device */ 722 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 723 bi->page_offset, 724 rx_ring->rx_buf_len, 725 DMA_FROM_DEVICE); 726 727 /* Refresh the desc even if buffer_addrs didn't change 728 * because each write-back erases this info. 729 */ 730 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 731 732 rx_desc++; 733 bi++; 734 ntu++; 735 if (unlikely(ntu == rx_ring->count)) { 736 rx_desc = ICE_RX_DESC(rx_ring, 0); 737 bi = rx_ring->rx_buf; 738 ntu = 0; 739 } 740 741 /* clear the status bits for the next_to_use descriptor */ 742 rx_desc->wb.status_error0 = 0; 743 744 cleaned_count--; 745 } while (cleaned_count); 746 747 if (rx_ring->next_to_use != ntu) 748 ice_release_rx_desc(rx_ring, ntu); 749 750 return !!cleaned_count; 751 } 752 753 /** 754 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse 755 * @rx_buf: Rx buffer to adjust 756 * @size: Size of adjustment 757 * 758 * Update the offset within page so that Rx buf will be ready to be reused. 
759 * For systems with PAGE_SIZE < 8192 this function will flip the page offset 760 * so the second half of page assigned to Rx buffer will be used, otherwise 761 * the offset is moved by "size" bytes 762 */ 763 static void 764 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) 765 { 766 #if (PAGE_SIZE < 8192) 767 /* flip page offset to other buffer */ 768 rx_buf->page_offset ^= size; 769 #else 770 /* move offset up to the next cache line */ 771 rx_buf->page_offset += size; 772 #endif 773 } 774 775 /** 776 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx 777 * @rx_buf: buffer containing the page 778 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call 779 * 780 * If page is reusable, we have a green light for calling ice_reuse_rx_page, 781 * which will assign the current buffer to the buffer that next_to_alloc is 782 * pointing to; otherwise, the DMA mapping needs to be destroyed and 783 * page freed 784 */ 785 static bool 786 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) 787 { 788 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; 789 struct page *page = rx_buf->page; 790 791 /* avoid re-using remote and pfmemalloc pages */ 792 if (!dev_page_is_reusable(page)) 793 return false; 794 795 #if (PAGE_SIZE < 8192) 796 /* if we are only owner of page we can reuse it */ 797 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) 798 return false; 799 #else 800 #define ICE_LAST_OFFSET \ 801 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048) 802 if (rx_buf->page_offset > ICE_LAST_OFFSET) 803 return false; 804 #endif /* PAGE_SIZE < 8192) */ 805 806 /* If we have drained the page fragment pool we need to update 807 * the pagecnt_bias and page count so that we fully restock the 808 * number of references the driver holds. 809 */ 810 if (unlikely(pagecnt_bias == 1)) { 811 page_ref_add(page, USHRT_MAX - 1); 812 rx_buf->pagecnt_bias = USHRT_MAX; 813 } 814 815 return true; 816 } 817 818 /** 819 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag 820 * @rx_ring: Rx descriptor ring to transact packets on 821 * @rx_buf: buffer containing page to add 822 * @skb: sk_buff to place the data into 823 * @size: packet length from rx_desc 824 * 825 * This function will add the data contained in rx_buf->page to the skb. 826 * It will just attach the page as a frag to the skb. 827 * The function will then update the page offset. 
828 */ 829 static void 830 ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 831 struct sk_buff *skb, unsigned int size) 832 { 833 #if (PAGE_SIZE >= 8192) 834 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); 835 #else 836 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 837 #endif 838 839 if (!size) 840 return; 841 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, 842 rx_buf->page_offset, size, truesize); 843 844 /* page is being used so we must update the page offset */ 845 ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 846 } 847 848 /** 849 * ice_reuse_rx_page - page flip buffer and store it back on the ring 850 * @rx_ring: Rx descriptor ring to store buffers on 851 * @old_buf: donor buffer to have page reused 852 * 853 * Synchronizes page for reuse by the adapter 854 */ 855 static void 856 ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) 857 { 858 u16 nta = rx_ring->next_to_alloc; 859 struct ice_rx_buf *new_buf; 860 861 new_buf = &rx_ring->rx_buf[nta]; 862 863 /* update, and store next to alloc */ 864 nta++; 865 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 866 867 /* Transfer page from old buffer to new buffer. 868 * Move each member individually to avoid possible store 869 * forwarding stalls and unnecessary copy of skb. 870 */ 871 new_buf->dma = old_buf->dma; 872 new_buf->page = old_buf->page; 873 new_buf->page_offset = old_buf->page_offset; 874 new_buf->pagecnt_bias = old_buf->pagecnt_bias; 875 } 876 877 /** 878 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use 879 * @rx_ring: Rx descriptor ring to transact packets on 880 * @size: size of buffer to add to skb 881 * @rx_buf_pgcnt: rx_buf page refcount 882 * 883 * This function will pull an Rx buffer from the ring and synchronize it 884 * for use by the CPU. 885 */ 886 static struct ice_rx_buf * 887 ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, 888 int *rx_buf_pgcnt) 889 { 890 struct ice_rx_buf *rx_buf; 891 892 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; 893 *rx_buf_pgcnt = 894 #if (PAGE_SIZE < 8192) 895 page_count(rx_buf->page); 896 #else 897 0; 898 #endif 899 prefetchw(rx_buf->page); 900 901 if (!size) 902 return rx_buf; 903 /* we are reusing so sync this buffer for CPU use */ 904 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 905 rx_buf->page_offset, size, 906 DMA_FROM_DEVICE); 907 908 /* We have pulled a buffer for use, so decrement pagecnt_bias */ 909 rx_buf->pagecnt_bias--; 910 911 return rx_buf; 912 } 913 914 /** 915 * ice_build_skb - Build skb around an existing buffer 916 * @rx_ring: Rx descriptor ring to transact packets on 917 * @rx_buf: Rx buffer to pull data from 918 * @xdp: xdp_buff pointing to the data 919 * 920 * This function builds an skb around an existing Rx buffer, taking care 921 * to set up the skb correctly and avoid any memcpy overhead. 922 */ 923 static struct sk_buff * 924 ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 925 struct xdp_buff *xdp) 926 { 927 u8 metasize = xdp->data - xdp->data_meta; 928 #if (PAGE_SIZE < 8192) 929 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 930 #else 931 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 932 SKB_DATA_ALIGN(xdp->data_end - 933 xdp->data_hard_start); 934 #endif 935 struct sk_buff *skb; 936 937 /* Prefetch first cache line of first page. 
	 * If xdp->data_meta is unused, this points exactly to xdp->data,
	 * otherwise we likely have a consumer accessing the first few bytes
	 * of metadata, and then the actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* we must record the Rx queue, otherwise OS features such as
	 * symmetric queue selection won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
1034 */ 1035 static void 1036 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, 1037 int rx_buf_pgcnt) 1038 { 1039 u16 ntc = rx_ring->next_to_clean + 1; 1040 1041 /* fetch, update, and store next to clean */ 1042 ntc = (ntc < rx_ring->count) ? ntc : 0; 1043 rx_ring->next_to_clean = ntc; 1044 1045 if (!rx_buf) 1046 return; 1047 1048 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { 1049 /* hand second half of page back to the ring */ 1050 ice_reuse_rx_page(rx_ring, rx_buf); 1051 } else { 1052 /* we are not reusing the buffer so unmap it */ 1053 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, 1054 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 1055 ICE_RX_DMA_ATTR); 1056 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 1057 } 1058 1059 /* clear contents of buffer_info */ 1060 rx_buf->page = NULL; 1061 } 1062 1063 /** 1064 * ice_is_non_eop - process handling of non-EOP buffers 1065 * @rx_ring: Rx ring being processed 1066 * @rx_desc: Rx descriptor for current buffer 1067 * 1068 * If the buffer is an EOP buffer, this function exits returning false, 1069 * otherwise return true indicating that this is in fact a non-EOP buffer. 1070 */ 1071 static bool 1072 ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) 1073 { 1074 /* if we are the last buffer then there is nothing else to do */ 1075 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 1076 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) 1077 return false; 1078 1079 rx_ring->rx_stats.non_eop_descs++; 1080 1081 return true; 1082 } 1083 1084 /** 1085 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1086 * @rx_ring: Rx descriptor ring to transact packets on 1087 * @budget: Total limit on number of packets to process 1088 * 1089 * This function provides a "bounce buffer" approach to Rx interrupt 1090 * processing. The advantage to this is that on systems that have 1091 * expensive overhead for IOMMU access this provides a means of avoiding 1092 * it by maintaining the mapping of the page to the system. 
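 * Buffers that can still be reused are recycled in place through
 * ice_reuse_rx_page() and page_offset flipping, so the page's DMA mapping
 * is kept across frames.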
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int offset = rx_ring->rx_offset;
	struct ice_tx_ring *xdp_ring = NULL;
	unsigned int xdp_res, xdp_xmit = 0;
	struct sk_buff *skb = rx_ring->skb;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (xdp_prog)
		xdp_ring = rx_ring->xdp_ring;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		unsigned char *hard_start;
		unsigned int size;
		u16 stat_err_bits;
		int rx_buf_pgcnt;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used; if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
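		 * dma_rmb() is enough here: only this CPU's reads of the
		 * DMA-written descriptor need to be ordered, not all memory
		 * accesses.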
1144 */ 1145 dma_rmb(); 1146 1147 ice_trace(clean_rx_irq, rx_ring, rx_desc); 1148 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 1149 struct ice_vsi *ctrl_vsi = rx_ring->vsi; 1150 1151 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID && 1152 ctrl_vsi->vf_id != ICE_INVAL_VFID) 1153 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); 1154 ice_put_rx_buf(rx_ring, NULL, 0); 1155 cleaned_count++; 1156 continue; 1157 } 1158 1159 size = le16_to_cpu(rx_desc->wb.pkt_len) & 1160 ICE_RX_FLX_DESC_PKT_LEN_M; 1161 1162 /* retrieve a buffer from the ring */ 1163 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); 1164 1165 if (!size) { 1166 xdp.data = NULL; 1167 xdp.data_end = NULL; 1168 xdp.data_hard_start = NULL; 1169 xdp.data_meta = NULL; 1170 goto construct_skb; 1171 } 1172 1173 hard_start = page_address(rx_buf->page) + rx_buf->page_offset - 1174 offset; 1175 xdp_prepare_buff(&xdp, hard_start, offset, size, true); 1176 #if (PAGE_SIZE > 4096) 1177 /* At larger PAGE_SIZE, frame_sz depend on len size */ 1178 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 1179 #endif 1180 1181 if (!xdp_prog) 1182 goto construct_skb; 1183 1184 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring); 1185 if (!xdp_res) 1186 goto construct_skb; 1187 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1188 xdp_xmit |= xdp_res; 1189 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 1190 } else { 1191 rx_buf->pagecnt_bias++; 1192 } 1193 total_rx_bytes += size; 1194 total_rx_pkts++; 1195 1196 cleaned_count++; 1197 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1198 continue; 1199 construct_skb: 1200 if (skb) { 1201 ice_add_rx_frag(rx_ring, rx_buf, skb, size); 1202 } else if (likely(xdp.data)) { 1203 if (ice_ring_uses_build_skb(rx_ring)) 1204 skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1205 else 1206 skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 1207 } 1208 /* exit if we failed to retrieve a buffer */ 1209 if (!skb) { 1210 rx_ring->rx_stats.alloc_buf_failed++; 1211 if (rx_buf) 1212 rx_buf->pagecnt_bias++; 1213 break; 1214 } 1215 1216 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1217 cleaned_count++; 1218 1219 /* skip if it is NOP desc */ 1220 if (ice_is_non_eop(rx_ring, rx_desc)) 1221 continue; 1222 1223 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 1224 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 1225 dev_kfree_skb_any(skb); 1226 continue; 1227 } 1228 1229 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 1230 if (ice_test_staterr(rx_desc, stat_err_bits)) 1231 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 1232 1233 /* pad the skb if needed, to make a valid ethernet frame */ 1234 if (eth_skb_pad(skb)) { 1235 skb = NULL; 1236 continue; 1237 } 1238 1239 /* probably a little skewed due to removing CRC */ 1240 total_rx_bytes += skb->len; 1241 1242 /* populate checksum, VLAN, and protocol */ 1243 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 1244 ICE_RX_FLEX_DESC_PTYPE_M; 1245 1246 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1247 1248 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); 1249 /* send completed skb up the stack */ 1250 ice_receive_skb(rx_ring, skb, vlan_tag); 1251 skb = NULL; 1252 1253 /* update budget accounting */ 1254 total_rx_pkts++; 1255 } 1256 1257 /* return up to cleaned_count buffers to hardware */ 1258 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1259 1260 if (xdp_prog) 1261 ice_finalize_xdp_rx(xdp_ring, xdp_xmit); 1262 rx_ring->skb = skb; 1263 1264 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 1265 1266 /* guarantee a 
trip back through this routine if there was a failure */ 1267 return failure ? budget : (int)total_rx_pkts; 1268 } 1269 1270 static void __ice_update_sample(struct ice_q_vector *q_vector, 1271 struct ice_ring_container *rc, 1272 struct dim_sample *sample, 1273 bool is_tx) 1274 { 1275 u64 packets = 0, bytes = 0; 1276 1277 if (is_tx) { 1278 struct ice_tx_ring *tx_ring; 1279 1280 ice_for_each_tx_ring(tx_ring, *rc) { 1281 packets += tx_ring->stats.pkts; 1282 bytes += tx_ring->stats.bytes; 1283 } 1284 } else { 1285 struct ice_rx_ring *rx_ring; 1286 1287 ice_for_each_rx_ring(rx_ring, *rc) { 1288 packets += rx_ring->stats.pkts; 1289 bytes += rx_ring->stats.bytes; 1290 } 1291 } 1292 1293 dim_update_sample(q_vector->total_events, packets, bytes, sample); 1294 sample->comp_ctr = 0; 1295 1296 /* if dim settings get stale, like when not updated for 1 1297 * second or longer, force it to start again. This addresses the 1298 * frequent case of an idle queue being switched to by the 1299 * scheduler. The 1,000 here means 1,000 milliseconds. 1300 */ 1301 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000) 1302 rc->dim.state = DIM_START_MEASURE; 1303 } 1304 1305 /** 1306 * ice_net_dim - Update net DIM algorithm 1307 * @q_vector: the vector associated with the interrupt 1308 * 1309 * Create a DIM sample and notify net_dim() so that it can possibly decide 1310 * a new ITR value based on incoming packets, bytes, and interrupts. 1311 * 1312 * This function is a no-op if the ring is not configured to dynamic ITR. 1313 */ 1314 static void ice_net_dim(struct ice_q_vector *q_vector) 1315 { 1316 struct ice_ring_container *tx = &q_vector->tx; 1317 struct ice_ring_container *rx = &q_vector->rx; 1318 1319 if (ITR_IS_DYNAMIC(tx)) { 1320 struct dim_sample dim_sample; 1321 1322 __ice_update_sample(q_vector, tx, &dim_sample, true); 1323 net_dim(&tx->dim, dim_sample); 1324 } 1325 1326 if (ITR_IS_DYNAMIC(rx)) { 1327 struct dim_sample dim_sample; 1328 1329 __ice_update_sample(q_vector, rx, &dim_sample, false); 1330 net_dim(&rx->dim, dim_sample); 1331 } 1332 } 1333 1334 /** 1335 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 1336 * @itr_idx: interrupt throttling index 1337 * @itr: interrupt throttling value in usecs 1338 */ 1339 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 1340 { 1341 /* The ITR value is reported in microseconds, and the register value is 1342 * recorded in 2 microsecond units. For this reason we only need to 1343 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 1344 * granularity as a shift instead of division. The mask makes sure the 1345 * ITR value is never odd so we don't accidentally write into the field 1346 * prior to the ITR field. 1347 */ 1348 itr &= ICE_ITR_MASK; 1349 1350 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 1351 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 1352 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 1353 } 1354 1355 /** 1356 * ice_enable_interrupt - re-enable MSI-X interrupt 1357 * @q_vector: the vector associated with the interrupt to enable 1358 * 1359 * If the VSI is down, the interrupt will not be re-enabled. Also, 1360 * when enabling the interrupt always reset the wb_on_itr to false 1361 * and trigger a software interrupt to clean out internal state. 
1362 */ 1363 static void ice_enable_interrupt(struct ice_q_vector *q_vector) 1364 { 1365 struct ice_vsi *vsi = q_vector->vsi; 1366 bool wb_en = q_vector->wb_on_itr; 1367 u32 itr_val; 1368 1369 if (test_bit(ICE_DOWN, vsi->state)) 1370 return; 1371 1372 /* trigger an ITR delayed software interrupt when exiting busy poll, to 1373 * make sure to catch any pending cleanups that might have been missed 1374 * due to interrupt state transition. If busy poll or poll isn't 1375 * enabled, then don't update ITR, and just enable the interrupt. 1376 */ 1377 if (!wb_en) { 1378 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 1379 } else { 1380 q_vector->wb_on_itr = false; 1381 1382 /* do two things here with a single write. Set up the third ITR 1383 * index to be used for software interrupt moderation, and then 1384 * trigger a software interrupt with a rate limit of 20K on 1385 * software interrupts, this will help avoid high interrupt 1386 * loads due to frequently polling and exiting polling. 1387 */ 1388 itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K); 1389 itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M | 1390 ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S | 1391 GLINT_DYN_CTL_SW_ITR_INDX_ENA_M; 1392 } 1393 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 1394 } 1395 1396 /** 1397 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 1398 * @q_vector: q_vector to set WB_ON_ITR on 1399 * 1400 * We need to tell hardware to write-back completed descriptors even when 1401 * interrupts are disabled. Descriptors will be written back on cache line 1402 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 1403 * descriptors may not be written back if they don't fill a cache line until 1404 * the next interrupt. 1405 * 1406 * This sets the write-back frequency to whatever was set previously for the 1407 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we 1408 * aren't meddling with the INTENA_M bit. 1409 */ 1410 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 1411 { 1412 struct ice_vsi *vsi = q_vector->vsi; 1413 1414 /* already in wb_on_itr mode no need to change it */ 1415 if (q_vector->wb_on_itr) 1416 return; 1417 1418 /* use previously set ITR values for all of the ITR indices by 1419 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and 1420 * be static in non-adaptive mode (user configured) 1421 */ 1422 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 1423 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & 1424 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | 1425 GLINT_DYN_CTL_WB_ON_ITR_M); 1426 1427 q_vector->wb_on_itr = true; 1428 } 1429 1430 /** 1431 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 1432 * @napi: napi struct with our devices info in it 1433 * @budget: amount of work driver is allowed to do this pass, in packets 1434 * 1435 * This function will clean all queues associated with a q_vector. 1436 * 1437 * Returns the amount of work done 1438 */ 1439 int ice_napi_poll(struct napi_struct *napi, int budget) 1440 { 1441 struct ice_q_vector *q_vector = 1442 container_of(napi, struct ice_q_vector, napi); 1443 struct ice_tx_ring *tx_ring; 1444 struct ice_rx_ring *rx_ring; 1445 bool clean_complete = true; 1446 int budget_per_ring; 1447 int work_done = 0; 1448 1449 /* Since the actual Tx work is minimal, we can give the Tx a larger 1450 * budget and be more aggressive about cleaning up the Tx descriptors. 
1451 */ 1452 ice_for_each_tx_ring(tx_ring, q_vector->tx) { 1453 bool wd; 1454 1455 if (tx_ring->xsk_pool) 1456 wd = ice_clean_tx_irq_zc(tx_ring, budget); 1457 else if (ice_ring_is_xdp(tx_ring)) 1458 wd = true; 1459 else 1460 wd = ice_clean_tx_irq(tx_ring, budget); 1461 1462 if (!wd) 1463 clean_complete = false; 1464 } 1465 1466 /* Handle case where we are called by netpoll with a budget of 0 */ 1467 if (unlikely(budget <= 0)) 1468 return budget; 1469 1470 /* normally we have 1 Rx ring per q_vector */ 1471 if (unlikely(q_vector->num_ring_rx > 1)) 1472 /* We attempt to distribute budget to each Rx queue fairly, but 1473 * don't allow the budget to go below 1 because that would exit 1474 * polling early. 1475 */ 1476 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 1477 else 1478 /* Max of 1 Rx ring in this q_vector so give it the budget */ 1479 budget_per_ring = budget; 1480 1481 ice_for_each_rx_ring(rx_ring, q_vector->rx) { 1482 int cleaned; 1483 1484 /* A dedicated path for zero-copy allows making a single 1485 * comparison in the irq context instead of many inside the 1486 * ice_clean_rx_irq function and makes the codebase cleaner. 1487 */ 1488 cleaned = rx_ring->xsk_pool ? 1489 ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : 1490 ice_clean_rx_irq(rx_ring, budget_per_ring); 1491 work_done += cleaned; 1492 /* if we clean as many as budgeted, we must not be done */ 1493 if (cleaned >= budget_per_ring) 1494 clean_complete = false; 1495 } 1496 1497 /* If work not completed, return budget and polling will return */ 1498 if (!clean_complete) { 1499 /* Set the writeback on ITR so partial completions of 1500 * cache-lines will still continue even if we're polling. 1501 */ 1502 ice_set_wb_on_itr(q_vector); 1503 return budget; 1504 } 1505 1506 /* Exit the polling mode, but don't re-enable interrupts if stack might 1507 * poll us due to busy-polling 1508 */ 1509 if (likely(napi_complete_done(napi, work_done))) { 1510 ice_net_dim(q_vector); 1511 ice_enable_interrupt(q_vector); 1512 } else { 1513 ice_set_wb_on_itr(q_vector); 1514 } 1515 1516 return min_t(int, work_done, budget - 1); 1517 } 1518 1519 /** 1520 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 1521 * @tx_ring: the ring to be checked 1522 * @size: the size buffer we want to assure is available 1523 * 1524 * Returns -EBUSY if a stop is needed, else 0 1525 */ 1526 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 1527 { 1528 netif_tx_stop_queue(txring_txq(tx_ring)); 1529 /* Memory barrier before checking head and tail */ 1530 smp_mb(); 1531 1532 /* Check again in a case another CPU has just made room available. */ 1533 if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 1534 return -EBUSY; 1535 1536 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 1537 netif_tx_start_queue(txring_txq(tx_ring)); 1538 ++tx_ring->tx_stats.restart_q; 1539 return 0; 1540 } 1541 1542 /** 1543 * ice_maybe_stop_tx - 1st level check for Tx stop conditions 1544 * @tx_ring: the ring to be checked 1545 * @size: the size buffer we want to assure is available 1546 * 1547 * Returns 0 if stop is not needed 1548 */ 1549 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) 1550 { 1551 if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 1552 return 0; 1553 1554 return __ice_maybe_stop_tx(tx_ring, size); 1555 } 1556 1557 /** 1558 * ice_tx_map - Build the Tx descriptor 1559 * @tx_ring: ring to send buffer on 1560 * @first: first buffer info buffer to use 1561 * @off: pointer to struct that holds offload parameters 1562 * 1563 * This function loops over the skb data pointed to by *first 1564 * and gets a physical address for each memory location and programs 1565 * it and the length into the transmit descriptor. 1566 */ 1567 static void 1568 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, 1569 struct ice_tx_offload_params *off) 1570 { 1571 u64 td_offset, td_tag, td_cmd; 1572 u16 i = tx_ring->next_to_use; 1573 unsigned int data_len, size; 1574 struct ice_tx_desc *tx_desc; 1575 struct ice_tx_buf *tx_buf; 1576 struct sk_buff *skb; 1577 skb_frag_t *frag; 1578 dma_addr_t dma; 1579 bool kick; 1580 1581 td_tag = off->td_l2tag1; 1582 td_cmd = off->td_cmd; 1583 td_offset = off->td_offset; 1584 skb = first->skb; 1585 1586 data_len = skb->data_len; 1587 size = skb_headlen(skb); 1588 1589 tx_desc = ICE_TX_DESC(tx_ring, i); 1590 1591 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1592 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1593 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1594 ICE_TX_FLAGS_VLAN_S; 1595 } 1596 1597 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 1598 1599 tx_buf = first; 1600 1601 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 1602 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 1603 1604 if (dma_mapping_error(tx_ring->dev, dma)) 1605 goto dma_error; 1606 1607 /* record length, and DMA address */ 1608 dma_unmap_len_set(tx_buf, len, size); 1609 dma_unmap_addr_set(tx_buf, dma, dma); 1610 1611 /* align size to end of page */ 1612 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 1613 tx_desc->buf_addr = cpu_to_le64(dma); 1614 1615 /* account for data chunks larger than the hardware 1616 * can handle 1617 */ 1618 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 1619 tx_desc->cmd_type_offset_bsz = 1620 ice_build_ctob(td_cmd, td_offset, max_data, 1621 td_tag); 1622 1623 tx_desc++; 1624 i++; 1625 1626 if (i == tx_ring->count) { 1627 tx_desc = ICE_TX_DESC(tx_ring, 0); 1628 i = 0; 1629 } 1630 1631 dma += max_data; 1632 size -= max_data; 1633 1634 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 1635 tx_desc->buf_addr = cpu_to_le64(dma); 1636 } 1637 1638 if (likely(!data_len)) 1639 break; 1640 1641 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 1642 size, td_tag); 1643 1644 tx_desc++; 1645 i++; 1646 1647 if (i == tx_ring->count) { 1648 tx_desc = ICE_TX_DESC(tx_ring, 0); 1649 i = 0; 1650 } 1651 1652 size = skb_frag_size(frag); 1653 data_len -= size; 1654 1655 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 1656 DMA_TO_DEVICE); 1657 1658 tx_buf = &tx_ring->tx_buf[i]; 1659 } 1660 1661 /* record SW timestamp if HW timestamp is not available */ 1662 skb_tx_timestamp(first->skb); 1663 1664 i++; 1665 if (i == tx_ring->count) 1666 i = 0; 1667 1668 /* 
write last descriptor with RS and EOP bits */ 1669 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 1670 tx_desc->cmd_type_offset_bsz = 1671 ice_build_ctob(td_cmd, td_offset, size, td_tag); 1672 1673 /* Force memory writes to complete before letting h/w know there 1674 * are new descriptors to fetch. 1675 * 1676 * We also use this memory barrier to make certain all of the 1677 * status bits have been updated before next_to_watch is written. 1678 */ 1679 wmb(); 1680 1681 /* set next_to_watch value indicating a packet is present */ 1682 first->next_to_watch = tx_desc; 1683 1684 tx_ring->next_to_use = i; 1685 1686 ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 1687 1688 /* notify HW of packet */ 1689 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, 1690 netdev_xmit_more()); 1691 if (kick) 1692 /* notify HW of packet */ 1693 writel(i, tx_ring->tail); 1694 1695 return; 1696 1697 dma_error: 1698 /* clear DMA mappings for failed tx_buf map */ 1699 for (;;) { 1700 tx_buf = &tx_ring->tx_buf[i]; 1701 ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 1702 if (tx_buf == first) 1703 break; 1704 if (i == 0) 1705 i = tx_ring->count; 1706 i--; 1707 } 1708 1709 tx_ring->next_to_use = i; 1710 } 1711 1712 /** 1713 * ice_tx_csum - Enable Tx checksum offloads 1714 * @first: pointer to the first descriptor 1715 * @off: pointer to struct that holds offload parameters 1716 * 1717 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 1718 */ 1719 static 1720 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1721 { 1722 u32 l4_len = 0, l3_len = 0, l2_len = 0; 1723 struct sk_buff *skb = first->skb; 1724 union { 1725 struct iphdr *v4; 1726 struct ipv6hdr *v6; 1727 unsigned char *hdr; 1728 } ip; 1729 union { 1730 struct tcphdr *tcp; 1731 unsigned char *hdr; 1732 } l4; 1733 __be16 frag_off, protocol; 1734 unsigned char *exthdr; 1735 u32 offset, cmd = 0; 1736 u8 l4_proto = 0; 1737 1738 if (skb->ip_summed != CHECKSUM_PARTIAL) 1739 return 0; 1740 1741 ip.hdr = skb_network_header(skb); 1742 l4.hdr = skb_transport_header(skb); 1743 1744 /* compute outer L2 header size */ 1745 l2_len = ip.hdr - skb->data; 1746 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1747 1748 protocol = vlan_get_protocol(skb); 1749 1750 if (protocol == htons(ETH_P_IP)) 1751 first->tx_flags |= ICE_TX_FLAGS_IPV4; 1752 else if (protocol == htons(ETH_P_IPV6)) 1753 first->tx_flags |= ICE_TX_FLAGS_IPV6; 1754 1755 if (skb->encapsulation) { 1756 bool gso_ena = false; 1757 u32 tunnel = 0; 1758 1759 /* define outer network header type */ 1760 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1761 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack already computes the IP header checksum, the only
		 * time we need the hardware to recompute it is in the case of
		 * TSO.
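		 * (TSO rewrites the IPv4 total length for every segment, so
		 * the header checksum must be recomputed by the hardware.)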
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW (VLAN, DCB, etc.).
 */
static void
ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
	 * insertion for 802.1AD is not supported
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1930 */
1931 static
1932 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1933 {
1934 struct sk_buff *skb = first->skb;
1935 union {
1936 struct iphdr *v4;
1937 struct ipv6hdr *v6;
1938 unsigned char *hdr;
1939 } ip;
1940 union {
1941 struct tcphdr *tcp;
1942 struct udphdr *udp;
1943 unsigned char *hdr;
1944 } l4;
1945 u64 cd_mss, cd_tso_len;
1946 u32 paylen;
1947 u8 l4_start;
1948 int err;
1949
1950 if (skb->ip_summed != CHECKSUM_PARTIAL)
1951 return 0;
1952
1953 if (!skb_is_gso(skb))
1954 return 0;
1955
1956 err = skb_cow_head(skb, 0);
1957 if (err < 0)
1958 return err;
1959
1960 /* cppcheck-suppress unreadVariable */
1961 ip.hdr = skb_network_header(skb);
1962 l4.hdr = skb_transport_header(skb);
1963
1964 /* initialize outer IP header fields */
1965 if (ip.v4->version == 4) {
1966 ip.v4->tot_len = 0;
1967 ip.v4->check = 0;
1968 } else {
1969 ip.v6->payload_len = 0;
1970 }
1971
1972 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1973 SKB_GSO_GRE_CSUM |
1974 SKB_GSO_IPXIP4 |
1975 SKB_GSO_IPXIP6 |
1976 SKB_GSO_UDP_TUNNEL |
1977 SKB_GSO_UDP_TUNNEL_CSUM)) {
1978 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1979 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1980 l4.udp->len = 0;
1981
1982 /* determine offset of outer transport header */
1983 l4_start = (u8)(l4.hdr - skb->data);
1984
1985 /* remove payload length from outer checksum */
1986 paylen = skb->len - l4_start;
1987 csum_replace_by_diff(&l4.udp->check,
1988 (__force __wsum)htonl(paylen));
1989 }
1990
1991 /* reset pointers to inner headers */
1992
1993 /* cppcheck-suppress unreadVariable */
1994 ip.hdr = skb_inner_network_header(skb);
1995 l4.hdr = skb_inner_transport_header(skb);
1996
1997 /* initialize inner IP header fields */
1998 if (ip.v4->version == 4) {
1999 ip.v4->tot_len = 0;
2000 ip.v4->check = 0;
2001 } else {
2002 ip.v6->payload_len = 0;
2003 }
2004 }
2005
2006 /* determine offset of transport header */
2007 l4_start = (u8)(l4.hdr - skb->data);
2008
2009 /* remove payload length from checksum */
2010 paylen = skb->len - l4_start;
2011
2012 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2013 csum_replace_by_diff(&l4.udp->check,
2014 (__force __wsum)htonl(paylen));
2015 /* compute length of UDP segmentation header */
2016 off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2017 } else {
2018 csum_replace_by_diff(&l4.tcp->check,
2019 (__force __wsum)htonl(paylen));
2020 /* compute length of TCP segmentation header */
2021 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2022 }
2023
2024 /* update gso_segs and bytecount */
2025 first->gso_segs = skb_shinfo(skb)->gso_segs;
2026 first->bytecount += (first->gso_segs - 1) * off->header_len;
2027
2028 cd_tso_len = skb->len - off->header_len;
2029 cd_mss = skb_shinfo(skb)->gso_size;
2030
2031 /* record cdesc_qw1 with TSO parameters */
2032 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2033 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2034 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2035 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2036 first->tx_flags |= ICE_TX_FLAGS_TSO;
2037 return 1;
2038 }
2039
2040 /**
2041 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2042 * @size: transmit request size in bytes
2043 *
2044 * Due to hardware alignment restrictions (4K alignment), we need to
2045 * assume that we can have no more than 12K of data per descriptor, even
2046 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2047 * Thus, we need to divide by 12K. But division is slow!
Instead, 2048 * we decompose the operation into shifts and one relatively cheap 2049 * multiply operation. 2050 * 2051 * To divide by 12K, we first divide by 4K, then divide by 3: 2052 * To divide by 4K, shift right by 12 bits 2053 * To divide by 3, multiply by 85, then divide by 256 2054 * (Divide by 256 is done by shifting right by 8 bits) 2055 * Finally, we add one to round up. Because 256 isn't an exact multiple of 2056 * 3, we'll underestimate near each multiple of 12K. This is actually more 2057 * accurate as we have 4K - 1 of wiggle room that we can fit into the last 2058 * segment. For our purposes this is accurate out to 1M which is orders of 2059 * magnitude greater than our largest possible GSO size. 2060 * 2061 * This would then be implemented as: 2062 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 2063 * 2064 * Since multiplication and division are commutative, we can reorder 2065 * operations into: 2066 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 2067 */ 2068 static unsigned int ice_txd_use_count(unsigned int size) 2069 { 2070 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 2071 } 2072 2073 /** 2074 * ice_xmit_desc_count - calculate number of Tx descriptors needed 2075 * @skb: send buffer 2076 * 2077 * Returns number of data descriptors needed for this skb. 2078 */ 2079 static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 2080 { 2081 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 2082 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2083 unsigned int count = 0, size = skb_headlen(skb); 2084 2085 for (;;) { 2086 count += ice_txd_use_count(size); 2087 2088 if (!nr_frags--) 2089 break; 2090 2091 size = skb_frag_size(frag++); 2092 } 2093 2094 return count; 2095 } 2096 2097 /** 2098 * __ice_chk_linearize - Check if there are more than 8 buffers per packet 2099 * @skb: send buffer 2100 * 2101 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 2102 * and so we need to figure out the cases where we need to linearize the skb. 2103 * 2104 * For TSO we need to count the TSO header and segment payload separately. 2105 * As such we need to check cases where we have 7 fragments or more as we 2106 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 2107 * the segment payload in the first descriptor, and another 7 for the 2108 * fragments. 2109 */ 2110 static bool __ice_chk_linearize(struct sk_buff *skb) 2111 { 2112 const skb_frag_t *frag, *stale; 2113 int nr_frags, sum; 2114 2115 /* no need to check if number of frags is less than 7 */ 2116 nr_frags = skb_shinfo(skb)->nr_frags; 2117 if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 2118 return false; 2119 2120 /* We need to walk through the list and validate that each group 2121 * of 6 fragments totals at least gso_size. 2122 */ 2123 nr_frags -= ICE_MAX_BUF_TXD - 2; 2124 frag = &skb_shinfo(skb)->frags[0]; 2125 2126 /* Initialize size to the negative value of gso_size minus 1. We 2127 * use this as the worst case scenario in which the frag ahead 2128 * of us only provides one byte which is why we are limited to 6 2129 * descriptors for a single transmit as the header and previous 2130 * fragment are already consuming 2 descriptors. 
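 *
 * Illustrative numbers only: with gso_size = 9000, sum starts at -8999;
 * six 1500 byte fragments bring it back up to +1 and the check passes,
 * whereas six 1400 byte fragments only reach -599, the sum stays
 * negative, and the skb has to be linearized.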
2131 */ 2132 sum = 1 - skb_shinfo(skb)->gso_size; 2133 2134 /* Add size of frags 0 through 4 to create our initial sum */ 2135 sum += skb_frag_size(frag++); 2136 sum += skb_frag_size(frag++); 2137 sum += skb_frag_size(frag++); 2138 sum += skb_frag_size(frag++); 2139 sum += skb_frag_size(frag++); 2140 2141 /* Walk through fragments adding latest fragment, testing it, and 2142 * then removing stale fragments from the sum. 2143 */ 2144 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 2145 int stale_size = skb_frag_size(stale); 2146 2147 sum += skb_frag_size(frag++); 2148 2149 /* The stale fragment may present us with a smaller 2150 * descriptor than the actual fragment size. To account 2151 * for that we need to remove all the data on the front and 2152 * figure out what the remainder would be in the last 2153 * descriptor associated with the fragment. 2154 */ 2155 if (stale_size > ICE_MAX_DATA_PER_TXD) { 2156 int align_pad = -(skb_frag_off(stale)) & 2157 (ICE_MAX_READ_REQ_SIZE - 1); 2158 2159 sum -= align_pad; 2160 stale_size -= align_pad; 2161 2162 do { 2163 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; 2164 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; 2165 } while (stale_size > ICE_MAX_DATA_PER_TXD); 2166 } 2167 2168 /* if sum is negative we failed to make sufficient progress */ 2169 if (sum < 0) 2170 return true; 2171 2172 if (!nr_frags--) 2173 break; 2174 2175 sum -= stale_size; 2176 } 2177 2178 return false; 2179 } 2180 2181 /** 2182 * ice_chk_linearize - Check if there are more than 8 fragments per packet 2183 * @skb: send buffer 2184 * @count: number of buffers used 2185 * 2186 * Note: Our HW can't scatter-gather more than 8 fragments to build 2187 * a packet on the wire and so we need to figure out the cases where we 2188 * need to linearize the skb. 
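 *
 * Minimal sketch of the intended call pattern (mirroring the use in
 * ice_xmit_frame_ring() below; statistics bookkeeping elided):
 *
 *	count = ice_xmit_desc_count(skb);
 *	if (ice_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;
 *		count = ice_txd_use_count(skb->len);
 *	}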
2189 */ 2190 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 2191 { 2192 /* Both TSO and single send will work if count is less than 8 */ 2193 if (likely(count < ICE_MAX_BUF_TXD)) 2194 return false; 2195 2196 if (skb_is_gso(skb)) 2197 return __ice_chk_linearize(skb); 2198 2199 /* we can support up to 8 data buffers for a single send */ 2200 return count != ICE_MAX_BUF_TXD; 2201 } 2202 2203 /** 2204 * ice_tstamp - set up context descriptor for hardware timestamp 2205 * @tx_ring: pointer to the Tx ring to send buffer on 2206 * @skb: pointer to the SKB we're sending 2207 * @first: Tx buffer 2208 * @off: Tx offload parameters 2209 */ 2210 static void 2211 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, 2212 struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2213 { 2214 s8 idx; 2215 2216 /* only timestamp the outbound packet if the user has requested it */ 2217 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 2218 return; 2219 2220 if (!tx_ring->ptp_tx) 2221 return; 2222 2223 /* Tx timestamps cannot be sampled when doing TSO */ 2224 if (first->tx_flags & ICE_TX_FLAGS_TSO) 2225 return; 2226 2227 /* Grab an open timestamp slot */ 2228 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); 2229 if (idx < 0) 2230 return; 2231 2232 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2233 (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) | 2234 ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S)); 2235 first->tx_flags |= ICE_TX_FLAGS_TSYN; 2236 } 2237 2238 /** 2239 * ice_xmit_frame_ring - Sends buffer on Tx ring 2240 * @skb: send buffer 2241 * @tx_ring: ring to send buffer on 2242 * 2243 * Returns NETDEV_TX_OK if sent, else an error code 2244 */ 2245 static netdev_tx_t 2246 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) 2247 { 2248 struct ice_tx_offload_params offload = { 0 }; 2249 struct ice_vsi *vsi = tx_ring->vsi; 2250 struct ice_tx_buf *first; 2251 struct ethhdr *eth; 2252 unsigned int count; 2253 int tso, csum; 2254 2255 ice_trace(xmit_frame_ring, tx_ring, skb); 2256 2257 count = ice_xmit_desc_count(skb); 2258 if (ice_chk_linearize(skb, count)) { 2259 if (__skb_linearize(skb)) 2260 goto out_drop; 2261 count = ice_txd_use_count(skb->len); 2262 tx_ring->tx_stats.tx_linearize++; 2263 } 2264 2265 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 2266 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 2267 * + 4 desc gap to avoid the cache line where head is, 2268 * + 1 desc for context descriptor, 2269 * otherwise try next time 2270 */ 2271 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2272 ICE_DESCS_FOR_CTX_DESC)) { 2273 tx_ring->tx_stats.tx_busy++; 2274 return NETDEV_TX_BUSY; 2275 } 2276 2277 /* prefetch for bql data which is infrequently used */ 2278 netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring)); 2279 2280 offload.tx_ring = tx_ring; 2281 2282 /* record the location of the first descriptor for this packet */ 2283 first = &tx_ring->tx_buf[tx_ring->next_to_use]; 2284 first->skb = skb; 2285 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 2286 first->gso_segs = 1; 2287 first->tx_flags = 0; 2288 2289 /* prepare the VLAN tagging flags for Tx */ 2290 ice_tx_prepare_vlan_flags(tx_ring, first); 2291 2292 /* set up TSO offload */ 2293 tso = ice_tso(first, &offload); 2294 if (tso < 0) 2295 goto out_drop; 2296 2297 /* always set up Tx checksum offload */ 2298 csum = ice_tx_csum(first, &offload); 2299 if (csum < 0) 2300 goto out_drop; 2301 2302 /* allow CONTROL frames egress from main VSI if FW LLDP 
disabled */ 2303 eth = (struct ethhdr *)skb_mac_header(skb); 2304 if (unlikely((skb->priority == TC_PRIO_CONTROL || 2305 eth->h_proto == htons(ETH_P_LLDP)) && 2306 vsi->type == ICE_VSI_PF && 2307 vsi->port_info->qos_cfg.is_sw_lldp)) 2308 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2309 ICE_TX_CTX_DESC_SWTCH_UPLINK << 2310 ICE_TXD_CTX_QW1_CMD_S); 2311 2312 ice_tstamp(tx_ring, skb, first, &offload); 2313 if (ice_is_switchdev_running(vsi->back)) 2314 ice_eswitch_set_target_vsi(skb, &offload); 2315 2316 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2317 struct ice_tx_ctx_desc *cdesc; 2318 u16 i = tx_ring->next_to_use; 2319 2320 /* grab the next descriptor */ 2321 cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2322 i++; 2323 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2324 2325 /* setup context descriptor */ 2326 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2327 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2328 cdesc->rsvd = cpu_to_le16(0); 2329 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2330 } 2331 2332 ice_tx_map(tx_ring, first, &offload); 2333 return NETDEV_TX_OK; 2334 2335 out_drop: 2336 ice_trace(xmit_frame_ring_drop, tx_ring, skb); 2337 dev_kfree_skb_any(skb); 2338 return NETDEV_TX_OK; 2339 } 2340 2341 /** 2342 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 2343 * @skb: send buffer 2344 * @netdev: network interface device structure 2345 * 2346 * Returns NETDEV_TX_OK if sent, else an error code 2347 */ 2348 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 2349 { 2350 struct ice_netdev_priv *np = netdev_priv(netdev); 2351 struct ice_vsi *vsi = np->vsi; 2352 struct ice_tx_ring *tx_ring; 2353 2354 tx_ring = vsi->tx_rings[skb->queue_mapping]; 2355 2356 /* hardware can't handle really short frames, hardware padding works 2357 * beyond this point 2358 */ 2359 if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 2360 return NETDEV_TX_OK; 2361 2362 return ice_xmit_frame_ring(skb, tx_ring); 2363 } 2364 2365 /** 2366 * ice_get_dscp_up - return the UP/TC value for a SKB 2367 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping 2368 * @skb: SKB to query for info to determine UP/TC 2369 * 2370 * This function is to only be called when the PF is in L3 DSCP PFC mode 2371 */ 2372 static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb) 2373 { 2374 u8 dscp = 0; 2375 2376 if (skb->protocol == htons(ETH_P_IP)) 2377 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; 2378 else if (skb->protocol == htons(ETH_P_IPV6)) 2379 dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; 2380 2381 return dcbcfg->dscp_map[dscp]; 2382 } 2383 2384 u16 2385 ice_select_queue(struct net_device *netdev, struct sk_buff *skb, 2386 struct net_device *sb_dev) 2387 { 2388 struct ice_pf *pf = ice_netdev_to_pf(netdev); 2389 struct ice_dcbx_cfg *dcbcfg; 2390 2391 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 2392 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) 2393 skb->priority = ice_get_dscp_up(dcbcfg, skb); 2394 2395 return netdev_pick_tx(netdev, skb, sb_dev); 2396 } 2397 2398 /** 2399 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2400 * @tx_ring: tx_ring to clean 2401 */ 2402 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring) 2403 { 2404 struct ice_vsi *vsi = tx_ring->vsi; 2405 s16 i = tx_ring->next_to_clean; 2406 int budget = ICE_DFLT_IRQ_WORK; 2407 struct ice_tx_desc *tx_desc; 2408 struct ice_tx_buf *tx_buf; 2409 2410 tx_buf = &tx_ring->tx_buf[i]; 2411 tx_desc = ICE_TX_DESC(tx_ring, i); 2412 i -= tx_ring->count; 2413 2414 do { 2415 
struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2416 2417 /* if next_to_watch is not set then there is no pending work */ 2418 if (!eop_desc) 2419 break; 2420 2421 /* prevent any other reads prior to eop_desc */ 2422 smp_rmb(); 2423 2424 /* if the descriptor isn't done, no work to do */ 2425 if (!(eop_desc->cmd_type_offset_bsz & 2426 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2427 break; 2428 2429 /* clear next_to_watch to prevent false hangs */ 2430 tx_buf->next_to_watch = NULL; 2431 tx_desc->buf_addr = 0; 2432 tx_desc->cmd_type_offset_bsz = 0; 2433 2434 /* move past filter desc */ 2435 tx_buf++; 2436 tx_desc++; 2437 i++; 2438 if (unlikely(!i)) { 2439 i -= tx_ring->count; 2440 tx_buf = tx_ring->tx_buf; 2441 tx_desc = ICE_TX_DESC(tx_ring, 0); 2442 } 2443 2444 /* unmap the data header */ 2445 if (dma_unmap_len(tx_buf, len)) 2446 dma_unmap_single(tx_ring->dev, 2447 dma_unmap_addr(tx_buf, dma), 2448 dma_unmap_len(tx_buf, len), 2449 DMA_TO_DEVICE); 2450 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2451 devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2452 2453 /* clear next_to_watch to prevent false hangs */ 2454 tx_buf->raw_buf = NULL; 2455 tx_buf->tx_flags = 0; 2456 tx_buf->next_to_watch = NULL; 2457 dma_unmap_len_set(tx_buf, len, 0); 2458 tx_desc->buf_addr = 0; 2459 tx_desc->cmd_type_offset_bsz = 0; 2460 2461 /* move past eop_desc for start of next FD desc */ 2462 tx_buf++; 2463 tx_desc++; 2464 i++; 2465 if (unlikely(!i)) { 2466 i -= tx_ring->count; 2467 tx_buf = tx_ring->tx_buf; 2468 tx_desc = ICE_TX_DESC(tx_ring, 0); 2469 } 2470 2471 budget--; 2472 } while (likely(budget)); 2473 2474 i += tx_ring->count; 2475 tx_ring->next_to_clean = i; 2476 2477 /* re-enable interrupt if needed */ 2478 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2479 } 2480
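/* Each Flow Director programming request on this control queue occupies a
 * pair of descriptors -- the filter descriptor followed by the data
 * descriptor that carries the dummy packet -- which is why the cleanup loop
 * above advances the ring index twice per completion and why a single
 * invocation retires at most ICE_DFLT_IRQ_WORK filters.
 */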