// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID		0x40
#define ICE_FDIR_CLEAN_DELAY	10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	if (rx_ring->xsk_pool)
		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
	else
		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	if (rx_ring->xsk_pool) {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
	} else {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}
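
/* Illustrative note (editorial addition, not part of the upstream driver):
 * with 4 KB pages ice_rx_frame_truesize() always reports half a page
 * (2048 bytes), which is what allows ice_rx_buf_adjust_pg_offset() below to
 * flip between the two halves of a page with a simple XOR. On larger pages
 * the value instead tracks the actual frame, e.g. for a 1500-byte frame and
 * an assumed rx_offset of 256 bytes it is roughly
 * SKB_DATA_ALIGN(256 + 1500) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
 */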

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (err == ICE_XDP_CONSUMED)
			goto out_failure;
		return err;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
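
/* Worked example (editorial addition, not part of the upstream source): when
 * a page is first mapped, ice_alloc_mapped_page() bumps the page refcount by
 * USHRT_MAX - 1 and sets pagecnt_bias to USHRT_MAX, so the driver effectively
 * owns a large pool of references without touching the atomic refcount again.
 * Each time a buffer is handed up the stack, pagecnt_bias is decremented
 * instead of calling get_page(). The reuse check above then treats the page
 * as recyclable only while page_count() minus pagecnt_bias is at most 1,
 * i.e. nobody outside the driver still holds a reference to the other half
 * of the page.
 */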

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record the Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
	       int rx_buf_pgcnt)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int offset = rx_ring->rx_offset;
	struct ice_tx_ring *xdp_ring = NULL;
	unsigned int xdp_res, xdp_xmit = 0;
	struct sk_buff *skb = rx_ring->skb;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (xdp_prog)
		xdp_ring = rx_ring->xdp_ring;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		unsigned char *hard_start;
		unsigned int size;
		u16 stat_err_bits;
		int rx_buf_pgcnt;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		ice_trace(clean_rx_irq, rx_ring, rx_desc);
		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			struct ice_vsi *ctrl_vsi = rx_ring->vsi;

			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
			    ctrl_vsi->vf_id != ICE_INVAL_VFID)
				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
			ice_put_rx_buf(rx_ring, NULL, 0);
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
			     offset;
		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depends on len size */
		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		if (!xdp_prog)
			goto construct_skb;

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		} else if (likely(xdp.data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
	rx_ring->skb = skb;

	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

static void __ice_update_sample(struct ice_q_vector *q_vector,
				struct ice_ring_container *rc,
				struct dim_sample *sample,
				bool is_tx)
{
	u64 packets = 0, bytes = 0;

	if (is_tx) {
		struct ice_tx_ring *tx_ring;

		ice_for_each_tx_ring(tx_ring, *rc) {
			packets += tx_ring->stats.pkts;
			bytes += tx_ring->stats.bytes;
		}
	} else {
		struct ice_rx_ring *rx_ring;

		ice_for_each_rx_ring(rx_ring, *rc) {
			packets += rx_ring->stats.pkts;
			bytes += rx_ring->stats.bytes;
		}
	}

	dim_update_sample(q_vector->total_events, packets, bytes, sample);
	sample->comp_ctr = 0;

	/* if dim settings get stale, like when not updated for 1
	 * second or longer, force it to start again. This addresses the
	 * frequent case of an idle queue being switched to by the
	 * scheduler. The 1,000 here means 1,000 milliseconds.
	 */
	if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
		rc->dim.state = DIM_START_MEASURE;
}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
static void ice_net_dim(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;

	if (ITR_IS_DYNAMIC(tx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, tx, &dim_sample, true);
		net_dim(&tx->dim, dim_sample);
	}

	if (ITR_IS_DYNAMIC(rx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, rx, &dim_sample, false);
		net_dim(&rx->dim, dim_sample);
	}
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
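
/* Worked example (editorial addition, not part of the upstream source): the
 * interval field of GLINT_DYN_CTL is programmed in 2 usec units, so a
 * requested ITR of 50 usecs becomes 25 ticks. ICE_ITR_MASK clears bit 0, so
 * odd microsecond values are rounded down, and the single shift by
 * (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S) both divides by two and places
 * the result in the interval field. Calling ice_buildreg_itr(itr_idx, 50)
 * therefore yields a value with INTENA and CLEARPBA set, the given ITR index
 * in its field, and an interval of 25.
 */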

/**
 * ice_enable_interrupt - re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * If the VSI is down, the interrupt will not be re-enabled. Also,
 * when enabling the interrupt always reset the wb_on_itr to false
 * and trigger a software interrupt to clean out internal state.
 */
static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;
	bool wb_en = q_vector->wb_on_itr;
	u32 itr_val;

	if (test_bit(ICE_DOWN, vsi->state))
		return;

	/* trigger an ITR delayed software interrupt when exiting busy poll, to
	 * make sure to catch any pending cleanups that might have been missed
	 * due to interrupt state transition. If busy poll or poll isn't
	 * enabled, then don't update ITR, and just enable the interrupt.
	 */
	if (!wb_en) {
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	} else {
		q_vector->wb_on_itr = false;

		/* do two things here with a single write. Set up the third ITR
		 * index to be used for software interrupt moderation, and then
		 * trigger a software interrupt with a rate limit of 20K on
		 * software interrupts, this will help avoid high interrupt
		 * loads due to frequently polling and exiting polling.
		 */
		itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
			   ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
	}
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode no need to change it */
	if (q_vector->wb_on_itr)
		return;

	/* use previously set ITR values for all of the ITR indices by
	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
	 * be static in non-adaptive mode (user configured)
	 */
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
	      GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_WB_ON_ITR_M);

	q_vector->wb_on_itr = true;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
				container_of(napi, struct ice_q_vector, napi);
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	bool clean_complete = true;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		bool wd;

		if (tx_ring->xsk_pool)
			wd = ice_clean_tx_irq_zc(tx_ring, budget);
		else if (ice_ring_is_xdp(tx_ring))
			wd = true;
		else
			wd = ice_clean_tx_irq(tx_ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = rx_ring->xsk_pool ?
			  ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
			  ice_clean_rx_irq(rx_ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		/* Set the writeback on ITR so partial completions of
		 * cache-lines will still continue even if we're polling.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		ice_net_dim(q_vector);
		ice_enable_interrupt(q_vector);
	} else {
		ice_set_wb_on_itr(q_vector);
	}

	return min_t(int, work_done, budget - 1);
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			 ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	protocol = vlan_get_protocol(skb);

	if (protocol == htons(ETH_P_IP))
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW, such as VLAN, DCB, etc.
 */
static void
ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
	 * insertion for 802.1AD is not supported
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
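
/* Worked example for ice_tx_csum() above (editorial addition, not part of the
 * upstream source): for a plain TCP/IPv4 frame with a 14-byte Ethernet header,
 * a 20-byte IP header and a 20-byte TCP header, the lengths are encoded in the
 * units the descriptor expects: MACLEN = 14 / 2 = 7 two-byte words,
 * IPLEN = 20 / 4 = 5 four-byte words, and L4LEN = tcp->doff = 5 four-byte
 * words, each shifted into its field of td_offset.
 */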
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* cppcheck-suppress unreadVariable */
	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */

		/* cppcheck-suppress unreadVariable */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow!
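 * (As a worked example: a 24K request needs ceil(24K / 12K) = 2 descriptors,
 * and the multiply-and-shift below gives ((24576 * 85) >> 20) + 1 = 2 as
 * well.)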
 * Instead, we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
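	 * From there the loop below keeps a running sum over a sliding window
	 * of six fragments: each pass adds the newest fragment, checks that
	 * the window still covers roughly gso_size bytes (sum stays
	 * non-negative), and then drops the oldest ("stale") fragment from
	 * the sum.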
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
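 *
 * The count argument comes from ice_xmit_desc_count(): fewer than 8
 * descriptors is always fine, a non-TSO send needing more than 8 must be
 * linearized, and a TSO send gets the finer-grained window check in
 * __ice_chk_linearize().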
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0)
		return;

	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);
	if (ice_is_switchdev_running(vsi->back))
		ice_eswitch_set_target_vsi(skb, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function is to only be called when the PF is in L3 DSCP PFC mode
 */
static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
{
	u8 dscp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return dcbcfg->dscp_map[dscp];
}

u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
		 struct net_device *sb_dev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *dcbcfg;

	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
		skb->priority = ice_get_dscp_up(dcbcfg, skb);

	return netdev_pick_tx(netdev, skb, sb_dev);
}

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* clear next_to_watch to prevent false hangs */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}
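/* Illustrative sketch, not part of ice_txrx.c: ice_start_xmit() and
 * ice_select_queue() above are the driver's ndo_start_xmit and
 * ndo_select_queue callbacks. The real registration lives in ice_main.c's
 * ice_netdev_ops; the hypothetical struct below only shows the shape of that
 * hookup and is never compiled here.
 */
#if 0
static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	  = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
};
#endif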