// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE 256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}
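
/* Usage sketch (illustrative; the callers live outside this file): the
 * flow director paths build a struct ice_fltr_desc plus a dummy raw frame
 * sized ICE_FDIR_MAX_RAW_PKT_SIZE and program both through the control
 * VSI's first Tx ring:
 *
 *	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, raw_pkt);
 *	if (err)	// ring busy (-EAGAIN) or bad/unmappable args
 *		goto out;
 *
 * The helper consumes two descriptors: the filter-programming descriptor
 * and a data descriptor pointing at the dummy packet.
 */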

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
	tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
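
/* Tx completion protocol used by ice_clean_tx_irq() below: the xmit path
 * stores the last descriptor of each packet in the first buffer's
 * next_to_watch, and hardware sets the DD bit in that descriptor's
 * cmd_type_offset_bsz once the whole packet is done, so the cleanup loop
 * inspects only one descriptor per pending packet.
 */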

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	/* get the bql data ready */
	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* follow the guidelines of other drivers */
		prefetchw(&tx_buf->skb->users);

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_tx_wake_queue(txring_txq(tx_ring));
			++tx_ring->ring_stats->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->ring_stats->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}
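
/* Allocation note: the buffer array and descriptor memory above come from
 * the device-managed allocators (devm_kcalloc()/dmam_alloc_coherent()), so
 * they would be released automatically on driver detach. ice_free_tx_ring()
 * and ice_free_rx_ring() still free them explicitly because rings can be
 * resized and rebuilt many times during the lifetime of the device.
 */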

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	if (rx_ring->xsk_pool)
		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
	else
		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	if (rx_ring->xsk_pool) {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
	} else {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}
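
/* Buffer layout behind the truesize math below: with 4K pages the driver
 * splits each page into two 2K halves and flips between them through
 * page_offset (see ice_rx_buf_adjust_pg_offset()), so truesize is simply
 * half a page. With larger pages the buffer is carved out at rx_offset and
 * truesize must cover headroom plus the skb_shared_info tail.
 */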

/**
 * ice_rx_frame_truesize
 * @rx_ring: ptr to Rx ring
 * @size: size
 *
 * calculate the truesize, taking into account the PAGE_SIZE of the
 * underlying arch
 */
static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (err == ICE_XDP_CONSUMED)
			goto out_failure;
		return err;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		/* Generally, should not happen */
		if (unlikely(queue_index >= vsi->num_xdp_txq))
			return -ENXIO;
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}
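
/* ice_xdp_xmit() is exposed as the netdev's .ndo_xdp_xmit hook (wired up
 * in ice_main.c), so it can be driven by a program on another interface
 * redirecting frames here. The per-CPU queue selection above assumes one
 * XDP Tx ring per CPU unless ice_xdp_locking_key forces ring sharing under
 * the tx_lock.
 */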

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}
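
/* Reference counting used above and by the reuse paths below:
 * ice_alloc_mapped_page() takes USHRT_MAX - 1 page references up front and
 * mirrors them in rx_buf->pagecnt_bias, so the hot path only decrements
 * the local bias (see ice_get_rx_buf()) instead of an atomic refcount.
 * page_count() - pagecnt_bias is the number of references the stack still
 * holds on the page.
 */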

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of the page assigned to the Rx buffer will be used,
 * otherwise the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
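
/* Rx buffer lifecycle in the hot path: ice_get_rx_buf() hands out the
 * buffer and syncs it for CPU access, the payload is attached to an skb
 * (or consumed by XDP), then ice_put_rx_buf() either recycles the
 * half-page via ice_reuse_rx_page() or unmaps and frees it once the page
 * can no longer be reused.
 */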

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @ntc: index of the next to clean element
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       const unsigned int ntc)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[ntc];
	rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing the first few bytes of meta
	 * data, and then the actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}
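
/* Two skb-creation strategies: ice_build_skb() above wraps the existing
 * page half with napi_build_skb() at zero copy cost, while
 * ice_construct_skb() below allocates a fresh linear skb and copies up to
 * ICE_RX_HDR_SIZE header bytes, attaching any remainder as a page frag.
 * ice_clean_rx_irq() picks between them per ring via
 * ice_ring_uses_build_skb().
 */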

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will either
 * recycle the buffer or unmap it and free the associated resources.
 */
static void
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}
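
/* A received frame can span several descriptors, so ice_clean_rx_irq()
 * below parks the partially assembled skb in rx_ring->skb between loop
 * iterations and even across NAPI polls; ice_is_non_eop() flags
 * descriptors that are not yet end-of-packet.
 */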

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int offset = rx_ring->rx_offset;
	struct xdp_buff *xdp = &rx_ring->xdp;
	struct ice_tx_ring *xdp_ring = NULL;
	unsigned int xdp_res, xdp_xmit = 0;
	struct sk_buff *skb = rx_ring->skb;
	struct bpf_prog *xdp_prog = NULL;
	u32 ntc = rx_ring->next_to_clean;
	u32 cnt = rx_ring->count;
	bool failure;

	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (xdp_prog)
		xdp_ring = rx_ring->xdp_ring;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		unsigned char *hard_start;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		ice_trace(clean_rx_irq, rx_ring, rx_desc);
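		/* Flow Director programming-status descriptors, and anything
		 * received on a ring without a netdev (i.e. the control VSI),
		 * carry no packet data: hand them to the VF FDIR handler when
		 * applicable and just recycle the ring slot.
		 */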
		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			struct ice_vsi *ctrl_vsi = rx_ring->vsi;

			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
			    ctrl_vsi->vf)
				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
			if (++ntc == cnt)
				ntc = 0;
			ice_put_rx_buf(rx_ring, NULL);
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);

		if (!size) {
			xdp->data = NULL;
			xdp->data_end = NULL;
			xdp->data_hard_start = NULL;
			xdp->data_meta = NULL;
			goto construct_skb;
		}

		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
			     offset;
		xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depends on the frame length */
		xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		if (!xdp_prog)
			goto construct_skb;

		xdp_res = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring);
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, xdp->frame_sz);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		if (++ntc == cnt)
			ntc = 0;
		ice_put_rx_buf(rx_ring, rx_buf);
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		} else if (likely(xdp->data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		if (++ntc == cnt)
			ntc = 0;
		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
					      stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_pkts++;
	}

	rx_ring->next_to_clean = ntc;
	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
	rx_ring->skb = skb;

	if (rx_ring->ring_stats)
		ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
					 total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}
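
/* Dynamic interrupt moderation: the helpers below feed per-vector packet
 * and byte totals into the kernel's net_dim library, which picks the next
 * ITR value. __ice_update_sample() sums stats across every ring in the
 * container since a q_vector may own several rings.
 */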

static void __ice_update_sample(struct ice_q_vector *q_vector,
				struct ice_ring_container *rc,
				struct dim_sample *sample,
				bool is_tx)
{
	u64 packets = 0, bytes = 0;

	if (is_tx) {
		struct ice_tx_ring *tx_ring;

		ice_for_each_tx_ring(tx_ring, *rc) {
			struct ice_ring_stats *ring_stats;

			ring_stats = tx_ring->ring_stats;
			if (!ring_stats)
				continue;
			packets += ring_stats->stats.pkts;
			bytes += ring_stats->stats.bytes;
		}
	} else {
		struct ice_rx_ring *rx_ring;

		ice_for_each_rx_ring(rx_ring, *rc) {
			struct ice_ring_stats *ring_stats;

			ring_stats = rx_ring->ring_stats;
			if (!ring_stats)
				continue;
			packets += ring_stats->stats.pkts;
			bytes += ring_stats->stats.bytes;
		}
	}

	dim_update_sample(q_vector->total_events, packets, bytes, sample);
	sample->comp_ctr = 0;

	/* if dim settings get stale, like when not updated for 1
	 * second or longer, force it to start again. This addresses the
	 * frequent case of an idle queue being switched to by the
	 * scheduler. The 1,000 here means 1,000 milliseconds.
	 */
	if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
		rc->dim.state = DIM_START_MEASURE;
}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
static void ice_net_dim(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;

	if (ITR_IS_DYNAMIC(tx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, tx, &dim_sample, true);
		net_dim(&tx->dim, dim_sample);
	}

	if (ITR_IS_DYNAMIC(rx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, rx, &dim_sample, false);
		net_dim(&rx->dim, dim_sample);
	}
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
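
/* Worked example for ice_buildreg_itr(), assuming the usual 2 usec ITR
 * granularity (ICE_ITR_GRAN_S) and even-value mask: a requested ITR of
 * 50 usecs stays 50 after masking and is written as 50 >> 1 = 25 two-usec
 * units in the INTERVAL field, alongside INTENA, CLEARPBA and the ITR
 * index.
 */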

/**
 * ice_enable_interrupt - re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * If the VSI is down, the interrupt will not be re-enabled. Also,
 * when enabling the interrupt always reset the wb_on_itr to false
 * and trigger a software interrupt to clean out internal state.
 */
static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;
	bool wb_en = q_vector->wb_on_itr;
	u32 itr_val;

	if (test_bit(ICE_DOWN, vsi->state))
		return;

	/* trigger an ITR delayed software interrupt when exiting busy poll, to
	 * make sure to catch any pending cleanups that might have been missed
	 * due to interrupt state transition. If busy poll or poll isn't
	 * enabled, then don't update ITR, and just enable the interrupt.
	 */
	if (!wb_en) {
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	} else {
		q_vector->wb_on_itr = false;

		/* do two things here with a single write. Set up the third ITR
		 * index to be used for software interrupt moderation, and then
		 * trigger a software interrupt with a rate limit of 20K on
		 * software interrupts, this will help avoid high interrupt
		 * loads due to frequently polling and exiting polling.
		 */
		itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
			   ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
	}
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode no need to change it */
	if (q_vector->wb_on_itr)
		return;

	/* use previously set ITR values for all of the ITR indices by
	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
	 * be static in non-adaptive mode (user configured)
	 */
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
	      GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_WB_ON_ITR_M);

	q_vector->wb_on_itr = true;
}
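
/* A q_vector therefore sits in one of two completion modes: interrupt
 * driven (ice_enable_interrupt()) or write-back-on-ITR
 * (ice_set_wb_on_itr()) while NAPI or busy-poll keeps the interrupt
 * masked. ice_napi_poll() below flips between the two depending on
 * whether it exhausted its budget.
 */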

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
				container_of(napi, struct ice_q_vector, napi);
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	bool clean_complete = true;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		bool wd;

		if (tx_ring->xsk_pool)
			wd = ice_xmit_zc(tx_ring);
		else if (ice_ring_is_xdp(tx_ring))
			wd = true;
		else
			wd = ice_clean_tx_irq(tx_ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = rx_ring->xsk_pool ?
			  ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
			  ice_clean_rx_irq(rx_ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		/* Set the writeback on ITR so partial completions of
		 * cache-lines will still continue even if we're polling.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (napi_complete_done(napi, work_done)) {
		ice_net_dim(q_vector);
		ice_enable_interrupt(q_vector);
	} else {
		ice_set_wb_on_itr(q_vector);
	}

	return min_t(int, work_done, budget - 1);
}
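
/* Queue stop/wake protocol: the xmit path reserves DESC_NEEDED descriptors
 * (the worst-case count for one skb) through ice_maybe_stop_tx(), while
 * ice_clean_tx_irq() wakes the queue once at least twice that amount is
 * free again (TX_WAKE_THRESHOLD). The smp_mb() below pairs with the
 * barrier in the cleanup path so a wake-up is never missed.
 */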

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	netif_tx_stop_queue(txring_txq(tx_ring));
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_tx_start_queue(txring_txq(tx_ring));
	++tx_ring->ring_stats->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;
	bool kick;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;
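
	/* i now points one slot past the packet's final descriptor; once that
	 * descriptor is stamped with RS and EOP below, i becomes the ring's
	 * next_to_use.
	 */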

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
				      netdev_xmit_more());
	if (kick)
		/* notify HW of packet */
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	protocol = vlan_get_protocol(skb);

	if (eth_p_mpls(protocol)) {
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_checksum_start(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
	}

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	/* set the tx_flags to indicate the IP protocol type. this is
	 * required so that checksum header computation below is accurate.
	 */
	if (ip.v4->version == 4)
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (ip.v6->version == 6)
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 */
static void
ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* the VLAN ethertype/tpid is determined by VSI configuration and netdev
	 * feature flags; the driver only allows either 802.1Q or 802.1ad
	 * VLAN offloads exclusively, so we only care about the VLAN ID here
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
			first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
		else
			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
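
/* Worked example for the gso_segs/bytecount accounting in ice_tso() below:
 * a 7,306-byte skb carrying 66 bytes of headers with gso_size 1448 yields
 * gso_segs = 5, and bytecount grows by (5 - 1) * 66 = 264 bytes so that Tx
 * stats and BQL see the true on-wire byte count of all five segments.
 */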
/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	__be16 protocol;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	protocol = vlan_get_protocol(skb);

	if (eth_p_mpls(protocol))
		ip.hdr = skb_inner_network_header(skb);
	else
		ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header; note this must
		 * be the header size, not the size of the l4.udp pointer
		 */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}
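
/* Worked example (editor's note, numbers chosen only to show the math):
 * a 64000 byte TSO skb with 54 bytes of headers (14 Ethernet + 20 IP +
 * 20 TCP) and gso_size = 1448 leaves ice_tso() with
 *
 *	off->header_len = 54
 *	first->gso_segs = 45 (ceil(63946 / 1448), computed by the stack)
 *	first->bytecount += (45 - 1) * 54, counting the replicated headers
 *	cd_tso_len = 64000 - 54 = 63946, cd_mss = 1448
 *
 * with cd_tso_len and cd_mss packed into context descriptor quad word 1.
 */
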
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
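
/* Worked example (editor's note, not driver code): for size = 32768 (a 32K
 * fragment), 32768 * 85 = 2785280 and 2785280 >> 20 = 2, so adding the one
 * descriptor for the data pointer gives an estimate of 3 descriptors,
 * matching ceil(32768 / 12K) = 3. For size = 12288 (exactly 12K) the shift
 * yields 0 and the estimate is 1 descriptor; that is the "underestimate
 * near each multiple of 12K" noted above, and it is still correct because
 * 12K fits within a single 16K - 1 byte descriptor.
 */
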
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize sum to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
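
/* Worked example (editor's note, assuming all payload lives in the frags):
 * with gso_size = 2000 and eight 256 byte fragments, the walk above starts
 * from sum = 1 - 2000 = -1999, adds frags 0-4 (sum = -719), then adds frag 5
 * on the first loop iteration (sum = -463). The sum is still negative: the
 * first 6 fragments hold only 1536 bytes of a 2000 byte segment, so a single
 * segment could span more than 6 data buffers, and the function returns true
 * so the caller linearizes the skb.
 */
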
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0) {
		tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
		return;
	}

	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}
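
/* Editor's note: the timestamp slot index above is packed into the same
 * quad word 1 bit range that TSO uses for its length
 * (ICE_TXD_CTX_QW1_TSO_LEN_S); the DTYPE/command bits select how hardware
 * interprets it. Since both offloads would contend for the same context
 * descriptor field, this is consistent with the early return that skips
 * timestamping on TSO packets.
 */
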
/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->ring_stats->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->ring_stats->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* prefetch for bql data which is infrequently used */
	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);
	if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					(ICE_TX_CTX_DESC_IL2TAG2 <<
					 ICE_TXD_CTX_QW1_CMD_S));
		offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
				    ICE_TX_FLAGS_VLAN_S;
	}

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);
	if (ice_is_switchdev_running(vsi->back))
		ice_eswitch_set_target_vsi(skb, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function is only to be called when the PF is in L3 DSCP PFC mode.
 */
static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
{
	u8 dscp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return dcbcfg->dscp_map[dscp];
}

/**
 * ice_select_queue - select a Tx queue, applying DSCP based priority first
 * @netdev: network interface device structure
 * @skb: send buffer
 * @sb_dev: subordinate device, if any
 *
 * When the PF is in DSCP PFC mode, derive skb->priority from the packet's
 * DSCP field before letting the stack pick a queue.
 */
u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
		 struct net_device *sb_dev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *dcbcfg;

	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
		skb->priority = ice_get_dscp_up(dcbcfg, skb);

	return netdev_pick_tx(netdev, skb, sb_dev);
}
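
/* Worked example (editor's note, not driver code): an IPv4 packet sent with
 * TOS 0xb8 carries DSCP 46 (0xb8 >> 2, the Expedited Forwarding code point),
 * so in DSCP PFC mode ice_select_queue() sets skb->priority to
 * dcbcfg->dscp_map[46] before netdev_pick_tx() runs, letting the stack's
 * queue selection honor the DSCP derived priority.
 */
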
/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* reset the Tx buffer and descriptor for reuse */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}