// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID		0x40
#define ICE_FDIR_CLEAN_DELAY	10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
	tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	/* get the bql data ready */
	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* follow the guidelines of other drivers */
		prefetchw(&tx_buf->skb->users);

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_tx_wake_queue(txring_txq(tx_ring));
			++tx_ring->ring_stats->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->ring_stats->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	if (rx_ring->xsk_pool)
		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
	else
		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	if (rx_ring->xsk_pool) {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
	} else {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_frame_truesize
 * @rx_ring: ptr to Rx ring
 * @size: size
 *
 * calculate the truesize, taking into account the PAGE_SIZE of the
 * underlying arch
 */
static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @rx_buf: Rx buffer to store the XDP action
 *
 * Stores the resulting action, one of ICE_XDP_{PASS, CONSUMED, TX, REDIR},
 * in rx_buf->act
 */
static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
	    struct ice_rx_buf *rx_buf)
{
	unsigned int ret = ICE_XDP_PASS;
	u32 act;

	if (!xdp_prog)
		goto exit;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		ret = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (ret == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
			goto out_failure;
		ret = ICE_XDP_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		ret = ICE_XDP_CONSUMED;
	}
exit:
	rx_buf->act = ret;
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
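 *
 * (This is the routine the driver exposes to the stack as its ndo_xdp_xmit
 * callback.)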
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		/* Generally, should not happen */
		if (unlikely(queue_index >= vsi->num_xdp_txq))
			return -ENXIO;
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
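 *
 * (Call-site note, best effort: this is typically invoked from the Rx clean
 * path with the number of descriptors just processed, and at ring bring-up to
 * fill an empty ring.)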
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: XDP buffer
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
		struct ice_rx_buf *rx_buf, struct sk_buff *skb,
		unsigned int size)
{
	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, xdp->frame_sz);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @ntc: index of the Rx buffer to fetch
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       const unsigned int ntc)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[ntc];
	rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	/* Prefetch first cache line of first page.
	 * If xdp->data_meta is unused, this points to the same location as
	 * xdp->data; otherwise we likely have a consumer accessing the first
	 * few bytes of metadata, followed by the actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size,
				xdp->frame_sz);
	} else {
		/* buffer is unused, change the act that should be taken later
		 * on; data was copied onto skb's linear part so there's no
		 * need for adjusting page offset and we can reuse this buffer
		 * as-is
		 */
		rx_buf->act = ICE_XDP_CONSUMED;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will either
 * recycle the buffer or unmap it and free the associated resources.
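 * The recycle-or-free decision comes from ice_can_reuse_rx_page(), which
 * checks the page's reference count and current offset.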
 */
static void
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int offset = rx_ring->rx_offset;
	struct xdp_buff *xdp = &rx_ring->xdp;
	struct ice_tx_ring *xdp_ring = NULL;
	struct sk_buff *skb = rx_ring->skb;
	struct bpf_prog *xdp_prog = NULL;
	u32 ntc = rx_ring->next_to_clean;
	u32 cnt = rx_ring->count;
	u32 cached_ntc = ntc;
	u32 xdp_xmit = 0;
	bool failure;

	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (xdp_prog)
		xdp_ring = rx_ring->xdp_ring;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		unsigned char *hard_start;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		ice_trace(clean_rx_irq, rx_ring, rx_desc);
		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			struct ice_vsi *ctrl_vsi = rx_ring->vsi;

			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
			    ctrl_vsi->vf)
				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
			if (++ntc == cnt)
				ntc = 0;
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);

		if (!size) {
			xdp->data = NULL;
			xdp->data_end = NULL;
			xdp->data_hard_start = NULL;
			xdp->data_meta = NULL;
			goto construct_skb;
		}

		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
			     offset;
		xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depends on the frame length */
		xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
		if (rx_buf->act == ICE_XDP_PASS)
			goto construct_skb;
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		if (++ntc == cnt)
			ntc = 0;
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, xdp, rx_buf, skb, size);
		} else if (likely(xdp->data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		if (++ntc == cnt)
			ntc = 0;
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
					      stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_pkts++;
	}

	while (cached_ntc != ntc) {
		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];

		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
			xdp_xmit |= buf->act;
		} else if (buf->act & ICE_XDP_CONSUMED) {
			buf->pagecnt_bias++;
		} else if (buf->act == ICE_XDP_PASS) {
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
		}

		ice_put_rx_buf(rx_ring, buf);
		if (++cached_ntc >= cnt)
			cached_ntc = 0;
	}
	rx_ring->next_to_clean = ntc;
	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_xmit)
		ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
	rx_ring->skb = skb;

	if (rx_ring->ring_stats)
		ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
					 total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

static void __ice_update_sample(struct ice_q_vector *q_vector,
				struct ice_ring_container *rc,
				struct dim_sample *sample,
				bool is_tx)
{
	u64 packets = 0, bytes = 0;

	if (is_tx) {
		struct ice_tx_ring *tx_ring;

		ice_for_each_tx_ring(tx_ring, *rc) {
			struct ice_ring_stats *ring_stats;

			ring_stats = tx_ring->ring_stats;
			if (!ring_stats)
				continue;
			packets += ring_stats->stats.pkts;
			bytes += ring_stats->stats.bytes;
		}
	} else {
		struct ice_rx_ring *rx_ring;

		ice_for_each_rx_ring(rx_ring, *rc) {
			struct ice_ring_stats *ring_stats;

			ring_stats = rx_ring->ring_stats;
			if (!ring_stats)
				continue;
			packets += ring_stats->stats.pkts;
			bytes += ring_stats->stats.bytes;
		}
	}

	dim_update_sample(q_vector->total_events, packets, bytes, sample);
	sample->comp_ctr = 0;

	/* if dim settings get stale, like when not updated for 1
	 * second or longer, force it to start again. This addresses the
	 * frequent case of an idle queue being switched to by the
	 * scheduler. The 1,000 here means 1,000 milliseconds.
	 */
	if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
		rc->dim.state = DIM_START_MEASURE;
}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured for dynamic ITR.
 */
static void ice_net_dim(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;

	if (ITR_IS_DYNAMIC(tx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, tx, &dim_sample, true);
		net_dim(&tx->dim, dim_sample);
	}

	if (ITR_IS_DYNAMIC(rx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, rx, &dim_sample, false);
		net_dim(&rx->dim, dim_sample);
	}
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
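	 *
	 * For example, with this 2 usec granularity an ITR of 50 usecs ends up
	 * in the interval field as 25 (50 >> 1).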
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/**
 * ice_enable_interrupt - re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * If the VSI is down, the interrupt will not be re-enabled. Also,
 * when enabling the interrupt always reset the wb_on_itr to false
 * and trigger a software interrupt to clean out internal state.
 */
static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;
	bool wb_en = q_vector->wb_on_itr;
	u32 itr_val;

	if (test_bit(ICE_DOWN, vsi->state))
		return;

	/* trigger an ITR delayed software interrupt when exiting busy poll, to
	 * make sure to catch any pending cleanups that might have been missed
	 * due to interrupt state transition. If busy poll or poll isn't
	 * enabled, then don't update ITR, and just enable the interrupt.
	 */
	if (!wb_en) {
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	} else {
		q_vector->wb_on_itr = false;

		/* do two things here with a single write. Set up the third ITR
		 * index to be used for software interrupt moderation, and then
		 * trigger a software interrupt, rate limited to 20K software
		 * interrupts per second; this helps avoid high interrupt loads
		 * caused by frequently entering and exiting polling.
		 */
		itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
			   ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
	}
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode no need to change it */
	if (q_vector->wb_on_itr)
		return;

	/* use previously set ITR values for all of the ITR indices by
	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
	 * be static in non-adaptive mode (user configured)
	 */
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
	      GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_WB_ON_ITR_M);

	q_vector->wb_on_itr = true;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	bool clean_complete = true;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		bool wd;

		if (tx_ring->xsk_pool)
			wd = ice_xmit_zc(tx_ring);
		else if (ice_ring_is_xdp(tx_ring))
			wd = true;
		else
			wd = ice_clean_tx_irq(tx_ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = rx_ring->xsk_pool ?
			  ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
			  ice_clean_rx_irq(rx_ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		/* Set the writeback on ITR so partial completions of
		 * cache-lines will still continue even if we're polling.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (napi_complete_done(napi, work_done)) {
		ice_net_dim(q_vector);
		ice_enable_interrupt(q_vector);
	} else {
		ice_set_wb_on_itr(q_vector);
	}

	return min_t(int, work_done, budget - 1);
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: number of descriptors we want to assure are available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	netif_tx_stop_queue(txring_txq(tx_ring));
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_tx_start_queue(txring_txq(tx_ring));
	++tx_ring->ring_stats->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;
	bool kick;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			 ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;
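
	/* At this point tx_desc still references the final data descriptor of
	 * the packet, and i is the ring index just past it; i becomes the new
	 * next_to_use below.
	 */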
	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
				      netdev_xmit_more());
	if (kick)
		/* notify HW of packet */
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	protocol = vlan_get_protocol(skb);

	if (eth_p_mpls(protocol)) {
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_checksum_start(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
	}

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	/* set the tx_flags to indicate the IP protocol type. this is
	 * required so that checksum header computation below is accurate.
	 */
	if (ip.v4->version == 4)
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (ip.v6->version == 6)
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 */
static void
ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* the VLAN ethertype/tpid is determined by VSI configuration and netdev
	 * feature flags; the driver only allows either 802.1Q or 802.1ad
	 * VLAN offloads exclusively, so we only care about the VLAN ID here
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
			first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
		else
			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
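 *
 * On success, the TSO parameters (TSO length and MSS) are recorded in
 * off->cd_qw1 for the Tx context descriptor.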
1934 */
1935 static
1936 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1937 {
1938 struct sk_buff *skb = first->skb;
1939 union {
1940 struct iphdr *v4;
1941 struct ipv6hdr *v6;
1942 unsigned char *hdr;
1943 } ip;
1944 union {
1945 struct tcphdr *tcp;
1946 struct udphdr *udp;
1947 unsigned char *hdr;
1948 } l4;
1949 u64 cd_mss, cd_tso_len;
1950 __be16 protocol;
1951 u32 paylen;
1952 u8 l4_start;
1953 int err;
1954
1955 if (skb->ip_summed != CHECKSUM_PARTIAL)
1956 return 0;
1957
1958 if (!skb_is_gso(skb))
1959 return 0;
1960
1961 err = skb_cow_head(skb, 0);
1962 if (err < 0)
1963 return err;
1964
1965 protocol = vlan_get_protocol(skb);
1966
1967 if (eth_p_mpls(protocol))
1968 ip.hdr = skb_inner_network_header(skb);
1969 else
1970 ip.hdr = skb_network_header(skb);
1971 l4.hdr = skb_checksum_start(skb);
1972
1973 /* initialize outer IP header fields */
1974 if (ip.v4->version == 4) {
1975 ip.v4->tot_len = 0;
1976 ip.v4->check = 0;
1977 } else {
1978 ip.v6->payload_len = 0;
1979 }
1980
1981 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1982 SKB_GSO_GRE_CSUM |
1983 SKB_GSO_IPXIP4 |
1984 SKB_GSO_IPXIP6 |
1985 SKB_GSO_UDP_TUNNEL |
1986 SKB_GSO_UDP_TUNNEL_CSUM)) {
1987 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1988 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1989 l4.udp->len = 0;
1990
1991 /* determine offset of outer transport header */
1992 l4_start = (u8)(l4.hdr - skb->data);
1993
1994 /* remove payload length from outer checksum */
1995 paylen = skb->len - l4_start;
1996 csum_replace_by_diff(&l4.udp->check,
1997 (__force __wsum)htonl(paylen));
1998 }
1999
2000 /* reset pointers to inner headers */
2001 ip.hdr = skb_inner_network_header(skb);
2002 l4.hdr = skb_inner_transport_header(skb);
2003
2004 /* initialize inner IP header fields */
2005 if (ip.v4->version == 4) {
2006 ip.v4->tot_len = 0;
2007 ip.v4->check = 0;
2008 } else {
2009 ip.v6->payload_len = 0;
2010 }
2011 }
2012
2013 /* determine offset of transport header */
2014 l4_start = (u8)(l4.hdr - skb->data);
2015
2016 /* remove payload length from checksum */
2017 paylen = skb->len - l4_start;
2018
2019 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2020 csum_replace_by_diff(&l4.udp->check,
2021 (__force __wsum)htonl(paylen));
2022 /* compute length of UDP segmentation header */
2023 off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2024 } else {
2025 csum_replace_by_diff(&l4.tcp->check,
2026 (__force __wsum)htonl(paylen));
2027 /* compute length of TCP segmentation header */
2028 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2029 }
2030
2031 /* update gso_segs and bytecount */
2032 first->gso_segs = skb_shinfo(skb)->gso_segs;
2033 first->bytecount += (first->gso_segs - 1) * off->header_len;
2034
2035 cd_tso_len = skb->len - off->header_len;
2036 cd_mss = skb_shinfo(skb)->gso_size;
2037
2038 /* record cdesc_qw1 with TSO parameters */
2039 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2040 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2041 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2042 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2043 first->tx_flags |= ICE_TX_FLAGS_TSO;
2044 return 1;
2045 }
2046
2047 /**
2048 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2049 * @size: transmit request size in bytes
2050 *
2051 * Due to hardware alignment restrictions (4K alignment), we need to
2052 * assume that we can have no more than 12K of data per descriptor, even
2053 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
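* (12K here is simply 16K - 1 rounded down to the 4K read request
* boundary, i.e. ICE_MAX_DATA_PER_TXD_ALIGNED.)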
2054 * Thus, we need to divide by 12K. But division is slow! Instead,
2055 * we decompose the operation into shifts and one relatively cheap
2056 * multiply operation.
2057 *
2058 * To divide by 12K, we first divide by 4K, then divide by 3:
2059 * To divide by 4K, shift right by 12 bits
2060 * To divide by 3, multiply by 85, then divide by 256
2061 * (Divide by 256 is done by shifting right by 8 bits)
2062 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2063 * 3, we'll underestimate near each multiple of 12K. This is actually more
2064 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2065 * segment. For our purposes this is accurate out to 1M which is orders of
2066 * magnitude greater than our largest possible GSO size.
2067 *
2068 * This would then be implemented as:
2069 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2070 *
2071 * Since the multiply and the shifts can be reordered without affecting
2072 * the result for our purposes, this simplifies to:
2073 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2074 */
2075 static unsigned int ice_txd_use_count(unsigned int size)
2076 {
2077 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2078 }
2079
2080 /**
2081 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2082 * @skb: send buffer
2083 *
2084 * Returns number of data descriptors needed for this skb.
2085 */
2086 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2087 {
2088 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2089 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2090 unsigned int count = 0, size = skb_headlen(skb);
2091
2092 for (;;) {
2093 count += ice_txd_use_count(size);
2094
2095 if (!nr_frags--)
2096 break;
2097
2098 size = skb_frag_size(frag++);
2099 }
2100
2101 return count;
2102 }
2103
2104 /**
2105 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2106 * @skb: send buffer
2107 *
2108 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2109 * and so we need to figure out the cases where we need to linearize the skb.
2110 *
2111 * For TSO we need to count the TSO header and segment payload separately.
2112 * As such we need to check cases where we have 7 fragments or more as we
2113 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2114 * the segment payload in the first descriptor, and another 7 for the
2115 * fragments.
2116 */
2117 static bool __ice_chk_linearize(struct sk_buff *skb)
2118 {
2119 const skb_frag_t *frag, *stale;
2120 int nr_frags, sum;
2121
2122 /* no need to check if number of frags is less than 7 */
2123 nr_frags = skb_shinfo(skb)->nr_frags;
2124 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2125 return false;
2126
2127 /* We need to walk through the list and validate that each group
2128 * of 6 fragments totals at least gso_size.
2129 */
2130 nr_frags -= ICE_MAX_BUF_TXD - 2;
2131 frag = &skb_shinfo(skb)->frags[0];
2132
2133 /* Initialize sum to 1 minus gso_size (i.e. the negative of
2134 * gso_size - 1). We use this as the worst case scenario in which
2135 * the frag ahead of us only provides one byte, which is why we are
2136 * limited to 6 descriptors for a single transmit, as the header and
2137 * previous fragment are already consuming 2 descriptors.
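* For example, with a gso_size of 9000 the sum starts at -8999, so the
* first six fragments must together provide at least 8999 bytes at the
* first check, before the oldest fragment is dropped from the running
* window; if they do not, we report that the skb must be linearized.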
2138 */ 2139 sum = 1 - skb_shinfo(skb)->gso_size; 2140 2141 /* Add size of frags 0 through 4 to create our initial sum */ 2142 sum += skb_frag_size(frag++); 2143 sum += skb_frag_size(frag++); 2144 sum += skb_frag_size(frag++); 2145 sum += skb_frag_size(frag++); 2146 sum += skb_frag_size(frag++); 2147 2148 /* Walk through fragments adding latest fragment, testing it, and 2149 * then removing stale fragments from the sum. 2150 */ 2151 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 2152 int stale_size = skb_frag_size(stale); 2153 2154 sum += skb_frag_size(frag++); 2155 2156 /* The stale fragment may present us with a smaller 2157 * descriptor than the actual fragment size. To account 2158 * for that we need to remove all the data on the front and 2159 * figure out what the remainder would be in the last 2160 * descriptor associated with the fragment. 2161 */ 2162 if (stale_size > ICE_MAX_DATA_PER_TXD) { 2163 int align_pad = -(skb_frag_off(stale)) & 2164 (ICE_MAX_READ_REQ_SIZE - 1); 2165 2166 sum -= align_pad; 2167 stale_size -= align_pad; 2168 2169 do { 2170 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; 2171 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; 2172 } while (stale_size > ICE_MAX_DATA_PER_TXD); 2173 } 2174 2175 /* if sum is negative we failed to make sufficient progress */ 2176 if (sum < 0) 2177 return true; 2178 2179 if (!nr_frags--) 2180 break; 2181 2182 sum -= stale_size; 2183 } 2184 2185 return false; 2186 } 2187 2188 /** 2189 * ice_chk_linearize - Check if there are more than 8 fragments per packet 2190 * @skb: send buffer 2191 * @count: number of buffers used 2192 * 2193 * Note: Our HW can't scatter-gather more than 8 fragments to build 2194 * a packet on the wire and so we need to figure out the cases where we 2195 * need to linearize the skb. 
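*
* Returns true if the skb needs to be linearized before being
* transmitted, false otherwise.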
2196 */ 2197 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 2198 { 2199 /* Both TSO and single send will work if count is less than 8 */ 2200 if (likely(count < ICE_MAX_BUF_TXD)) 2201 return false; 2202 2203 if (skb_is_gso(skb)) 2204 return __ice_chk_linearize(skb); 2205 2206 /* we can support up to 8 data buffers for a single send */ 2207 return count != ICE_MAX_BUF_TXD; 2208 } 2209 2210 /** 2211 * ice_tstamp - set up context descriptor for hardware timestamp 2212 * @tx_ring: pointer to the Tx ring to send buffer on 2213 * @skb: pointer to the SKB we're sending 2214 * @first: Tx buffer 2215 * @off: Tx offload parameters 2216 */ 2217 static void 2218 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb, 2219 struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2220 { 2221 s8 idx; 2222 2223 /* only timestamp the outbound packet if the user has requested it */ 2224 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 2225 return; 2226 2227 if (!tx_ring->ptp_tx) 2228 return; 2229 2230 /* Tx timestamps cannot be sampled when doing TSO */ 2231 if (first->tx_flags & ICE_TX_FLAGS_TSO) 2232 return; 2233 2234 /* Grab an open timestamp slot */ 2235 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); 2236 if (idx < 0) { 2237 tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++; 2238 return; 2239 } 2240 2241 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2242 (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) | 2243 ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S)); 2244 first->tx_flags |= ICE_TX_FLAGS_TSYN; 2245 } 2246 2247 /** 2248 * ice_xmit_frame_ring - Sends buffer on Tx ring 2249 * @skb: send buffer 2250 * @tx_ring: ring to send buffer on 2251 * 2252 * Returns NETDEV_TX_OK if sent, else an error code 2253 */ 2254 static netdev_tx_t 2255 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) 2256 { 2257 struct ice_tx_offload_params offload = { 0 }; 2258 struct ice_vsi *vsi = tx_ring->vsi; 2259 struct ice_tx_buf *first; 2260 struct ethhdr *eth; 2261 unsigned int count; 2262 int tso, csum; 2263 2264 ice_trace(xmit_frame_ring, tx_ring, skb); 2265 2266 count = ice_xmit_desc_count(skb); 2267 if (ice_chk_linearize(skb, count)) { 2268 if (__skb_linearize(skb)) 2269 goto out_drop; 2270 count = ice_txd_use_count(skb->len); 2271 tx_ring->ring_stats->tx_stats.tx_linearize++; 2272 } 2273 2274 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 2275 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 2276 * + 4 desc gap to avoid the cache line where head is, 2277 * + 1 desc for context descriptor, 2278 * otherwise try next time 2279 */ 2280 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2281 ICE_DESCS_FOR_CTX_DESC)) { 2282 tx_ring->ring_stats->tx_stats.tx_busy++; 2283 return NETDEV_TX_BUSY; 2284 } 2285 2286 /* prefetch for bql data which is infrequently used */ 2287 netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring)); 2288 2289 offload.tx_ring = tx_ring; 2290 2291 /* record the location of the first descriptor for this packet */ 2292 first = &tx_ring->tx_buf[tx_ring->next_to_use]; 2293 first->skb = skb; 2294 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 2295 first->gso_segs = 1; 2296 first->tx_flags = 0; 2297 2298 /* prepare the VLAN tagging flags for Tx */ 2299 ice_tx_prepare_vlan_flags(tx_ring, first); 2300 if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { 2301 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2302 (ICE_TX_CTX_DESC_IL2TAG2 << 2303 ICE_TXD_CTX_QW1_CMD_S)); 2304 offload.cd_l2tag2 = (first->tx_flags & 
ICE_TX_FLAGS_VLAN_M) >> 2305 ICE_TX_FLAGS_VLAN_S; 2306 } 2307 2308 /* set up TSO offload */ 2309 tso = ice_tso(first, &offload); 2310 if (tso < 0) 2311 goto out_drop; 2312 2313 /* always set up Tx checksum offload */ 2314 csum = ice_tx_csum(first, &offload); 2315 if (csum < 0) 2316 goto out_drop; 2317 2318 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 2319 eth = (struct ethhdr *)skb_mac_header(skb); 2320 if (unlikely((skb->priority == TC_PRIO_CONTROL || 2321 eth->h_proto == htons(ETH_P_LLDP)) && 2322 vsi->type == ICE_VSI_PF && 2323 vsi->port_info->qos_cfg.is_sw_lldp)) 2324 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2325 ICE_TX_CTX_DESC_SWTCH_UPLINK << 2326 ICE_TXD_CTX_QW1_CMD_S); 2327 2328 ice_tstamp(tx_ring, skb, first, &offload); 2329 if (ice_is_switchdev_running(vsi->back)) 2330 ice_eswitch_set_target_vsi(skb, &offload); 2331 2332 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2333 struct ice_tx_ctx_desc *cdesc; 2334 u16 i = tx_ring->next_to_use; 2335 2336 /* grab the next descriptor */ 2337 cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2338 i++; 2339 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2340 2341 /* setup context descriptor */ 2342 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2343 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2344 cdesc->rsvd = cpu_to_le16(0); 2345 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2346 } 2347 2348 ice_tx_map(tx_ring, first, &offload); 2349 return NETDEV_TX_OK; 2350 2351 out_drop: 2352 ice_trace(xmit_frame_ring_drop, tx_ring, skb); 2353 dev_kfree_skb_any(skb); 2354 return NETDEV_TX_OK; 2355 } 2356 2357 /** 2358 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 2359 * @skb: send buffer 2360 * @netdev: network interface device structure 2361 * 2362 * Returns NETDEV_TX_OK if sent, else an error code 2363 */ 2364 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 2365 { 2366 struct ice_netdev_priv *np = netdev_priv(netdev); 2367 struct ice_vsi *vsi = np->vsi; 2368 struct ice_tx_ring *tx_ring; 2369 2370 tx_ring = vsi->tx_rings[skb->queue_mapping]; 2371 2372 /* hardware can't handle really short frames, hardware padding works 2373 * beyond this point 2374 */ 2375 if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 2376 return NETDEV_TX_OK; 2377 2378 return ice_xmit_frame_ring(skb, tx_ring); 2379 } 2380 2381 /** 2382 * ice_get_dscp_up - return the UP/TC value for a SKB 2383 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping 2384 * @skb: SKB to query for info to determine UP/TC 2385 * 2386 * This function is to only be called when the PF is in L3 DSCP PFC mode 2387 */ 2388 static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb) 2389 { 2390 u8 dscp = 0; 2391 2392 if (skb->protocol == htons(ETH_P_IP)) 2393 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; 2394 else if (skb->protocol == htons(ETH_P_IPV6)) 2395 dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; 2396 2397 return dcbcfg->dscp_map[dscp]; 2398 } 2399 2400 u16 2401 ice_select_queue(struct net_device *netdev, struct sk_buff *skb, 2402 struct net_device *sb_dev) 2403 { 2404 struct ice_pf *pf = ice_netdev_to_pf(netdev); 2405 struct ice_dcbx_cfg *dcbcfg; 2406 2407 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; 2408 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) 2409 skb->priority = ice_get_dscp_up(dcbcfg, skb); 2410 2411 return netdev_pick_tx(netdev, skb, sb_dev); 2412 } 2413 2414 /** 2415 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2416 * @tx_ring: tx_ring to clean 2417 */ 2418 void 
ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring) 2419 { 2420 struct ice_vsi *vsi = tx_ring->vsi; 2421 s16 i = tx_ring->next_to_clean; 2422 int budget = ICE_DFLT_IRQ_WORK; 2423 struct ice_tx_desc *tx_desc; 2424 struct ice_tx_buf *tx_buf; 2425 2426 tx_buf = &tx_ring->tx_buf[i]; 2427 tx_desc = ICE_TX_DESC(tx_ring, i); 2428 i -= tx_ring->count; 2429 2430 do { 2431 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2432 2433 /* if next_to_watch is not set then there is no pending work */ 2434 if (!eop_desc) 2435 break; 2436 2437 /* prevent any other reads prior to eop_desc */ 2438 smp_rmb(); 2439 2440 /* if the descriptor isn't done, no work to do */ 2441 if (!(eop_desc->cmd_type_offset_bsz & 2442 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2443 break; 2444 2445 /* clear next_to_watch to prevent false hangs */ 2446 tx_buf->next_to_watch = NULL; 2447 tx_desc->buf_addr = 0; 2448 tx_desc->cmd_type_offset_bsz = 0; 2449 2450 /* move past filter desc */ 2451 tx_buf++; 2452 tx_desc++; 2453 i++; 2454 if (unlikely(!i)) { 2455 i -= tx_ring->count; 2456 tx_buf = tx_ring->tx_buf; 2457 tx_desc = ICE_TX_DESC(tx_ring, 0); 2458 } 2459 2460 /* unmap the data header */ 2461 if (dma_unmap_len(tx_buf, len)) 2462 dma_unmap_single(tx_ring->dev, 2463 dma_unmap_addr(tx_buf, dma), 2464 dma_unmap_len(tx_buf, len), 2465 DMA_TO_DEVICE); 2466 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2467 devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2468 2469 /* clear next_to_watch to prevent false hangs */ 2470 tx_buf->raw_buf = NULL; 2471 tx_buf->tx_flags = 0; 2472 tx_buf->next_to_watch = NULL; 2473 dma_unmap_len_set(tx_buf, len, 0); 2474 tx_desc->buf_addr = 0; 2475 tx_desc->cmd_type_offset_bsz = 0; 2476 2477 /* move past eop_desc for start of next FD desc */ 2478 tx_buf++; 2479 tx_desc++; 2480 i++; 2481 if (unlikely(!i)) { 2482 i -= tx_ring->count; 2483 tx_buf = tx_ring->tx_buf; 2484 tx_desc = ICE_TX_DESC(tx_ring, 0); 2485 } 2486 2487 budget--; 2488 } while (likely(budget)); 2489 2490 i += tx_ring->count; 2491 tx_ring->next_to_clean = i; 2492 2493 /* re-enable interrupt if needed */ 2494 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2495 } 2496