1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 /* The driver transmit and receive code */ 5 6 #include <linux/prefetch.h> 7 #include <linux/mm.h> 8 #include <linux/bpf_trace.h> 9 #include <net/xdp.h> 10 #include "ice_txrx_lib.h" 11 #include "ice_lib.h" 12 #include "ice.h" 13 #include "ice_dcb_lib.h" 14 #include "ice_xsk.h" 15 16 #define ICE_RX_HDR_SIZE 256 17 18 #define FDIR_DESC_RXDID 0x40 19 #define ICE_FDIR_CLEAN_DELAY 10 20 21 /** 22 * ice_prgm_fdir_fltr - Program a Flow Director filter 23 * @vsi: VSI to send dummy packet 24 * @fdir_desc: flow director descriptor 25 * @raw_packet: allocated buffer for flow director 26 */ 27 int 28 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, 29 u8 *raw_packet) 30 { 31 struct ice_tx_buf *tx_buf, *first; 32 struct ice_fltr_desc *f_desc; 33 struct ice_tx_desc *tx_desc; 34 struct ice_ring *tx_ring; 35 struct device *dev; 36 dma_addr_t dma; 37 u32 td_cmd; 38 u16 i; 39 40 /* VSI and Tx ring */ 41 if (!vsi) 42 return -ENOENT; 43 tx_ring = vsi->tx_rings[0]; 44 if (!tx_ring || !tx_ring->desc) 45 return -ENOENT; 46 dev = tx_ring->dev; 47 48 /* we are using two descriptors to add/del a filter and we can wait */ 49 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { 50 if (!i) 51 return -EAGAIN; 52 msleep_interruptible(1); 53 } 54 55 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE, 56 DMA_TO_DEVICE); 57 58 if (dma_mapping_error(dev, dma)) 59 return -EINVAL; 60 61 /* grab the next descriptor */ 62 i = tx_ring->next_to_use; 63 first = &tx_ring->tx_buf[i]; 64 f_desc = ICE_TX_FDIRDESC(tx_ring, i); 65 memcpy(f_desc, fdir_desc, sizeof(*f_desc)); 66 67 i++; 68 i = (i < tx_ring->count) ? i : 0; 69 tx_desc = ICE_TX_DESC(tx_ring, i); 70 tx_buf = &tx_ring->tx_buf[i]; 71 72 i++; 73 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 74 75 memset(tx_buf, 0, sizeof(*tx_buf)); 76 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE); 77 dma_unmap_addr_set(tx_buf, dma, dma); 78 79 tx_desc->buf_addr = cpu_to_le64(dma); 80 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY | 81 ICE_TX_DESC_CMD_RE; 82 83 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT; 84 tx_buf->raw_buf = raw_packet; 85 86 tx_desc->cmd_type_offset_bsz = 87 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0); 88 89 /* Force memory write to complete before letting h/w know 90 * there are new descriptors to fetch. 91 */ 92 wmb(); 93 94 /* mark the data descriptor to be watched */ 95 first->next_to_watch = tx_desc; 96 97 writel(tx_ring->next_to_use, tx_ring->tail); 98 99 return 0; 100 } 101 102 /** 103 * ice_unmap_and_free_tx_buf - Release a Tx buffer 104 * @ring: the ring that owns the buffer 105 * @tx_buf: the buffer to free 106 */ 107 static void 108 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) 109 { 110 if (tx_buf->skb) { 111 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 112 devm_kfree(ring->dev, tx_buf->raw_buf); 113 else if (ice_ring_is_xdp(ring)) 114 page_frag_free(tx_buf->raw_buf); 115 else 116 dev_kfree_skb_any(tx_buf->skb); 117 if (dma_unmap_len(tx_buf, len)) 118 dma_unmap_single(ring->dev, 119 dma_unmap_addr(tx_buf, dma), 120 dma_unmap_len(tx_buf, len), 121 DMA_TO_DEVICE); 122 } else if (dma_unmap_len(tx_buf, len)) { 123 dma_unmap_page(ring->dev, 124 dma_unmap_addr(tx_buf, dma), 125 dma_unmap_len(tx_buf, len), 126 DMA_TO_DEVICE); 127 } 128 129 tx_buf->next_to_watch = NULL; 130 tx_buf->skb = NULL; 131 dma_unmap_len_set(tx_buf, len, 0); 132 /* tx_buf must be completely set up in the transmit path */ 133 } 134 135 static struct netdev_queue *txring_txq(const struct ice_ring *ring) 136 { 137 return netdev_get_tx_queue(ring->netdev, ring->q_index); 138 } 139 140 /** 141 * ice_clean_tx_ring - Free any empty Tx buffers 142 * @tx_ring: ring to be cleaned 143 */ 144 void ice_clean_tx_ring(struct ice_ring *tx_ring) 145 { 146 u16 i; 147 148 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { 149 ice_xsk_clean_xdp_ring(tx_ring); 150 goto tx_skip_free; 151 } 152 153 /* ring already cleared, nothing to do */ 154 if (!tx_ring->tx_buf) 155 return; 156 157 /* Free all the Tx ring sk_buffs */ 158 for (i = 0; i < tx_ring->count; i++) 159 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); 160 161 tx_skip_free: 162 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); 163 164 /* Zero out the descriptor ring */ 165 memset(tx_ring->desc, 0, tx_ring->size); 166 167 tx_ring->next_to_use = 0; 168 tx_ring->next_to_clean = 0; 169 170 if (!tx_ring->netdev) 171 return; 172 173 /* cleanup Tx queue statistics */ 174 netdev_tx_reset_queue(txring_txq(tx_ring)); 175 } 176 177 /** 178 * ice_free_tx_ring - Free Tx resources per queue 179 * @tx_ring: Tx descriptor ring for a specific queue 180 * 181 * Free all transmit software resources 182 */ 183 void ice_free_tx_ring(struct ice_ring *tx_ring) 184 { 185 ice_clean_tx_ring(tx_ring); 186 devm_kfree(tx_ring->dev, tx_ring->tx_buf); 187 tx_ring->tx_buf = NULL; 188 189 if (tx_ring->desc) { 190 dmam_free_coherent(tx_ring->dev, tx_ring->size, 191 tx_ring->desc, tx_ring->dma); 192 tx_ring->desc = NULL; 193 } 194 } 195 196 /** 197 * ice_clean_tx_irq - Reclaim resources after transmit completes 198 * @tx_ring: Tx ring to clean 199 * @napi_budget: Used to determine if we are in netpoll 200 * 201 * Returns true if there's any budget left (e.g. the clean is finished) 202 */ 203 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) 204 { 205 unsigned int total_bytes = 0, total_pkts = 0; 206 unsigned int budget = ICE_DFLT_IRQ_WORK; 207 struct ice_vsi *vsi = tx_ring->vsi; 208 s16 i = tx_ring->next_to_clean; 209 struct ice_tx_desc *tx_desc; 210 struct ice_tx_buf *tx_buf; 211 212 tx_buf = &tx_ring->tx_buf[i]; 213 tx_desc = ICE_TX_DESC(tx_ring, i); 214 i -= tx_ring->count; 215 216 prefetch(&vsi->state); 217 218 do { 219 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 220 221 /* if next_to_watch is not set then there is no work pending */ 222 if (!eop_desc) 223 break; 224 225 smp_rmb(); /* prevent any other reads prior to eop_desc */ 226 227 /* if the descriptor isn't done, no work yet to do */ 228 if (!(eop_desc->cmd_type_offset_bsz & 229 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 230 break; 231 232 /* clear next_to_watch to prevent false hangs */ 233 tx_buf->next_to_watch = NULL; 234 235 /* update the statistics for this packet */ 236 total_bytes += tx_buf->bytecount; 237 total_pkts += tx_buf->gso_segs; 238 239 if (ice_ring_is_xdp(tx_ring)) 240 page_frag_free(tx_buf->raw_buf); 241 else 242 /* free the skb */ 243 napi_consume_skb(tx_buf->skb, napi_budget); 244 245 /* unmap skb header data */ 246 dma_unmap_single(tx_ring->dev, 247 dma_unmap_addr(tx_buf, dma), 248 dma_unmap_len(tx_buf, len), 249 DMA_TO_DEVICE); 250 251 /* clear tx_buf data */ 252 tx_buf->skb = NULL; 253 dma_unmap_len_set(tx_buf, len, 0); 254 255 /* unmap remaining buffers */ 256 while (tx_desc != eop_desc) { 257 tx_buf++; 258 tx_desc++; 259 i++; 260 if (unlikely(!i)) { 261 i -= tx_ring->count; 262 tx_buf = tx_ring->tx_buf; 263 tx_desc = ICE_TX_DESC(tx_ring, 0); 264 } 265 266 /* unmap any remaining paged data */ 267 if (dma_unmap_len(tx_buf, len)) { 268 dma_unmap_page(tx_ring->dev, 269 dma_unmap_addr(tx_buf, dma), 270 dma_unmap_len(tx_buf, len), 271 DMA_TO_DEVICE); 272 dma_unmap_len_set(tx_buf, len, 0); 273 } 274 } 275 276 /* move us one more past the eop_desc for start of next pkt */ 277 tx_buf++; 278 tx_desc++; 279 i++; 280 if (unlikely(!i)) { 281 i -= tx_ring->count; 282 tx_buf = tx_ring->tx_buf; 283 tx_desc = ICE_TX_DESC(tx_ring, 0); 284 } 285 286 prefetch(tx_desc); 287 288 /* update budget accounting */ 289 budget--; 290 } while (likely(budget)); 291 292 i += tx_ring->count; 293 tx_ring->next_to_clean = i; 294 295 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); 296 297 if (ice_ring_is_xdp(tx_ring)) 298 return !!budget; 299 300 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, 301 total_bytes); 302 303 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) 304 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && 305 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 306 /* Make sure that anybody stopping the queue after this 307 * sees the new next_to_clean. 308 */ 309 smp_mb(); 310 if (__netif_subqueue_stopped(tx_ring->netdev, 311 tx_ring->q_index) && 312 !test_bit(__ICE_DOWN, vsi->state)) { 313 netif_wake_subqueue(tx_ring->netdev, 314 tx_ring->q_index); 315 ++tx_ring->tx_stats.restart_q; 316 } 317 } 318 319 return !!budget; 320 } 321 322 /** 323 * ice_setup_tx_ring - Allocate the Tx descriptors 324 * @tx_ring: the Tx ring to set up 325 * 326 * Return 0 on success, negative on error 327 */ 328 int ice_setup_tx_ring(struct ice_ring *tx_ring) 329 { 330 struct device *dev = tx_ring->dev; 331 332 if (!dev) 333 return -ENOMEM; 334 335 /* warn if we are about to overwrite the pointer */ 336 WARN_ON(tx_ring->tx_buf); 337 tx_ring->tx_buf = 338 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count, 339 GFP_KERNEL); 340 if (!tx_ring->tx_buf) 341 return -ENOMEM; 342 343 /* round up to nearest page */ 344 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), 345 PAGE_SIZE); 346 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, 347 GFP_KERNEL); 348 if (!tx_ring->desc) { 349 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", 350 tx_ring->size); 351 goto err; 352 } 353 354 tx_ring->next_to_use = 0; 355 tx_ring->next_to_clean = 0; 356 tx_ring->tx_stats.prev_pkt = -1; 357 return 0; 358 359 err: 360 devm_kfree(dev, tx_ring->tx_buf); 361 tx_ring->tx_buf = NULL; 362 return -ENOMEM; 363 } 364 365 /** 366 * ice_clean_rx_ring - Free Rx buffers 367 * @rx_ring: ring to be cleaned 368 */ 369 void ice_clean_rx_ring(struct ice_ring *rx_ring) 370 { 371 struct device *dev = rx_ring->dev; 372 u16 i; 373 374 /* ring already cleared, nothing to do */ 375 if (!rx_ring->rx_buf) 376 return; 377 378 if (rx_ring->skb) { 379 dev_kfree_skb(rx_ring->skb); 380 rx_ring->skb = NULL; 381 } 382 383 if (rx_ring->xsk_pool) { 384 ice_xsk_clean_rx_ring(rx_ring); 385 goto rx_skip_free; 386 } 387 388 /* Free all the Rx ring sk_buffs */ 389 for (i = 0; i < rx_ring->count; i++) { 390 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; 391 392 if (!rx_buf->page) 393 continue; 394 395 /* Invalidate cache lines that may have been written to by 396 * device so that we avoid corrupting memory. 397 */ 398 dma_sync_single_range_for_cpu(dev, rx_buf->dma, 399 rx_buf->page_offset, 400 rx_ring->rx_buf_len, 401 DMA_FROM_DEVICE); 402 403 /* free resources associated with mapping */ 404 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), 405 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); 406 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 407 408 rx_buf->page = NULL; 409 rx_buf->page_offset = 0; 410 } 411 412 rx_skip_free: 413 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); 414 415 /* Zero out the descriptor ring */ 416 memset(rx_ring->desc, 0, rx_ring->size); 417 418 rx_ring->next_to_alloc = 0; 419 rx_ring->next_to_clean = 0; 420 rx_ring->next_to_use = 0; 421 } 422 423 /** 424 * ice_free_rx_ring - Free Rx resources 425 * @rx_ring: ring to clean the resources from 426 * 427 * Free all receive software resources 428 */ 429 void ice_free_rx_ring(struct ice_ring *rx_ring) 430 { 431 ice_clean_rx_ring(rx_ring); 432 if (rx_ring->vsi->type == ICE_VSI_PF) 433 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 434 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 435 rx_ring->xdp_prog = NULL; 436 devm_kfree(rx_ring->dev, rx_ring->rx_buf); 437 rx_ring->rx_buf = NULL; 438 439 if (rx_ring->desc) { 440 dmam_free_coherent(rx_ring->dev, rx_ring->size, 441 rx_ring->desc, rx_ring->dma); 442 rx_ring->desc = NULL; 443 } 444 } 445 446 /** 447 * ice_setup_rx_ring - Allocate the Rx descriptors 448 * @rx_ring: the Rx ring to set up 449 * 450 * Return 0 on success, negative on error 451 */ 452 int ice_setup_rx_ring(struct ice_ring *rx_ring) 453 { 454 struct device *dev = rx_ring->dev; 455 456 if (!dev) 457 return -ENOMEM; 458 459 /* warn if we are about to overwrite the pointer */ 460 WARN_ON(rx_ring->rx_buf); 461 rx_ring->rx_buf = 462 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, 463 GFP_KERNEL); 464 if (!rx_ring->rx_buf) 465 return -ENOMEM; 466 467 /* round up to nearest page */ 468 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), 469 PAGE_SIZE); 470 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, 471 GFP_KERNEL); 472 if (!rx_ring->desc) { 473 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", 474 rx_ring->size); 475 goto err; 476 } 477 478 rx_ring->next_to_use = 0; 479 rx_ring->next_to_clean = 0; 480 481 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 482 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); 483 484 if (rx_ring->vsi->type == ICE_VSI_PF && 485 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 486 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 487 rx_ring->q_index, rx_ring->q_vector->napi.napi_id)) 488 goto err; 489 return 0; 490 491 err: 492 devm_kfree(dev, rx_ring->rx_buf); 493 rx_ring->rx_buf = NULL; 494 return -ENOMEM; 495 } 496 497 static unsigned int 498 ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size) 499 { 500 unsigned int truesize; 501 502 #if (PAGE_SIZE < 8192) 503 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ 504 #else 505 truesize = rx_ring->rx_offset ? 506 SKB_DATA_ALIGN(rx_ring->rx_offset + size) + 507 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 508 SKB_DATA_ALIGN(size); 509 #endif 510 return truesize; 511 } 512 513 /** 514 * ice_run_xdp - Executes an XDP program on initialized xdp_buff 515 * @rx_ring: Rx ring 516 * @xdp: xdp_buff used as input to the XDP program 517 * @xdp_prog: XDP program to run 518 * 519 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} 520 */ 521 static int 522 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, 523 struct bpf_prog *xdp_prog) 524 { 525 struct ice_ring *xdp_ring; 526 int err; 527 u32 act; 528 529 act = bpf_prog_run_xdp(xdp_prog, xdp); 530 switch (act) { 531 case XDP_PASS: 532 return ICE_XDP_PASS; 533 case XDP_TX: 534 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()]; 535 return ice_xmit_xdp_buff(xdp, xdp_ring); 536 case XDP_REDIRECT: 537 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 538 return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; 539 default: 540 bpf_warn_invalid_xdp_action(act); 541 fallthrough; 542 case XDP_ABORTED: 543 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 544 fallthrough; 545 case XDP_DROP: 546 return ICE_XDP_CONSUMED; 547 } 548 } 549 550 /** 551 * ice_xdp_xmit - submit packets to XDP ring for transmission 552 * @dev: netdev 553 * @n: number of XDP frames to be transmitted 554 * @frames: XDP frames to be transmitted 555 * @flags: transmit flags 556 * 557 * Returns number of frames successfully sent. Frames that fail are 558 * free'ed via XDP return API. 559 * For error cases, a negative errno code is returned and no-frames 560 * are transmitted (caller must handle freeing frames). 561 */ 562 int 563 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 564 u32 flags) 565 { 566 struct ice_netdev_priv *np = netdev_priv(dev); 567 unsigned int queue_index = smp_processor_id(); 568 struct ice_vsi *vsi = np->vsi; 569 struct ice_ring *xdp_ring; 570 int drops = 0, i; 571 572 if (test_bit(__ICE_DOWN, vsi->state)) 573 return -ENETDOWN; 574 575 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq) 576 return -ENXIO; 577 578 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 579 return -EINVAL; 580 581 xdp_ring = vsi->xdp_rings[queue_index]; 582 for (i = 0; i < n; i++) { 583 struct xdp_frame *xdpf = frames[i]; 584 int err; 585 586 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); 587 if (err != ICE_XDP_TX) { 588 xdp_return_frame_rx_napi(xdpf); 589 drops++; 590 } 591 } 592 593 if (unlikely(flags & XDP_XMIT_FLUSH)) 594 ice_xdp_ring_update_tail(xdp_ring); 595 596 return n - drops; 597 } 598 599 /** 600 * ice_alloc_mapped_page - recycle or make a new page 601 * @rx_ring: ring to use 602 * @bi: rx_buf struct to modify 603 * 604 * Returns true if the page was successfully allocated or 605 * reused. 606 */ 607 static bool 608 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) 609 { 610 struct page *page = bi->page; 611 dma_addr_t dma; 612 613 /* since we are recycling buffers we should seldom need to alloc */ 614 if (likely(page)) 615 return true; 616 617 /* alloc new page for storage */ 618 page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); 619 if (unlikely(!page)) { 620 rx_ring->rx_stats.alloc_page_failed++; 621 return false; 622 } 623 624 /* map page for use */ 625 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), 626 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); 627 628 /* if mapping failed free memory back to system since 629 * there isn't much point in holding memory we can't use 630 */ 631 if (dma_mapping_error(rx_ring->dev, dma)) { 632 __free_pages(page, ice_rx_pg_order(rx_ring)); 633 rx_ring->rx_stats.alloc_page_failed++; 634 return false; 635 } 636 637 bi->dma = dma; 638 bi->page = page; 639 bi->page_offset = rx_ring->rx_offset; 640 page_ref_add(page, USHRT_MAX - 1); 641 bi->pagecnt_bias = USHRT_MAX; 642 643 return true; 644 } 645 646 /** 647 * ice_alloc_rx_bufs - Replace used receive buffers 648 * @rx_ring: ring to place buffers on 649 * @cleaned_count: number of buffers to replace 650 * 651 * Returns false if all allocations were successful, true if any fail. Returning 652 * true signals to the caller that we didn't replace cleaned_count buffers and 653 * there is more work to do. 654 * 655 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx 656 * buffers. Then bump tail at most one time. Grouping like this lets us avoid 657 * multiple tail writes per call. 658 */ 659 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) 660 { 661 union ice_32b_rx_flex_desc *rx_desc; 662 u16 ntu = rx_ring->next_to_use; 663 struct ice_rx_buf *bi; 664 665 /* do nothing if no valid netdev defined */ 666 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || 667 !cleaned_count) 668 return false; 669 670 /* get the Rx descriptor and buffer based on next_to_use */ 671 rx_desc = ICE_RX_DESC(rx_ring, ntu); 672 bi = &rx_ring->rx_buf[ntu]; 673 674 do { 675 /* if we fail here, we have work remaining */ 676 if (!ice_alloc_mapped_page(rx_ring, bi)) 677 break; 678 679 /* sync the buffer for use by the device */ 680 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 681 bi->page_offset, 682 rx_ring->rx_buf_len, 683 DMA_FROM_DEVICE); 684 685 /* Refresh the desc even if buffer_addrs didn't change 686 * because each write-back erases this info. 687 */ 688 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); 689 690 rx_desc++; 691 bi++; 692 ntu++; 693 if (unlikely(ntu == rx_ring->count)) { 694 rx_desc = ICE_RX_DESC(rx_ring, 0); 695 bi = rx_ring->rx_buf; 696 ntu = 0; 697 } 698 699 /* clear the status bits for the next_to_use descriptor */ 700 rx_desc->wb.status_error0 = 0; 701 702 cleaned_count--; 703 } while (cleaned_count); 704 705 if (rx_ring->next_to_use != ntu) 706 ice_release_rx_desc(rx_ring, ntu); 707 708 return !!cleaned_count; 709 } 710 711 /** 712 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse 713 * @rx_buf: Rx buffer to adjust 714 * @size: Size of adjustment 715 * 716 * Update the offset within page so that Rx buf will be ready to be reused. 717 * For systems with PAGE_SIZE < 8192 this function will flip the page offset 718 * so the second half of page assigned to Rx buffer will be used, otherwise 719 * the offset is moved by "size" bytes 720 */ 721 static void 722 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) 723 { 724 #if (PAGE_SIZE < 8192) 725 /* flip page offset to other buffer */ 726 rx_buf->page_offset ^= size; 727 #else 728 /* move offset up to the next cache line */ 729 rx_buf->page_offset += size; 730 #endif 731 } 732 733 /** 734 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx 735 * @rx_buf: buffer containing the page 736 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call 737 * 738 * If page is reusable, we have a green light for calling ice_reuse_rx_page, 739 * which will assign the current buffer to the buffer that next_to_alloc is 740 * pointing to; otherwise, the DMA mapping needs to be destroyed and 741 * page freed 742 */ 743 static bool 744 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) 745 { 746 unsigned int pagecnt_bias = rx_buf->pagecnt_bias; 747 struct page *page = rx_buf->page; 748 749 /* avoid re-using remote and pfmemalloc pages */ 750 if (!dev_page_is_reusable(page)) 751 return false; 752 753 #if (PAGE_SIZE < 8192) 754 /* if we are only owner of page we can reuse it */ 755 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) 756 return false; 757 #else 758 #define ICE_LAST_OFFSET \ 759 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048) 760 if (rx_buf->page_offset > ICE_LAST_OFFSET) 761 return false; 762 #endif /* PAGE_SIZE < 8192) */ 763 764 /* If we have drained the page fragment pool we need to update 765 * the pagecnt_bias and page count so that we fully restock the 766 * number of references the driver holds. 767 */ 768 if (unlikely(pagecnt_bias == 1)) { 769 page_ref_add(page, USHRT_MAX - 1); 770 rx_buf->pagecnt_bias = USHRT_MAX; 771 } 772 773 return true; 774 } 775 776 /** 777 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag 778 * @rx_ring: Rx descriptor ring to transact packets on 779 * @rx_buf: buffer containing page to add 780 * @skb: sk_buff to place the data into 781 * @size: packet length from rx_desc 782 * 783 * This function will add the data contained in rx_buf->page to the skb. 784 * It will just attach the page as a frag to the skb. 785 * The function will then update the page offset. 786 */ 787 static void 788 ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 789 struct sk_buff *skb, unsigned int size) 790 { 791 #if (PAGE_SIZE >= 8192) 792 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset); 793 #else 794 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 795 #endif 796 797 if (!size) 798 return; 799 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, 800 rx_buf->page_offset, size, truesize); 801 802 /* page is being used so we must update the page offset */ 803 ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 804 } 805 806 /** 807 * ice_reuse_rx_page - page flip buffer and store it back on the ring 808 * @rx_ring: Rx descriptor ring to store buffers on 809 * @old_buf: donor buffer to have page reused 810 * 811 * Synchronizes page for reuse by the adapter 812 */ 813 static void 814 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) 815 { 816 u16 nta = rx_ring->next_to_alloc; 817 struct ice_rx_buf *new_buf; 818 819 new_buf = &rx_ring->rx_buf[nta]; 820 821 /* update, and store next to alloc */ 822 nta++; 823 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 824 825 /* Transfer page from old buffer to new buffer. 826 * Move each member individually to avoid possible store 827 * forwarding stalls and unnecessary copy of skb. 828 */ 829 new_buf->dma = old_buf->dma; 830 new_buf->page = old_buf->page; 831 new_buf->page_offset = old_buf->page_offset; 832 new_buf->pagecnt_bias = old_buf->pagecnt_bias; 833 } 834 835 /** 836 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use 837 * @rx_ring: Rx descriptor ring to transact packets on 838 * @size: size of buffer to add to skb 839 * @rx_buf_pgcnt: rx_buf page refcount 840 * 841 * This function will pull an Rx buffer from the ring and synchronize it 842 * for use by the CPU. 843 */ 844 static struct ice_rx_buf * 845 ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size, 846 int *rx_buf_pgcnt) 847 { 848 struct ice_rx_buf *rx_buf; 849 850 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; 851 *rx_buf_pgcnt = 852 #if (PAGE_SIZE < 8192) 853 page_count(rx_buf->page); 854 #else 855 0; 856 #endif 857 prefetchw(rx_buf->page); 858 859 if (!size) 860 return rx_buf; 861 /* we are reusing so sync this buffer for CPU use */ 862 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 863 rx_buf->page_offset, size, 864 DMA_FROM_DEVICE); 865 866 /* We have pulled a buffer for use, so decrement pagecnt_bias */ 867 rx_buf->pagecnt_bias--; 868 869 return rx_buf; 870 } 871 872 /** 873 * ice_build_skb - Build skb around an existing buffer 874 * @rx_ring: Rx descriptor ring to transact packets on 875 * @rx_buf: Rx buffer to pull data from 876 * @xdp: xdp_buff pointing to the data 877 * 878 * This function builds an skb around an existing Rx buffer, taking care 879 * to set up the skb correctly and avoid any memcpy overhead. 880 */ 881 static struct sk_buff * 882 ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 883 struct xdp_buff *xdp) 884 { 885 u8 metasize = xdp->data - xdp->data_meta; 886 #if (PAGE_SIZE < 8192) 887 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 888 #else 889 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 890 SKB_DATA_ALIGN(xdp->data_end - 891 xdp->data_hard_start); 892 #endif 893 struct sk_buff *skb; 894 895 /* Prefetch first cache line of first page. If xdp->data_meta 896 * is unused, this points exactly as xdp->data, otherwise we 897 * likely have a consumer accessing first few bytes of meta 898 * data, and then actual data. 899 */ 900 net_prefetch(xdp->data_meta); 901 /* build an skb around the page buffer */ 902 skb = build_skb(xdp->data_hard_start, truesize); 903 if (unlikely(!skb)) 904 return NULL; 905 906 /* must to record Rx queue, otherwise OS features such as 907 * symmetric queue won't work 908 */ 909 skb_record_rx_queue(skb, rx_ring->q_index); 910 911 /* update pointers within the skb to store the data */ 912 skb_reserve(skb, xdp->data - xdp->data_hard_start); 913 __skb_put(skb, xdp->data_end - xdp->data); 914 if (metasize) 915 skb_metadata_set(skb, metasize); 916 917 /* buffer is used by skb, update page_offset */ 918 ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 919 920 return skb; 921 } 922 923 /** 924 * ice_construct_skb - Allocate skb and populate it 925 * @rx_ring: Rx descriptor ring to transact packets on 926 * @rx_buf: Rx buffer to pull data from 927 * @xdp: xdp_buff pointing to the data 928 * 929 * This function allocates an skb. It then populates it with the page 930 * data from the current receive descriptor, taking care to set up the 931 * skb correctly. 932 */ 933 static struct sk_buff * 934 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 935 struct xdp_buff *xdp) 936 { 937 unsigned int size = xdp->data_end - xdp->data; 938 unsigned int headlen; 939 struct sk_buff *skb; 940 941 /* prefetch first cache line of first page */ 942 net_prefetch(xdp->data); 943 944 /* allocate a skb to store the frags */ 945 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, 946 GFP_ATOMIC | __GFP_NOWARN); 947 if (unlikely(!skb)) 948 return NULL; 949 950 skb_record_rx_queue(skb, rx_ring->q_index); 951 /* Determine available headroom for copy */ 952 headlen = size; 953 if (headlen > ICE_RX_HDR_SIZE) 954 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); 955 956 /* align pull length to size of long to optimize memcpy performance */ 957 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, 958 sizeof(long))); 959 960 /* if we exhaust the linear part then add what is left as a frag */ 961 size -= headlen; 962 if (size) { 963 #if (PAGE_SIZE >= 8192) 964 unsigned int truesize = SKB_DATA_ALIGN(size); 965 #else 966 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; 967 #endif 968 skb_add_rx_frag(skb, 0, rx_buf->page, 969 rx_buf->page_offset + headlen, size, truesize); 970 /* buffer is used by skb, update page_offset */ 971 ice_rx_buf_adjust_pg_offset(rx_buf, truesize); 972 } else { 973 /* buffer is unused, reset bias back to rx_buf; data was copied 974 * onto skb's linear part so there's no need for adjusting 975 * page offset and we can reuse this buffer as-is 976 */ 977 rx_buf->pagecnt_bias++; 978 } 979 980 return skb; 981 } 982 983 /** 984 * ice_put_rx_buf - Clean up used buffer and either recycle or free 985 * @rx_ring: Rx descriptor ring to transact packets on 986 * @rx_buf: Rx buffer to pull data from 987 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect() 988 * 989 * This function will update next_to_clean and then clean up the contents 990 * of the rx_buf. It will either recycle the buffer or unmap it and free 991 * the associated resources. 992 */ 993 static void 994 ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, 995 int rx_buf_pgcnt) 996 { 997 u16 ntc = rx_ring->next_to_clean + 1; 998 999 /* fetch, update, and store next to clean */ 1000 ntc = (ntc < rx_ring->count) ? ntc : 0; 1001 rx_ring->next_to_clean = ntc; 1002 1003 if (!rx_buf) 1004 return; 1005 1006 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { 1007 /* hand second half of page back to the ring */ 1008 ice_reuse_rx_page(rx_ring, rx_buf); 1009 } else { 1010 /* we are not reusing the buffer so unmap it */ 1011 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, 1012 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, 1013 ICE_RX_DMA_ATTR); 1014 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); 1015 } 1016 1017 /* clear contents of buffer_info */ 1018 rx_buf->page = NULL; 1019 } 1020 1021 /** 1022 * ice_is_non_eop - process handling of non-EOP buffers 1023 * @rx_ring: Rx ring being processed 1024 * @rx_desc: Rx descriptor for current buffer 1025 * 1026 * If the buffer is an EOP buffer, this function exits returning false, 1027 * otherwise return true indicating that this is in fact a non-EOP buffer. 1028 */ 1029 static bool 1030 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc) 1031 { 1032 /* if we are the last buffer then there is nothing else to do */ 1033 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) 1034 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) 1035 return false; 1036 1037 rx_ring->rx_stats.non_eop_descs++; 1038 1039 return true; 1040 } 1041 1042 /** 1043 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf 1044 * @rx_ring: Rx descriptor ring to transact packets on 1045 * @budget: Total limit on number of packets to process 1046 * 1047 * This function provides a "bounce buffer" approach to Rx interrupt 1048 * processing. The advantage to this is that on systems that have 1049 * expensive overhead for IOMMU access this provides a means of avoiding 1050 * it by maintaining the mapping of the page to the system. 1051 * 1052 * Returns amount of work completed 1053 */ 1054 int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) 1055 { 1056 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; 1057 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 1058 unsigned int offset = rx_ring->rx_offset; 1059 unsigned int xdp_res, xdp_xmit = 0; 1060 struct sk_buff *skb = rx_ring->skb; 1061 struct bpf_prog *xdp_prog = NULL; 1062 struct xdp_buff xdp; 1063 bool failure; 1064 1065 /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 1066 #if (PAGE_SIZE < 8192) 1067 frame_sz = ice_rx_frame_truesize(rx_ring, 0); 1068 #endif 1069 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); 1070 1071 /* start the loop to process Rx packets bounded by 'budget' */ 1072 while (likely(total_rx_pkts < (unsigned int)budget)) { 1073 union ice_32b_rx_flex_desc *rx_desc; 1074 struct ice_rx_buf *rx_buf; 1075 unsigned char *hard_start; 1076 unsigned int size; 1077 u16 stat_err_bits; 1078 int rx_buf_pgcnt; 1079 u16 vlan_tag = 0; 1080 u8 rx_ptype; 1081 1082 /* get the Rx desc from Rx ring based on 'next_to_clean' */ 1083 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 1084 1085 /* status_error_len will always be zero for unused descriptors 1086 * because it's cleared in cleanup, and overlaps with hdr_addr 1087 * which is always zero because packet split isn't used, if the 1088 * hardware wrote DD then it will be non-zero 1089 */ 1090 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 1091 if (!ice_test_staterr(rx_desc, stat_err_bits)) 1092 break; 1093 1094 /* This memory barrier is needed to keep us from reading 1095 * any other fields out of the rx_desc until we know the 1096 * DD bit is set. 1097 */ 1098 dma_rmb(); 1099 1100 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 1101 ice_put_rx_buf(rx_ring, NULL, 0); 1102 cleaned_count++; 1103 continue; 1104 } 1105 1106 size = le16_to_cpu(rx_desc->wb.pkt_len) & 1107 ICE_RX_FLX_DESC_PKT_LEN_M; 1108 1109 /* retrieve a buffer from the ring */ 1110 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt); 1111 1112 if (!size) { 1113 xdp.data = NULL; 1114 xdp.data_end = NULL; 1115 xdp.data_hard_start = NULL; 1116 xdp.data_meta = NULL; 1117 goto construct_skb; 1118 } 1119 1120 hard_start = page_address(rx_buf->page) + rx_buf->page_offset - 1121 offset; 1122 xdp_prepare_buff(&xdp, hard_start, offset, size, true); 1123 #if (PAGE_SIZE > 4096) 1124 /* At larger PAGE_SIZE, frame_sz depend on len size */ 1125 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 1126 #endif 1127 1128 rcu_read_lock(); 1129 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 1130 if (!xdp_prog) { 1131 rcu_read_unlock(); 1132 goto construct_skb; 1133 } 1134 1135 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); 1136 rcu_read_unlock(); 1137 if (!xdp_res) 1138 goto construct_skb; 1139 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 1140 xdp_xmit |= xdp_res; 1141 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 1142 } else { 1143 rx_buf->pagecnt_bias++; 1144 } 1145 total_rx_bytes += size; 1146 total_rx_pkts++; 1147 1148 cleaned_count++; 1149 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1150 continue; 1151 construct_skb: 1152 if (skb) { 1153 ice_add_rx_frag(rx_ring, rx_buf, skb, size); 1154 } else if (likely(xdp.data)) { 1155 if (ice_ring_uses_build_skb(rx_ring)) 1156 skb = ice_build_skb(rx_ring, rx_buf, &xdp); 1157 else 1158 skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 1159 } 1160 /* exit if we failed to retrieve a buffer */ 1161 if (!skb) { 1162 rx_ring->rx_stats.alloc_buf_failed++; 1163 if (rx_buf) 1164 rx_buf->pagecnt_bias++; 1165 break; 1166 } 1167 1168 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 1169 cleaned_count++; 1170 1171 /* skip if it is NOP desc */ 1172 if (ice_is_non_eop(rx_ring, rx_desc)) 1173 continue; 1174 1175 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 1176 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 1177 dev_kfree_skb_any(skb); 1178 continue; 1179 } 1180 1181 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 1182 if (ice_test_staterr(rx_desc, stat_err_bits)) 1183 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 1184 1185 /* pad the skb if needed, to make a valid ethernet frame */ 1186 if (eth_skb_pad(skb)) { 1187 skb = NULL; 1188 continue; 1189 } 1190 1191 /* probably a little skewed due to removing CRC */ 1192 total_rx_bytes += skb->len; 1193 1194 /* populate checksum, VLAN, and protocol */ 1195 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 1196 ICE_RX_FLEX_DESC_PTYPE_M; 1197 1198 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 1199 1200 /* send completed skb up the stack */ 1201 ice_receive_skb(rx_ring, skb, vlan_tag); 1202 skb = NULL; 1203 1204 /* update budget accounting */ 1205 total_rx_pkts++; 1206 } 1207 1208 /* return up to cleaned_count buffers to hardware */ 1209 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 1210 1211 if (xdp_prog) 1212 ice_finalize_xdp_rx(rx_ring, xdp_xmit); 1213 rx_ring->skb = skb; 1214 1215 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 1216 1217 /* guarantee a trip back through this routine if there was a failure */ 1218 return failure ? budget : (int)total_rx_pkts; 1219 } 1220 1221 /** 1222 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic 1223 * @port_info: port_info structure containing the current link speed 1224 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine 1225 * @itr: ITR value to update 1226 * 1227 * Calculate how big of an increment should be applied to the ITR value passed 1228 * in based on wmem_default, SKB overhead, ethernet overhead, and the current 1229 * link speed. 1230 * 1231 * The following is a calculation derived from: 1232 * wmem_default / (size + overhead) = desired_pkts_per_int 1233 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 1234 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 1235 * 1236 * Assuming wmem_default is 212992 and overhead is 640 bytes per 1237 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 1238 * formula down to: 1239 * 1240 * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 1241 * ITR = -------------------------------------------- * -------------- 1242 * rate pkt_size + 640 1243 */ 1244 static unsigned int 1245 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, 1246 unsigned int avg_pkt_size, 1247 unsigned int itr) 1248 { 1249 switch (port_info->phy.link_info.link_speed) { 1250 case ICE_AQ_LINK_SPEED_100GB: 1251 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), 1252 avg_pkt_size + 640); 1253 break; 1254 case ICE_AQ_LINK_SPEED_50GB: 1255 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), 1256 avg_pkt_size + 640); 1257 break; 1258 case ICE_AQ_LINK_SPEED_40GB: 1259 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), 1260 avg_pkt_size + 640); 1261 break; 1262 case ICE_AQ_LINK_SPEED_25GB: 1263 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), 1264 avg_pkt_size + 640); 1265 break; 1266 case ICE_AQ_LINK_SPEED_20GB: 1267 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), 1268 avg_pkt_size + 640); 1269 break; 1270 case ICE_AQ_LINK_SPEED_10GB: 1271 default: 1272 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), 1273 avg_pkt_size + 640); 1274 break; 1275 } 1276 1277 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 1278 itr &= ICE_ITR_ADAPTIVE_LATENCY; 1279 itr += ICE_ITR_ADAPTIVE_MAX_USECS; 1280 } 1281 1282 return itr; 1283 } 1284 1285 /** 1286 * ice_update_itr - update the adaptive ITR value based on statistics 1287 * @q_vector: structure containing interrupt and ring information 1288 * @rc: structure containing ring performance data 1289 * 1290 * Stores a new ITR value based on packets and byte 1291 * counts during the last interrupt. The advantage of per interrupt 1292 * computation is faster updates and more accurate ITR for the current 1293 * traffic pattern. Constants in this function were computed 1294 * based on theoretical maximum wire speed and thresholds were set based 1295 * on testing data as well as attempting to minimize response time 1296 * while increasing bulk throughput. 1297 */ 1298 static void 1299 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) 1300 { 1301 unsigned long next_update = jiffies; 1302 unsigned int packets, bytes, itr; 1303 bool container_is_rx; 1304 1305 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) 1306 return; 1307 1308 /* If itr_countdown is set it means we programmed an ITR within 1309 * the last 4 interrupt cycles. This has a side effect of us 1310 * potentially firing an early interrupt. In order to work around 1311 * this we need to throw out any data received for a few 1312 * interrupts following the update. 1313 */ 1314 if (q_vector->itr_countdown) { 1315 itr = rc->target_itr; 1316 goto clear_counts; 1317 } 1318 1319 container_is_rx = (&q_vector->rx == rc); 1320 /* For Rx we want to push the delay up and default to low latency. 1321 * for Tx we want to pull the delay down and default to high latency. 1322 */ 1323 itr = container_is_rx ? 1324 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : 1325 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; 1326 1327 /* If we didn't update within up to 1 - 2 jiffies we can assume 1328 * that either packets are coming in so slow there hasn't been 1329 * any work, or that there is so much work that NAPI is dealing 1330 * with interrupt moderation and we don't need to do anything. 1331 */ 1332 if (time_after(next_update, rc->next_update)) 1333 goto clear_counts; 1334 1335 prefetch(q_vector->vsi->port_info); 1336 1337 packets = rc->total_pkts; 1338 bytes = rc->total_bytes; 1339 1340 if (container_is_rx) { 1341 /* If Rx there are 1 to 4 packets and bytes are less than 1342 * 9000 assume insufficient data to use bulk rate limiting 1343 * approach unless Tx is already in bulk rate limiting. We 1344 * are likely latency driven. 1345 */ 1346 if (packets && packets < 4 && bytes < 9000 && 1347 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { 1348 itr = ICE_ITR_ADAPTIVE_LATENCY; 1349 goto adjust_by_size_and_speed; 1350 } 1351 } else if (packets < 4) { 1352 /* If we have Tx and Rx ITR maxed and Tx ITR is running in 1353 * bulk mode and we are receiving 4 or fewer packets just 1354 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so 1355 * that the Rx can relax. 1356 */ 1357 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && 1358 (q_vector->rx.target_itr & ICE_ITR_MASK) == 1359 ICE_ITR_ADAPTIVE_MAX_USECS) 1360 goto clear_counts; 1361 } else if (packets > 32) { 1362 /* If we have processed over 32 packets in a single interrupt 1363 * for Tx assume we need to switch over to "bulk" mode. 1364 */ 1365 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; 1366 } 1367 1368 /* We have no packets to actually measure against. This means 1369 * either one of the other queues on this vector is active or 1370 * we are a Tx queue doing TSO with too high of an interrupt rate. 1371 * 1372 * Between 4 and 56 we can assume that our current interrupt delay 1373 * is only slightly too low. As such we should increase it by a small 1374 * fixed amount. 1375 */ 1376 if (packets < 56) { 1377 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; 1378 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 1379 itr &= ICE_ITR_ADAPTIVE_LATENCY; 1380 itr += ICE_ITR_ADAPTIVE_MAX_USECS; 1381 } 1382 goto clear_counts; 1383 } 1384 1385 if (packets <= 256) { 1386 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); 1387 itr &= ICE_ITR_MASK; 1388 1389 /* Between 56 and 112 is our "goldilocks" zone where we are 1390 * working out "just right". Just report that our current 1391 * ITR is good for us. 1392 */ 1393 if (packets <= 112) 1394 goto clear_counts; 1395 1396 /* If packet count is 128 or greater we are likely looking 1397 * at a slight overrun of the delay we want. Try halving 1398 * our delay to see if that will cut the number of packets 1399 * in half per interrupt. 1400 */ 1401 itr >>= 1; 1402 itr &= ICE_ITR_MASK; 1403 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) 1404 itr = ICE_ITR_ADAPTIVE_MIN_USECS; 1405 1406 goto clear_counts; 1407 } 1408 1409 /* The paths below assume we are dealing with a bulk ITR since 1410 * number of packets is greater than 256. We are just going to have 1411 * to compute a value and try to bring the count under control, 1412 * though for smaller packet sizes there isn't much we can do as 1413 * NAPI polling will likely be kicking in sooner rather than later. 1414 */ 1415 itr = ICE_ITR_ADAPTIVE_BULK; 1416 1417 adjust_by_size_and_speed: 1418 1419 /* based on checks above packets cannot be 0 so division is safe */ 1420 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, 1421 bytes / packets, itr); 1422 1423 clear_counts: 1424 /* write back value */ 1425 rc->target_itr = itr; 1426 1427 /* next update should occur within next jiffy */ 1428 rc->next_update = next_update + 1; 1429 1430 rc->total_bytes = 0; 1431 rc->total_pkts = 0; 1432 } 1433 1434 /** 1435 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register 1436 * @itr_idx: interrupt throttling index 1437 * @itr: interrupt throttling value in usecs 1438 */ 1439 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) 1440 { 1441 /* The ITR value is reported in microseconds, and the register value is 1442 * recorded in 2 microsecond units. For this reason we only need to 1443 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this 1444 * granularity as a shift instead of division. The mask makes sure the 1445 * ITR value is never odd so we don't accidentally write into the field 1446 * prior to the ITR field. 1447 */ 1448 itr &= ICE_ITR_MASK; 1449 1450 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 1451 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | 1452 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); 1453 } 1454 1455 /* The act of updating the ITR will cause it to immediately trigger. In order 1456 * to prevent this from throwing off adaptive update statistics we defer the 1457 * update so that it can only happen so often. So after either Tx or Rx are 1458 * updated we make the adaptive scheme wait until either the ITR completely 1459 * expires via the next_update expiration or we have been through at least 1460 * 3 interrupts. 1461 */ 1462 #define ITR_COUNTDOWN_START 3 1463 1464 /** 1465 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt 1466 * @q_vector: q_vector for which ITR is being updated and interrupt enabled 1467 */ 1468 static void ice_update_ena_itr(struct ice_q_vector *q_vector) 1469 { 1470 struct ice_ring_container *tx = &q_vector->tx; 1471 struct ice_ring_container *rx = &q_vector->rx; 1472 struct ice_vsi *vsi = q_vector->vsi; 1473 u32 itr_val; 1474 1475 /* when exiting WB_ON_ITR just reset the countdown and let ITR 1476 * resume it's normal "interrupts-enabled" path 1477 */ 1478 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) 1479 q_vector->itr_countdown = 0; 1480 1481 /* This will do nothing if dynamic updates are not enabled */ 1482 ice_update_itr(q_vector, tx); 1483 ice_update_itr(q_vector, rx); 1484 1485 /* This block of logic allows us to get away with only updating 1486 * one ITR value with each interrupt. The idea is to perform a 1487 * pseudo-lazy update with the following criteria. 1488 * 1489 * 1. Rx is given higher priority than Tx if both are in same state 1490 * 2. If we must reduce an ITR that is given highest priority. 1491 * 3. We then give priority to increasing ITR based on amount. 1492 */ 1493 if (rx->target_itr < rx->current_itr) { 1494 /* Rx ITR needs to be reduced, this is highest priority */ 1495 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 1496 rx->current_itr = rx->target_itr; 1497 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1498 } else if ((tx->target_itr < tx->current_itr) || 1499 ((rx->target_itr - rx->current_itr) < 1500 (tx->target_itr - tx->current_itr))) { 1501 /* Tx ITR needs to be reduced, this is second priority 1502 * Tx ITR needs to be increased more than Rx, fourth priority 1503 */ 1504 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); 1505 tx->current_itr = tx->target_itr; 1506 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1507 } else if (rx->current_itr != rx->target_itr) { 1508 /* Rx ITR needs to be increased, third priority */ 1509 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); 1510 rx->current_itr = rx->target_itr; 1511 q_vector->itr_countdown = ITR_COUNTDOWN_START; 1512 } else { 1513 /* Still have to re-enable the interrupts */ 1514 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); 1515 if (q_vector->itr_countdown) 1516 q_vector->itr_countdown--; 1517 } 1518 1519 if (!test_bit(__ICE_DOWN, vsi->state)) 1520 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val); 1521 } 1522 1523 /** 1524 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector 1525 * @q_vector: q_vector to set WB_ON_ITR on 1526 * 1527 * We need to tell hardware to write-back completed descriptors even when 1528 * interrupts are disabled. Descriptors will be written back on cache line 1529 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR 1530 * descriptors may not be written back if they don't fill a cache line until 1531 * the next interrupt. 1532 * 1533 * This sets the write-back frequency to whatever was set previously for the 1534 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we 1535 * aren't meddling with the INTENA_M bit. 1536 */ 1537 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector) 1538 { 1539 struct ice_vsi *vsi = q_vector->vsi; 1540 1541 /* already in wb_on_itr mode no need to change it */ 1542 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) 1543 return; 1544 1545 /* use previously set ITR values for all of the ITR indices by 1546 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and 1547 * be static in non-adaptive mode (user configured) 1548 */ 1549 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), 1550 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) & 1551 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | 1552 GLINT_DYN_CTL_WB_ON_ITR_M); 1553 1554 q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE; 1555 } 1556 1557 /** 1558 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine 1559 * @napi: napi struct with our devices info in it 1560 * @budget: amount of work driver is allowed to do this pass, in packets 1561 * 1562 * This function will clean all queues associated with a q_vector. 1563 * 1564 * Returns the amount of work done 1565 */ 1566 int ice_napi_poll(struct napi_struct *napi, int budget) 1567 { 1568 struct ice_q_vector *q_vector = 1569 container_of(napi, struct ice_q_vector, napi); 1570 bool clean_complete = true; 1571 struct ice_ring *ring; 1572 int budget_per_ring; 1573 int work_done = 0; 1574 1575 /* Since the actual Tx work is minimal, we can give the Tx a larger 1576 * budget and be more aggressive about cleaning up the Tx descriptors. 1577 */ 1578 ice_for_each_ring(ring, q_vector->tx) { 1579 bool wd = ring->xsk_pool ? 1580 ice_clean_tx_irq_zc(ring, budget) : 1581 ice_clean_tx_irq(ring, budget); 1582 1583 if (!wd) 1584 clean_complete = false; 1585 } 1586 1587 /* Handle case where we are called by netpoll with a budget of 0 */ 1588 if (unlikely(budget <= 0)) 1589 return budget; 1590 1591 /* normally we have 1 Rx ring per q_vector */ 1592 if (unlikely(q_vector->num_ring_rx > 1)) 1593 /* We attempt to distribute budget to each Rx queue fairly, but 1594 * don't allow the budget to go below 1 because that would exit 1595 * polling early. 1596 */ 1597 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1); 1598 else 1599 /* Max of 1 Rx ring in this q_vector so give it the budget */ 1600 budget_per_ring = budget; 1601 1602 ice_for_each_ring(ring, q_vector->rx) { 1603 int cleaned; 1604 1605 /* A dedicated path for zero-copy allows making a single 1606 * comparison in the irq context instead of many inside the 1607 * ice_clean_rx_irq function and makes the codebase cleaner. 1608 */ 1609 cleaned = ring->xsk_pool ? 1610 ice_clean_rx_irq_zc(ring, budget_per_ring) : 1611 ice_clean_rx_irq(ring, budget_per_ring); 1612 work_done += cleaned; 1613 /* if we clean as many as budgeted, we must not be done */ 1614 if (cleaned >= budget_per_ring) 1615 clean_complete = false; 1616 } 1617 1618 /* If work not completed, return budget and polling will return */ 1619 if (!clean_complete) { 1620 /* Set the writeback on ITR so partial completions of 1621 * cache-lines will still continue even if we're polling. 1622 */ 1623 ice_set_wb_on_itr(q_vector); 1624 return budget; 1625 } 1626 1627 /* Exit the polling mode, but don't re-enable interrupts if stack might 1628 * poll us due to busy-polling 1629 */ 1630 if (likely(napi_complete_done(napi, work_done))) 1631 ice_update_ena_itr(q_vector); 1632 else 1633 ice_set_wb_on_itr(q_vector); 1634 1635 return min_t(int, work_done, budget - 1); 1636 } 1637 1638 /** 1639 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions 1640 * @tx_ring: the ring to be checked 1641 * @size: the size buffer we want to assure is available 1642 * 1643 * Returns -EBUSY if a stop is needed, else 0 1644 */ 1645 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 1646 { 1647 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); 1648 /* Memory barrier before checking head and tail */ 1649 smp_mb(); 1650 1651 /* Check again in a case another CPU has just made room available. */ 1652 if (likely(ICE_DESC_UNUSED(tx_ring) < size)) 1653 return -EBUSY; 1654 1655 /* A reprieve! - use start_subqueue because it doesn't call schedule */ 1656 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); 1657 ++tx_ring->tx_stats.restart_q; 1658 return 0; 1659 } 1660 1661 /** 1662 * ice_maybe_stop_tx - 1st level check for Tx stop conditions 1663 * @tx_ring: the ring to be checked 1664 * @size: the size buffer we want to assure is available 1665 * 1666 * Returns 0 if stop is not needed 1667 */ 1668 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) 1669 { 1670 if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) 1671 return 0; 1672 1673 return __ice_maybe_stop_tx(tx_ring, size); 1674 } 1675 1676 /** 1677 * ice_tx_map - Build the Tx descriptor 1678 * @tx_ring: ring to send buffer on 1679 * @first: first buffer info buffer to use 1680 * @off: pointer to struct that holds offload parameters 1681 * 1682 * This function loops over the skb data pointed to by *first 1683 * and gets a physical address for each memory location and programs 1684 * it and the length into the transmit descriptor. 1685 */ 1686 static void 1687 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, 1688 struct ice_tx_offload_params *off) 1689 { 1690 u64 td_offset, td_tag, td_cmd; 1691 u16 i = tx_ring->next_to_use; 1692 unsigned int data_len, size; 1693 struct ice_tx_desc *tx_desc; 1694 struct ice_tx_buf *tx_buf; 1695 struct sk_buff *skb; 1696 skb_frag_t *frag; 1697 dma_addr_t dma; 1698 1699 td_tag = off->td_l2tag1; 1700 td_cmd = off->td_cmd; 1701 td_offset = off->td_offset; 1702 skb = first->skb; 1703 1704 data_len = skb->data_len; 1705 size = skb_headlen(skb); 1706 1707 tx_desc = ICE_TX_DESC(tx_ring, i); 1708 1709 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) { 1710 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1; 1711 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >> 1712 ICE_TX_FLAGS_VLAN_S; 1713 } 1714 1715 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 1716 1717 tx_buf = first; 1718 1719 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { 1720 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 1721 1722 if (dma_mapping_error(tx_ring->dev, dma)) 1723 goto dma_error; 1724 1725 /* record length, and DMA address */ 1726 dma_unmap_len_set(tx_buf, len, size); 1727 dma_unmap_addr_set(tx_buf, dma, dma); 1728 1729 /* align size to end of page */ 1730 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1); 1731 tx_desc->buf_addr = cpu_to_le64(dma); 1732 1733 /* account for data chunks larger than the hardware 1734 * can handle 1735 */ 1736 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { 1737 tx_desc->cmd_type_offset_bsz = 1738 ice_build_ctob(td_cmd, td_offset, max_data, 1739 td_tag); 1740 1741 tx_desc++; 1742 i++; 1743 1744 if (i == tx_ring->count) { 1745 tx_desc = ICE_TX_DESC(tx_ring, 0); 1746 i = 0; 1747 } 1748 1749 dma += max_data; 1750 size -= max_data; 1751 1752 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED; 1753 tx_desc->buf_addr = cpu_to_le64(dma); 1754 } 1755 1756 if (likely(!data_len)) 1757 break; 1758 1759 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset, 1760 size, td_tag); 1761 1762 tx_desc++; 1763 i++; 1764 1765 if (i == tx_ring->count) { 1766 tx_desc = ICE_TX_DESC(tx_ring, 0); 1767 i = 0; 1768 } 1769 1770 size = skb_frag_size(frag); 1771 data_len -= size; 1772 1773 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, 1774 DMA_TO_DEVICE); 1775 1776 tx_buf = &tx_ring->tx_buf[i]; 1777 } 1778 1779 /* record bytecount for BQL */ 1780 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); 1781 1782 /* record SW timestamp if HW timestamp is not available */ 1783 skb_tx_timestamp(first->skb); 1784 1785 i++; 1786 if (i == tx_ring->count) 1787 i = 0; 1788 1789 /* write last descriptor with RS and EOP bits */ 1790 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; 1791 tx_desc->cmd_type_offset_bsz = 1792 ice_build_ctob(td_cmd, td_offset, size, td_tag); 1793 1794 /* Force memory writes to complete before letting h/w know there 1795 * are new descriptors to fetch. 1796 * 1797 * We also use this memory barrier to make certain all of the 1798 * status bits have been updated before next_to_watch is written. 1799 */ 1800 wmb(); 1801 1802 /* set next_to_watch value indicating a packet is present */ 1803 first->next_to_watch = tx_desc; 1804 1805 tx_ring->next_to_use = i; 1806 1807 ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 1808 1809 /* notify HW of packet */ 1810 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 1811 writel(i, tx_ring->tail); 1812 1813 return; 1814 1815 dma_error: 1816 /* clear DMA mappings for failed tx_buf map */ 1817 for (;;) { 1818 tx_buf = &tx_ring->tx_buf[i]; 1819 ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 1820 if (tx_buf == first) 1821 break; 1822 if (i == 0) 1823 i = tx_ring->count; 1824 i--; 1825 } 1826 1827 tx_ring->next_to_use = i; 1828 } 1829 1830 /** 1831 * ice_tx_csum - Enable Tx checksum offloads 1832 * @first: pointer to the first descriptor 1833 * @off: pointer to struct that holds offload parameters 1834 * 1835 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 1836 */ 1837 static 1838 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1839 { 1840 u32 l4_len = 0, l3_len = 0, l2_len = 0; 1841 struct sk_buff *skb = first->skb; 1842 union { 1843 struct iphdr *v4; 1844 struct ipv6hdr *v6; 1845 unsigned char *hdr; 1846 } ip; 1847 union { 1848 struct tcphdr *tcp; 1849 unsigned char *hdr; 1850 } l4; 1851 __be16 frag_off, protocol; 1852 unsigned char *exthdr; 1853 u32 offset, cmd = 0; 1854 u8 l4_proto = 0; 1855 1856 if (skb->ip_summed != CHECKSUM_PARTIAL) 1857 return 0; 1858 1859 ip.hdr = skb_network_header(skb); 1860 l4.hdr = skb_transport_header(skb); 1861 1862 /* compute outer L2 header size */ 1863 l2_len = ip.hdr - skb->data; 1864 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1865 1866 protocol = vlan_get_protocol(skb); 1867 1868 if (protocol == htons(ETH_P_IP)) 1869 first->tx_flags |= ICE_TX_FLAGS_IPV4; 1870 else if (protocol == htons(ETH_P_IPV6)) 1871 first->tx_flags |= ICE_TX_FLAGS_IPV6; 1872 1873 if (skb->encapsulation) { 1874 bool gso_ena = false; 1875 u32 tunnel = 0; 1876 1877 /* define outer network header type */ 1878 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1879 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 1880 ICE_TX_CTX_EIPT_IPV4 : 1881 ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 1882 l4_proto = ip.v4->protocol; 1883 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1884 int ret; 1885 1886 tunnel |= ICE_TX_CTX_EIPT_IPV6; 1887 exthdr = ip.hdr + sizeof(*ip.v6); 1888 l4_proto = ip.v6->nexthdr; 1889 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, 1890 &l4_proto, &frag_off); 1891 if (ret < 0) 1892 return -1; 1893 } 1894 1895 /* define outer transport */ 1896 switch (l4_proto) { 1897 case IPPROTO_UDP: 1898 tunnel |= ICE_TXD_CTX_UDP_TUNNELING; 1899 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1900 break; 1901 case IPPROTO_GRE: 1902 tunnel |= ICE_TXD_CTX_GRE_TUNNELING; 1903 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1904 break; 1905 case IPPROTO_IPIP: 1906 case IPPROTO_IPV6: 1907 first->tx_flags |= ICE_TX_FLAGS_TUNNEL; 1908 l4.hdr = skb_inner_network_header(skb); 1909 break; 1910 default: 1911 if (first->tx_flags & ICE_TX_FLAGS_TSO) 1912 return -1; 1913 1914 skb_checksum_help(skb); 1915 return 0; 1916 } 1917 1918 /* compute outer L3 header size */ 1919 tunnel |= ((l4.hdr - ip.hdr) / 4) << 1920 ICE_TXD_CTX_QW0_EIPLEN_S; 1921 1922 /* switch IP header pointer from outer to inner header */ 1923 ip.hdr = skb_inner_network_header(skb); 1924 1925 /* compute tunnel header size */ 1926 tunnel |= ((ip.hdr - l4.hdr) / 2) << 1927 ICE_TXD_CTX_QW0_NATLEN_S; 1928 1929 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; 1930 /* indicate if we need to offload outer UDP header */ 1931 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena && 1932 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 1933 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M; 1934 1935 /* record tunnel offload values */ 1936 off->cd_tunnel_params |= tunnel; 1937 1938 /* set DTYP=1 to indicate that it's an Tx context descriptor 1939 * in IPsec tunnel mode with Tx offloads in Quad word 1 1940 */ 1941 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX; 1942 1943 /* switch L4 header pointer from outer to inner */ 1944 l4.hdr = skb_inner_transport_header(skb); 1945 l4_proto = 0; 1946 1947 /* reset type as we transition from outer to inner headers */ 1948 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6); 1949 if (ip.v4->version == 4) 1950 first->tx_flags |= ICE_TX_FLAGS_IPV4; 1951 if (ip.v6->version == 6) 1952 first->tx_flags |= ICE_TX_FLAGS_IPV6; 1953 } 1954 1955 /* Enable IP checksum offloads */ 1956 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1957 l4_proto = ip.v4->protocol; 1958 /* the stack computes the IP header already, the only time we 1959 * need the hardware to recompute it is in the case of TSO. 1960 */ 1961 if (first->tx_flags & ICE_TX_FLAGS_TSO) 1962 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 1963 else 1964 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 1965 1966 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { 1967 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 1968 exthdr = ip.hdr + sizeof(*ip.v6); 1969 l4_proto = ip.v6->nexthdr; 1970 if (l4.hdr != exthdr) 1971 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, 1972 &frag_off); 1973 } else { 1974 return -1; 1975 } 1976 1977 /* compute inner L3 header size */ 1978 l3_len = l4.hdr - ip.hdr; 1979 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; 1980 1981 /* Enable L4 checksum offloads */ 1982 switch (l4_proto) { 1983 case IPPROTO_TCP: 1984 /* enable checksum offloads */ 1985 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 1986 l4_len = l4.tcp->doff; 1987 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1988 break; 1989 case IPPROTO_UDP: 1990 /* enable UDP checksum offload */ 1991 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 1992 l4_len = (sizeof(struct udphdr) >> 2); 1993 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 1994 break; 1995 case IPPROTO_SCTP: 1996 /* enable SCTP checksum offload */ 1997 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 1998 l4_len = sizeof(struct sctphdr) >> 2; 1999 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; 2000 break; 2001 2002 default: 2003 if (first->tx_flags & ICE_TX_FLAGS_TSO) 2004 return -1; 2005 skb_checksum_help(skb); 2006 return 0; 2007 } 2008 2009 off->td_cmd |= cmd; 2010 off->td_offset |= offset; 2011 return 1; 2012 } 2013 2014 /** 2015 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 2016 * @tx_ring: ring to send buffer on 2017 * @first: pointer to struct ice_tx_buf 2018 * 2019 * Checks the skb and set up correspondingly several generic transmit flags 2020 * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 2021 */ 2022 static void 2023 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) 2024 { 2025 struct sk_buff *skb = first->skb; 2026 2027 /* nothing left to do, software offloaded VLAN */ 2028 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) 2029 return; 2030 2031 /* currently, we always assume 802.1Q for VLAN insertion as VLAN 2032 * insertion for 802.1AD is not supported 2033 */ 2034 if (skb_vlan_tag_present(skb)) { 2035 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 2036 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 2037 } 2038 2039 ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 2040 } 2041 2042 /** 2043 * ice_tso - computes mss and TSO length to prepare for TSO 2044 * @first: pointer to struct ice_tx_buf 2045 * @off: pointer to struct that holds offload parameters 2046 * 2047 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise. 2048 */ 2049 static 2050 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2051 { 2052 struct sk_buff *skb = first->skb; 2053 union { 2054 struct iphdr *v4; 2055 struct ipv6hdr *v6; 2056 unsigned char *hdr; 2057 } ip; 2058 union { 2059 struct tcphdr *tcp; 2060 struct udphdr *udp; 2061 unsigned char *hdr; 2062 } l4; 2063 u64 cd_mss, cd_tso_len; 2064 u32 paylen; 2065 u8 l4_start; 2066 int err; 2067 2068 if (skb->ip_summed != CHECKSUM_PARTIAL) 2069 return 0; 2070 2071 if (!skb_is_gso(skb)) 2072 return 0; 2073 2074 err = skb_cow_head(skb, 0); 2075 if (err < 0) 2076 return err; 2077 2078 /* cppcheck-suppress unreadVariable */ 2079 ip.hdr = skb_network_header(skb); 2080 l4.hdr = skb_transport_header(skb); 2081 2082 /* initialize outer IP header fields */ 2083 if (ip.v4->version == 4) { 2084 ip.v4->tot_len = 0; 2085 ip.v4->check = 0; 2086 } else { 2087 ip.v6->payload_len = 0; 2088 } 2089 2090 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 2091 SKB_GSO_GRE_CSUM | 2092 SKB_GSO_IPXIP4 | 2093 SKB_GSO_IPXIP6 | 2094 SKB_GSO_UDP_TUNNEL | 2095 SKB_GSO_UDP_TUNNEL_CSUM)) { 2096 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 2097 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 2098 l4.udp->len = 0; 2099 2100 /* determine offset of outer transport header */ 2101 l4_start = (u8)(l4.hdr - skb->data); 2102 2103 /* remove payload length from outer checksum */ 2104 paylen = skb->len - l4_start; 2105 csum_replace_by_diff(&l4.udp->check, 2106 (__force __wsum)htonl(paylen)); 2107 } 2108 2109 /* reset pointers to inner headers */ 2110 2111 /* cppcheck-suppress unreadVariable */ 2112 ip.hdr = skb_inner_network_header(skb); 2113 l4.hdr = skb_inner_transport_header(skb); 2114 2115 /* initialize inner IP header fields */ 2116 if (ip.v4->version == 4) { 2117 ip.v4->tot_len = 0; 2118 ip.v4->check = 0; 2119 } else { 2120 ip.v6->payload_len = 0; 2121 } 2122 } 2123 2124 /* determine offset of transport header */ 2125 l4_start = (u8)(l4.hdr - skb->data); 2126 2127 /* remove payload length from checksum */ 2128 paylen = skb->len - l4_start; 2129 2130 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 2131 csum_replace_by_diff(&l4.udp->check, 2132 (__force __wsum)htonl(paylen)); 2133 /* compute length of UDP segmentation header */ 2134 off->header_len = (u8)sizeof(l4.udp) + l4_start; 2135 } else { 2136 csum_replace_by_diff(&l4.tcp->check, 2137 (__force __wsum)htonl(paylen)); 2138 /* compute length of TCP segmentation header */ 2139 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); 2140 } 2141 2142 /* update gso_segs and bytecount */ 2143 first->gso_segs = skb_shinfo(skb)->gso_segs; 2144 first->bytecount += (first->gso_segs - 1) * off->header_len; 2145 2146 cd_tso_len = skb->len - off->header_len; 2147 cd_mss = skb_shinfo(skb)->gso_size; 2148 2149 /* record cdesc_qw1 with TSO parameters */ 2150 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2151 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | 2152 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 2153 (cd_mss << ICE_TXD_CTX_QW1_MSS_S)); 2154 first->tx_flags |= ICE_TX_FLAGS_TSO; 2155 return 1; 2156 } 2157 2158 /** 2159 * ice_txd_use_count - estimate the number of descriptors needed for Tx 2160 * @size: transmit request size in bytes 2161 * 2162 * Due to hardware alignment restrictions (4K alignment), we need to 2163 * assume that we can have no more than 12K of data per descriptor, even 2164 * though each descriptor can take up to 16K - 1 bytes of aligned memory. 2165 * Thus, we need to divide by 12K. But division is slow! Instead, 2166 * we decompose the operation into shifts and one relatively cheap 2167 * multiply operation. 2168 * 2169 * To divide by 12K, we first divide by 4K, then divide by 3: 2170 * To divide by 4K, shift right by 12 bits 2171 * To divide by 3, multiply by 85, then divide by 256 2172 * (Divide by 256 is done by shifting right by 8 bits) 2173 * Finally, we add one to round up. Because 256 isn't an exact multiple of 2174 * 3, we'll underestimate near each multiple of 12K. This is actually more 2175 * accurate as we have 4K - 1 of wiggle room that we can fit into the last 2176 * segment. For our purposes this is accurate out to 1M which is orders of 2177 * magnitude greater than our largest possible GSO size. 2178 * 2179 * This would then be implemented as: 2180 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 2181 * 2182 * Since multiplication and division are commutative, we can reorder 2183 * operations into: 2184 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 2185 */ 2186 static unsigned int ice_txd_use_count(unsigned int size) 2187 { 2188 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 2189 } 2190 2191 /** 2192 * ice_xmit_desc_count - calculate number of Tx descriptors needed 2193 * @skb: send buffer 2194 * 2195 * Returns number of data descriptors needed for this skb. 2196 */ 2197 static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 2198 { 2199 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 2200 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2201 unsigned int count = 0, size = skb_headlen(skb); 2202 2203 for (;;) { 2204 count += ice_txd_use_count(size); 2205 2206 if (!nr_frags--) 2207 break; 2208 2209 size = skb_frag_size(frag++); 2210 } 2211 2212 return count; 2213 } 2214 2215 /** 2216 * __ice_chk_linearize - Check if there are more than 8 buffers per packet 2217 * @skb: send buffer 2218 * 2219 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 2220 * and so we need to figure out the cases where we need to linearize the skb. 2221 * 2222 * For TSO we need to count the TSO header and segment payload separately. 2223 * As such we need to check cases where we have 7 fragments or more as we 2224 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 2225 * the segment payload in the first descriptor, and another 7 for the 2226 * fragments. 2227 */ 2228 static bool __ice_chk_linearize(struct sk_buff *skb) 2229 { 2230 const skb_frag_t *frag, *stale; 2231 int nr_frags, sum; 2232 2233 /* no need to check if number of frags is less than 7 */ 2234 nr_frags = skb_shinfo(skb)->nr_frags; 2235 if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 2236 return false; 2237 2238 /* We need to walk through the list and validate that each group 2239 * of 6 fragments totals at least gso_size. 2240 */ 2241 nr_frags -= ICE_MAX_BUF_TXD - 2; 2242 frag = &skb_shinfo(skb)->frags[0]; 2243 2244 /* Initialize size to the negative value of gso_size minus 1. We 2245 * use this as the worst case scenario in which the frag ahead 2246 * of us only provides one byte which is why we are limited to 6 2247 * descriptors for a single transmit as the header and previous 2248 * fragment are already consuming 2 descriptors. 2249 */ 2250 sum = 1 - skb_shinfo(skb)->gso_size; 2251 2252 /* Add size of frags 0 through 4 to create our initial sum */ 2253 sum += skb_frag_size(frag++); 2254 sum += skb_frag_size(frag++); 2255 sum += skb_frag_size(frag++); 2256 sum += skb_frag_size(frag++); 2257 sum += skb_frag_size(frag++); 2258 2259 /* Walk through fragments adding latest fragment, testing it, and 2260 * then removing stale fragments from the sum. 2261 */ 2262 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { 2263 int stale_size = skb_frag_size(stale); 2264 2265 sum += skb_frag_size(frag++); 2266 2267 /* The stale fragment may present us with a smaller 2268 * descriptor than the actual fragment size. To account 2269 * for that we need to remove all the data on the front and 2270 * figure out what the remainder would be in the last 2271 * descriptor associated with the fragment. 2272 */ 2273 if (stale_size > ICE_MAX_DATA_PER_TXD) { 2274 int align_pad = -(skb_frag_off(stale)) & 2275 (ICE_MAX_READ_REQ_SIZE - 1); 2276 2277 sum -= align_pad; 2278 stale_size -= align_pad; 2279 2280 do { 2281 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; 2282 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; 2283 } while (stale_size > ICE_MAX_DATA_PER_TXD); 2284 } 2285 2286 /* if sum is negative we failed to make sufficient progress */ 2287 if (sum < 0) 2288 return true; 2289 2290 if (!nr_frags--) 2291 break; 2292 2293 sum -= stale_size; 2294 } 2295 2296 return false; 2297 } 2298 2299 /** 2300 * ice_chk_linearize - Check if there are more than 8 fragments per packet 2301 * @skb: send buffer 2302 * @count: number of buffers used 2303 * 2304 * Note: Our HW can't scatter-gather more than 8 fragments to build 2305 * a packet on the wire and so we need to figure out the cases where we 2306 * need to linearize the skb. 2307 */ 2308 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 2309 { 2310 /* Both TSO and single send will work if count is less than 8 */ 2311 if (likely(count < ICE_MAX_BUF_TXD)) 2312 return false; 2313 2314 if (skb_is_gso(skb)) 2315 return __ice_chk_linearize(skb); 2316 2317 /* we can support up to 8 data buffers for a single send */ 2318 return count != ICE_MAX_BUF_TXD; 2319 } 2320 2321 /** 2322 * ice_xmit_frame_ring - Sends buffer on Tx ring 2323 * @skb: send buffer 2324 * @tx_ring: ring to send buffer on 2325 * 2326 * Returns NETDEV_TX_OK if sent, else an error code 2327 */ 2328 static netdev_tx_t 2329 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) 2330 { 2331 struct ice_tx_offload_params offload = { 0 }; 2332 struct ice_vsi *vsi = tx_ring->vsi; 2333 struct ice_tx_buf *first; 2334 unsigned int count; 2335 int tso, csum; 2336 2337 count = ice_xmit_desc_count(skb); 2338 if (ice_chk_linearize(skb, count)) { 2339 if (__skb_linearize(skb)) 2340 goto out_drop; 2341 count = ice_txd_use_count(skb->len); 2342 tx_ring->tx_stats.tx_linearize++; 2343 } 2344 2345 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 2346 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 2347 * + 4 desc gap to avoid the cache line where head is, 2348 * + 1 desc for context descriptor, 2349 * otherwise try next time 2350 */ 2351 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2352 ICE_DESCS_FOR_CTX_DESC)) { 2353 tx_ring->tx_stats.tx_busy++; 2354 return NETDEV_TX_BUSY; 2355 } 2356 2357 offload.tx_ring = tx_ring; 2358 2359 /* record the location of the first descriptor for this packet */ 2360 first = &tx_ring->tx_buf[tx_ring->next_to_use]; 2361 first->skb = skb; 2362 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 2363 first->gso_segs = 1; 2364 first->tx_flags = 0; 2365 2366 /* prepare the VLAN tagging flags for Tx */ 2367 ice_tx_prepare_vlan_flags(tx_ring, first); 2368 2369 /* set up TSO offload */ 2370 tso = ice_tso(first, &offload); 2371 if (tso < 0) 2372 goto out_drop; 2373 2374 /* always set up Tx checksum offload */ 2375 csum = ice_tx_csum(first, &offload); 2376 if (csum < 0) 2377 goto out_drop; 2378 2379 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 2380 if (unlikely(skb->priority == TC_PRIO_CONTROL && 2381 vsi->type == ICE_VSI_PF && 2382 vsi->port_info->qos_cfg.is_sw_lldp)) 2383 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2384 ICE_TX_CTX_DESC_SWTCH_UPLINK << 2385 ICE_TXD_CTX_QW1_CMD_S); 2386 2387 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2388 struct ice_tx_ctx_desc *cdesc; 2389 u16 i = tx_ring->next_to_use; 2390 2391 /* grab the next descriptor */ 2392 cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2393 i++; 2394 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2395 2396 /* setup context descriptor */ 2397 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2398 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2399 cdesc->rsvd = cpu_to_le16(0); 2400 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2401 } 2402 2403 ice_tx_map(tx_ring, first, &offload); 2404 return NETDEV_TX_OK; 2405 2406 out_drop: 2407 dev_kfree_skb_any(skb); 2408 return NETDEV_TX_OK; 2409 } 2410 2411 /** 2412 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 2413 * @skb: send buffer 2414 * @netdev: network interface device structure 2415 * 2416 * Returns NETDEV_TX_OK if sent, else an error code 2417 */ 2418 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 2419 { 2420 struct ice_netdev_priv *np = netdev_priv(netdev); 2421 struct ice_vsi *vsi = np->vsi; 2422 struct ice_ring *tx_ring; 2423 2424 tx_ring = vsi->tx_rings[skb->queue_mapping]; 2425 2426 /* hardware can't handle really short frames, hardware padding works 2427 * beyond this point 2428 */ 2429 if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 2430 return NETDEV_TX_OK; 2431 2432 return ice_xmit_frame_ring(skb, tx_ring); 2433 } 2434 2435 /** 2436 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2437 * @tx_ring: tx_ring to clean 2438 */ 2439 void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring) 2440 { 2441 struct ice_vsi *vsi = tx_ring->vsi; 2442 s16 i = tx_ring->next_to_clean; 2443 int budget = ICE_DFLT_IRQ_WORK; 2444 struct ice_tx_desc *tx_desc; 2445 struct ice_tx_buf *tx_buf; 2446 2447 tx_buf = &tx_ring->tx_buf[i]; 2448 tx_desc = ICE_TX_DESC(tx_ring, i); 2449 i -= tx_ring->count; 2450 2451 do { 2452 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2453 2454 /* if next_to_watch is not set then there is no pending work */ 2455 if (!eop_desc) 2456 break; 2457 2458 /* prevent any other reads prior to eop_desc */ 2459 smp_rmb(); 2460 2461 /* if the descriptor isn't done, no work to do */ 2462 if (!(eop_desc->cmd_type_offset_bsz & 2463 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2464 break; 2465 2466 /* clear next_to_watch to prevent false hangs */ 2467 tx_buf->next_to_watch = NULL; 2468 tx_desc->buf_addr = 0; 2469 tx_desc->cmd_type_offset_bsz = 0; 2470 2471 /* move past filter desc */ 2472 tx_buf++; 2473 tx_desc++; 2474 i++; 2475 if (unlikely(!i)) { 2476 i -= tx_ring->count; 2477 tx_buf = tx_ring->tx_buf; 2478 tx_desc = ICE_TX_DESC(tx_ring, 0); 2479 } 2480 2481 /* unmap the data header */ 2482 if (dma_unmap_len(tx_buf, len)) 2483 dma_unmap_single(tx_ring->dev, 2484 dma_unmap_addr(tx_buf, dma), 2485 dma_unmap_len(tx_buf, len), 2486 DMA_TO_DEVICE); 2487 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2488 devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2489 2490 /* clear next_to_watch to prevent false hangs */ 2491 tx_buf->raw_buf = NULL; 2492 tx_buf->tx_flags = 0; 2493 tx_buf->next_to_watch = NULL; 2494 dma_unmap_len_set(tx_buf, len, 0); 2495 tx_desc->buf_addr = 0; 2496 tx_desc->cmd_type_offset_bsz = 0; 2497 2498 /* move past eop_desc for start of next FD desc */ 2499 tx_buf++; 2500 tx_desc++; 2501 i++; 2502 if (unlikely(!i)) { 2503 i -= tx_ring->count; 2504 tx_buf = tx_ring->tx_buf; 2505 tx_desc = ICE_TX_DESC(tx_ring, 0); 2506 } 2507 2508 budget--; 2509 } while (likely(budget)); 2510 2511 i += tx_ring->count; 2512 tx_ring->next_to_clean = i; 2513 2514 /* re-enable interrupt if needed */ 2515 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2516 } 2517