// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE 256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

/**
 * txring_txq - Find the netdev Tx queue paired with an ice Tx ring
 * @ring: Tx ring to find the netdev queue for
 */
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
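		 * This pairs with the smp_mb() in __ice_maybe_stop_tx() so
		 * a queue stopped on a full ring is reliably restarted.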
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_umem) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

/**
 * ice_rx_frame_truesize - Compute truesize for a received frame
 * @rx_ring: Rx ring the frame was received on
 * @size: packet length from the Rx descriptor
 *
 * Half a page on PAGE_SIZE < 8192 systems, otherwise the aligned frame size
 * plus headroom and skb_shared_info overhead when headroom is in use.
 */
static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
					  unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ice_rx_offset(rx_ring) ?
		   SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	int err, result = ICE_XDP_PASS;
	struct ice_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	return result;
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * free'ed via XDP return API.
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
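 * A zero @size (a descriptor that carried no data) is a no-op.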
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	xdp.rxq = &rx_ring->xdp_rxq;
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			ice_put_rx_buf(rx_ring, NULL);
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
		xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
		xdp.data_meta = xdp.data;
		xdp.data_end = xdp.data + size;
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depends on len size */
		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		rcu_read_lock();
		xdp_prog = READ_ONCE(rx_ring->xdp_prog);
		if (!xdp_prog) {
			rcu_read_unlock();
			goto construct_skb;
		}

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
		rcu_read_unlock();
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		ice_put_rx_buf(rx_ring, rx_buf);
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		} else if (likely(xdp.data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(rx_ring, xdp_xmit);

	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: ITR value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 * wmem_default / (size + overhead) = desired_pkts_per_int
 * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
 * formula down to:
 *
 *        wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
 * ITR = -------------------------------------------- * --------------
 *                           rate                        pkt_size + 640
 *
 * The per-speed constants below are the first factor evaluated for each
 * link rate, e.g. at 10 Gbps: 212992 * 8 bits / 10000 Mbit/s ~= 170 usecs.
 */
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}

/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
	      ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
	      ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	prefetch(q_vector->vsi->port_info);

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	struct ice_vsi *vsi = q_vector->vsi;
	u32 itr_val;

	/* when exiting WB_ON_ITR lets set a low ITR value and trigger
	 * interrupts to expire right away in case we have more work ready to go
	 * already
	 */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
		itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
		/* set target back to last user set value */
		rx->target_itr = rx->itr_setting;
		/* set current to what we just wrote and dynamic if needed */
		rx->current_itr = ICE_WB_ON_ITR_USECS |
			(rx->itr_setting & ICE_ITR_DYNAMIC);
		/* allow normal interrupt flow to start */
		q_vector->itr_countdown = 0;
		return;
	}

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
		wr32(&q_vector->vsi->back->hw,
		     GLINT_DYN_CTL(q_vector->reg_idx),
		     itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until the
 * next interrupt.
 *
 * This sets the write-back frequency to 2 microseconds as that is the minimum
 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
 * make sure hardware knows we aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in WB_ON_ITR mode no need to change it */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
		return;

	if (q_vector->num_ring_rx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_RX_ITR));

	if (q_vector->num_ring_tx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_TX_ITR));

	q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	bool clean_complete = true;
	struct ice_ring *ring;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx) {
		bool wd = ring->xsk_umem ?
			  ice_clean_tx_irq_zc(ring, budget) :
			  ice_clean_tx_irq(ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = ring->xsk_umem ?
			  ice_clean_rx_irq_zc(ring, budget_per_ring) :
			  ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(q_vector);
	else
		ice_set_wb_on_itr(q_vector);

	return min_t(int, work_done, budget - 1);
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
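 * Data chunks larger than ICE_MAX_DATA_PER_TXD are split across multiple
 * descriptors inside the loop below.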
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			 ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
1837 */ 1838 wmb(); 1839 1840 /* set next_to_watch value indicating a packet is present */ 1841 first->next_to_watch = tx_desc; 1842 1843 tx_ring->next_to_use = i; 1844 1845 ice_maybe_stop_tx(tx_ring, DESC_NEEDED); 1846 1847 /* notify HW of packet */ 1848 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) 1849 writel(i, tx_ring->tail); 1850 1851 return; 1852 1853 dma_error: 1854 /* clear DMA mappings for failed tx_buf map */ 1855 for (;;) { 1856 tx_buf = &tx_ring->tx_buf[i]; 1857 ice_unmap_and_free_tx_buf(tx_ring, tx_buf); 1858 if (tx_buf == first) 1859 break; 1860 if (i == 0) 1861 i = tx_ring->count; 1862 i--; 1863 } 1864 1865 tx_ring->next_to_use = i; 1866 } 1867 1868 /** 1869 * ice_tx_csum - Enable Tx checksum offloads 1870 * @first: pointer to the first descriptor 1871 * @off: pointer to struct that holds offload parameters 1872 * 1873 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise. 1874 */ 1875 static 1876 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 1877 { 1878 u32 l4_len = 0, l3_len = 0, l2_len = 0; 1879 struct sk_buff *skb = first->skb; 1880 union { 1881 struct iphdr *v4; 1882 struct ipv6hdr *v6; 1883 unsigned char *hdr; 1884 } ip; 1885 union { 1886 struct tcphdr *tcp; 1887 unsigned char *hdr; 1888 } l4; 1889 __be16 frag_off, protocol; 1890 unsigned char *exthdr; 1891 u32 offset, cmd = 0; 1892 u8 l4_proto = 0; 1893 1894 if (skb->ip_summed != CHECKSUM_PARTIAL) 1895 return 0; 1896 1897 ip.hdr = skb_network_header(skb); 1898 l4.hdr = skb_transport_header(skb); 1899 1900 /* compute outer L2 header size */ 1901 l2_len = ip.hdr - skb->data; 1902 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; 1903 1904 protocol = vlan_get_protocol(skb); 1905 1906 if (protocol == htons(ETH_P_IP)) 1907 first->tx_flags |= ICE_TX_FLAGS_IPV4; 1908 else if (protocol == htons(ETH_P_IPV6)) 1909 first->tx_flags |= ICE_TX_FLAGS_IPV6; 1910 1911 if (skb->encapsulation) { 1912 bool gso_ena = false; 1913 u32 tunnel = 0; 1914 1915 /* define outer network header type */ 1916 if (first->tx_flags & ICE_TX_FLAGS_IPV4) { 1917 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ? 
1918 ICE_TX_CTX_EIPT_IPV4 :
1919 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1920 l4_proto = ip.v4->protocol;
1921 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1922 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1923 exthdr = ip.hdr + sizeof(*ip.v6);
1924 l4_proto = ip.v6->nexthdr;
1925 if (l4.hdr != exthdr)
1926 ipv6_skip_exthdr(skb, exthdr - skb->data,
1927 &l4_proto, &frag_off);
1928 }
1929
1930 /* define outer transport */
1931 switch (l4_proto) {
1932 case IPPROTO_UDP:
1933 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1934 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1935 break;
1936 case IPPROTO_GRE:
1937 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1938 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1939 break;
1940 case IPPROTO_IPIP:
1941 case IPPROTO_IPV6:
1942 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1943 l4.hdr = skb_inner_network_header(skb);
1944 break;
1945 default:
1946 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1947 return -1;
1948
1949 skb_checksum_help(skb);
1950 return 0;
1951 }
1952
1953 /* compute outer L3 header size */
1954 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1955 ICE_TXD_CTX_QW0_EIPLEN_S;
1956
1957 /* switch IP header pointer from outer to inner header */
1958 ip.hdr = skb_inner_network_header(skb);
1959
1960 /* compute tunnel header size */
1961 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1962 ICE_TXD_CTX_QW0_NATLEN_S;
1963
1964 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1965 /* indicate if we need to offload outer UDP header */
1966 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1967 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1968 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1969
1970 /* record tunnel offload values */
1971 off->cd_tunnel_params |= tunnel;
1972
1973 /* set DTYP=1 to indicate that it's a Tx context descriptor
1974 * in IPsec tunnel mode with Tx offloads in Quad word 1
1975 */
1976 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1977
1978 /* switch L4 header pointer from outer to inner */
1979 l4.hdr = skb_inner_transport_header(skb);
1980 l4_proto = 0;
1981
1982 /* reset type as we transition from outer to inner headers */
1983 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1984 if (ip.v4->version == 4)
1985 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1986 if (ip.v6->version == 6)
1987 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1988 }
1989
1990 /* Enable IP checksum offloads */
1991 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1992 l4_proto = ip.v4->protocol;
1993 /* the stack has already computed the IP header checksum; the only
1994 * time we need the hardware to recompute it is in the case of TSO.
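 * With TSO the hardware replicates the IP header for every segment it
 * emits and rewrites the length field, so each segment needs a freshly
 * computed header checksum.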
1995 */
1996 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1997 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1998 else
1999 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2000
2001 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
2002 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2003 exthdr = ip.hdr + sizeof(*ip.v6);
2004 l4_proto = ip.v6->nexthdr;
2005 if (l4.hdr != exthdr)
2006 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
2007 &frag_off);
2008 } else {
2009 return -1;
2010 }
2011
2012 /* compute inner L3 header size */
2013 l3_len = l4.hdr - ip.hdr;
2014 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
2015
2016 /* Enable L4 checksum offloads */
2017 switch (l4_proto) {
2018 case IPPROTO_TCP:
2019 /* enable TCP checksum offload */
2020 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2021 l4_len = l4.tcp->doff;
2022 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2023 break;
2024 case IPPROTO_UDP:
2025 /* enable UDP checksum offload */
2026 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2027 l4_len = (sizeof(struct udphdr) >> 2);
2028 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2029 break;
2030 case IPPROTO_SCTP:
2031 /* enable SCTP checksum offload */
2032 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2033 l4_len = sizeof(struct sctphdr) >> 2;
2034 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2035 break;
2036
2037 default:
2038 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2039 return -1;
2040 skb_checksum_help(skb);
2041 return 0;
2042 }
2043
2044 off->td_cmd |= cmd;
2045 off->td_offset |= offset;
2046 return 1;
2047 }
2048
2049 /**
2050 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2051 * @tx_ring: ring to send buffer on
2052 * @first: pointer to struct ice_tx_buf
2053 *
2054 * Checks the skb and sets up the corresponding generic transmit flags
2055 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2056 */
2057 static void
2058 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
2059 {
2060 struct sk_buff *skb = first->skb;
2061
2062 /* nothing left to do, software offloaded VLAN */
2063 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
2064 return;
2065
2066 /* currently, we always assume 802.1Q for VLAN insertion as VLAN
2067 * insertion for 802.1AD is not supported
2068 */
2069 if (skb_vlan_tag_present(skb)) {
2070 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
2071 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
2072 }
2073
2074 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2075 }
2076
2077 /**
2078 * ice_tso - computes mss and TSO length to prepare for TSO
2079 * @first: pointer to struct ice_tx_buf
2080 * @off: pointer to struct that holds offload parameters
2081 *
2082 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
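 *
 * Worked example (hypothetical numbers, only to illustrate what gets
 * programmed): a TCP skb with 14 + 20 + 20 = 54 bytes of headers, 4000
 * bytes of payload (skb->len = 4054) and gso_size = 1000 ends up with
 * header_len = 54, cd_tso_len = 4000, cd_mss = 1000, gso_segs = 4, and
 * bytecount grows by (4 - 1) * 54 = 162 bytes for the replicated headers.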
2083 */ 2084 static 2085 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2086 { 2087 struct sk_buff *skb = first->skb; 2088 union { 2089 struct iphdr *v4; 2090 struct ipv6hdr *v6; 2091 unsigned char *hdr; 2092 } ip; 2093 union { 2094 struct tcphdr *tcp; 2095 struct udphdr *udp; 2096 unsigned char *hdr; 2097 } l4; 2098 u64 cd_mss, cd_tso_len; 2099 u32 paylen; 2100 u8 l4_start; 2101 int err; 2102 2103 if (skb->ip_summed != CHECKSUM_PARTIAL) 2104 return 0; 2105 2106 if (!skb_is_gso(skb)) 2107 return 0; 2108 2109 err = skb_cow_head(skb, 0); 2110 if (err < 0) 2111 return err; 2112 2113 /* cppcheck-suppress unreadVariable */ 2114 ip.hdr = skb_network_header(skb); 2115 l4.hdr = skb_transport_header(skb); 2116 2117 /* initialize outer IP header fields */ 2118 if (ip.v4->version == 4) { 2119 ip.v4->tot_len = 0; 2120 ip.v4->check = 0; 2121 } else { 2122 ip.v6->payload_len = 0; 2123 } 2124 2125 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 2126 SKB_GSO_GRE_CSUM | 2127 SKB_GSO_IPXIP4 | 2128 SKB_GSO_IPXIP6 | 2129 SKB_GSO_UDP_TUNNEL | 2130 SKB_GSO_UDP_TUNNEL_CSUM)) { 2131 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 2132 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 2133 l4.udp->len = 0; 2134 2135 /* determine offset of outer transport header */ 2136 l4_start = (u8)(l4.hdr - skb->data); 2137 2138 /* remove payload length from outer checksum */ 2139 paylen = skb->len - l4_start; 2140 csum_replace_by_diff(&l4.udp->check, 2141 (__force __wsum)htonl(paylen)); 2142 } 2143 2144 /* reset pointers to inner headers */ 2145 2146 /* cppcheck-suppress unreadVariable */ 2147 ip.hdr = skb_inner_network_header(skb); 2148 l4.hdr = skb_inner_transport_header(skb); 2149 2150 /* initialize inner IP header fields */ 2151 if (ip.v4->version == 4) { 2152 ip.v4->tot_len = 0; 2153 ip.v4->check = 0; 2154 } else { 2155 ip.v6->payload_len = 0; 2156 } 2157 } 2158 2159 /* determine offset of transport header */ 2160 l4_start = (u8)(l4.hdr - skb->data); 2161 2162 /* remove payload length from checksum */ 2163 paylen = skb->len - l4_start; 2164 2165 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 2166 csum_replace_by_diff(&l4.udp->check, 2167 (__force __wsum)htonl(paylen)); 2168 /* compute length of UDP segmentation header */ 2169 off->header_len = (u8)sizeof(l4.udp) + l4_start; 2170 } else { 2171 csum_replace_by_diff(&l4.tcp->check, 2172 (__force __wsum)htonl(paylen)); 2173 /* compute length of TCP segmentation header */ 2174 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start); 2175 } 2176 2177 /* update gso_segs and bytecount */ 2178 first->gso_segs = skb_shinfo(skb)->gso_segs; 2179 first->bytecount += (first->gso_segs - 1) * off->header_len; 2180 2181 cd_tso_len = skb->len - off->header_len; 2182 cd_mss = skb_shinfo(skb)->gso_size; 2183 2184 /* record cdesc_qw1 with TSO parameters */ 2185 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2186 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) | 2187 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 2188 (cd_mss << ICE_TXD_CTX_QW1_MSS_S)); 2189 first->tx_flags |= ICE_TX_FLAGS_TSO; 2190 return 1; 2191 } 2192 2193 /** 2194 * ice_txd_use_count - estimate the number of descriptors needed for Tx 2195 * @size: transmit request size in bytes 2196 * 2197 * Due to hardware alignment restrictions (4K alignment), we need to 2198 * assume that we can have no more than 12K of data per descriptor, even 2199 * though each descriptor can take up to 16K - 1 bytes of aligned memory. 2200 * Thus, we need to divide by 12K. But division is slow! 
Instead, 2201 * we decompose the operation into shifts and one relatively cheap 2202 * multiply operation. 2203 * 2204 * To divide by 12K, we first divide by 4K, then divide by 3: 2205 * To divide by 4K, shift right by 12 bits 2206 * To divide by 3, multiply by 85, then divide by 256 2207 * (Divide by 256 is done by shifting right by 8 bits) 2208 * Finally, we add one to round up. Because 256 isn't an exact multiple of 2209 * 3, we'll underestimate near each multiple of 12K. This is actually more 2210 * accurate as we have 4K - 1 of wiggle room that we can fit into the last 2211 * segment. For our purposes this is accurate out to 1M which is orders of 2212 * magnitude greater than our largest possible GSO size. 2213 * 2214 * This would then be implemented as: 2215 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR; 2216 * 2217 * Since multiplication and division are commutative, we can reorder 2218 * operations into: 2219 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 2220 */ 2221 static unsigned int ice_txd_use_count(unsigned int size) 2222 { 2223 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR; 2224 } 2225 2226 /** 2227 * ice_xmit_desc_count - calculate number of Tx descriptors needed 2228 * @skb: send buffer 2229 * 2230 * Returns number of data descriptors needed for this skb. 2231 */ 2232 static unsigned int ice_xmit_desc_count(struct sk_buff *skb) 2233 { 2234 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 2235 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2236 unsigned int count = 0, size = skb_headlen(skb); 2237 2238 for (;;) { 2239 count += ice_txd_use_count(size); 2240 2241 if (!nr_frags--) 2242 break; 2243 2244 size = skb_frag_size(frag++); 2245 } 2246 2247 return count; 2248 } 2249 2250 /** 2251 * __ice_chk_linearize - Check if there are more than 8 buffers per packet 2252 * @skb: send buffer 2253 * 2254 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire 2255 * and so we need to figure out the cases where we need to linearize the skb. 2256 * 2257 * For TSO we need to count the TSO header and segment payload separately. 2258 * As such we need to check cases where we have 7 fragments or more as we 2259 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for 2260 * the segment payload in the first descriptor, and another 7 for the 2261 * fragments. 2262 */ 2263 static bool __ice_chk_linearize(struct sk_buff *skb) 2264 { 2265 const skb_frag_t *frag, *stale; 2266 int nr_frags, sum; 2267 2268 /* no need to check if number of frags is less than 7 */ 2269 nr_frags = skb_shinfo(skb)->nr_frags; 2270 if (nr_frags < (ICE_MAX_BUF_TXD - 1)) 2271 return false; 2272 2273 /* We need to walk through the list and validate that each group 2274 * of 6 fragments totals at least gso_size. 2275 */ 2276 nr_frags -= ICE_MAX_BUF_TXD - 2; 2277 frag = &skb_shinfo(skb)->frags[0]; 2278 2279 /* Initialize size to the negative value of gso_size minus 1. We 2280 * use this as the worst case scenario in which the frag ahead 2281 * of us only provides one byte which is why we are limited to 6 2282 * descriptors for a single transmit as the header and previous 2283 * fragment are already consuming 2 descriptors. 
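 *
 * Hypothetical example: with gso_size = 4000, a window of six consecutive
 * fragments totalling only 3000 bytes leaves sum = 1 - 4000 + 3000 = -999,
 * so the skb is linearized; as long as every such window covers at least
 * gso_size bytes, the sum never goes negative and no linearization is
 * needed.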
2284 */ 2285 sum = 1 - skb_shinfo(skb)->gso_size; 2286 2287 /* Add size of frags 0 through 4 to create our initial sum */ 2288 sum += skb_frag_size(frag++); 2289 sum += skb_frag_size(frag++); 2290 sum += skb_frag_size(frag++); 2291 sum += skb_frag_size(frag++); 2292 sum += skb_frag_size(frag++); 2293 2294 /* Walk through fragments adding latest fragment, testing it, and 2295 * then removing stale fragments from the sum. 2296 */ 2297 stale = &skb_shinfo(skb)->frags[0]; 2298 for (;;) { 2299 sum += skb_frag_size(frag++); 2300 2301 /* if sum is negative we failed to make sufficient progress */ 2302 if (sum < 0) 2303 return true; 2304 2305 if (!nr_frags--) 2306 break; 2307 2308 sum -= skb_frag_size(stale++); 2309 } 2310 2311 return false; 2312 } 2313 2314 /** 2315 * ice_chk_linearize - Check if there are more than 8 fragments per packet 2316 * @skb: send buffer 2317 * @count: number of buffers used 2318 * 2319 * Note: Our HW can't scatter-gather more than 8 fragments to build 2320 * a packet on the wire and so we need to figure out the cases where we 2321 * need to linearize the skb. 2322 */ 2323 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count) 2324 { 2325 /* Both TSO and single send will work if count is less than 8 */ 2326 if (likely(count < ICE_MAX_BUF_TXD)) 2327 return false; 2328 2329 if (skb_is_gso(skb)) 2330 return __ice_chk_linearize(skb); 2331 2332 /* we can support up to 8 data buffers for a single send */ 2333 return count != ICE_MAX_BUF_TXD; 2334 } 2335 2336 /** 2337 * ice_xmit_frame_ring - Sends buffer on Tx ring 2338 * @skb: send buffer 2339 * @tx_ring: ring to send buffer on 2340 * 2341 * Returns NETDEV_TX_OK if sent, else an error code 2342 */ 2343 static netdev_tx_t 2344 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) 2345 { 2346 struct ice_tx_offload_params offload = { 0 }; 2347 struct ice_vsi *vsi = tx_ring->vsi; 2348 struct ice_tx_buf *first; 2349 unsigned int count; 2350 int tso, csum; 2351 2352 count = ice_xmit_desc_count(skb); 2353 if (ice_chk_linearize(skb, count)) { 2354 if (__skb_linearize(skb)) 2355 goto out_drop; 2356 count = ice_txd_use_count(skb->len); 2357 tx_ring->tx_stats.tx_linearize++; 2358 } 2359 2360 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD, 2361 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD, 2362 * + 4 desc gap to avoid the cache line where head is, 2363 * + 1 desc for context descriptor, 2364 * otherwise try next time 2365 */ 2366 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE + 2367 ICE_DESCS_FOR_CTX_DESC)) { 2368 tx_ring->tx_stats.tx_busy++; 2369 return NETDEV_TX_BUSY; 2370 } 2371 2372 offload.tx_ring = tx_ring; 2373 2374 /* record the location of the first descriptor for this packet */ 2375 first = &tx_ring->tx_buf[tx_ring->next_to_use]; 2376 first->skb = skb; 2377 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); 2378 first->gso_segs = 1; 2379 first->tx_flags = 0; 2380 2381 /* prepare the VLAN tagging flags for Tx */ 2382 ice_tx_prepare_vlan_flags(tx_ring, first); 2383 2384 /* set up TSO offload */ 2385 tso = ice_tso(first, &offload); 2386 if (tso < 0) 2387 goto out_drop; 2388 2389 /* always set up Tx checksum offload */ 2390 csum = ice_tx_csum(first, &offload); 2391 if (csum < 0) 2392 goto out_drop; 2393 2394 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ 2395 if (unlikely(skb->priority == TC_PRIO_CONTROL && 2396 vsi->type == ICE_VSI_PF && 2397 vsi->port_info->is_sw_lldp)) 2398 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2399 
ICE_TX_CTX_DESC_SWTCH_UPLINK << 2400 ICE_TXD_CTX_QW1_CMD_S); 2401 2402 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2403 struct ice_tx_ctx_desc *cdesc; 2404 u16 i = tx_ring->next_to_use; 2405 2406 /* grab the next descriptor */ 2407 cdesc = ICE_TX_CTX_DESC(tx_ring, i); 2408 i++; 2409 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; 2410 2411 /* setup context descriptor */ 2412 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); 2413 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); 2414 cdesc->rsvd = cpu_to_le16(0); 2415 cdesc->qw1 = cpu_to_le64(offload.cd_qw1); 2416 } 2417 2418 ice_tx_map(tx_ring, first, &offload); 2419 return NETDEV_TX_OK; 2420 2421 out_drop: 2422 dev_kfree_skb_any(skb); 2423 return NETDEV_TX_OK; 2424 } 2425 2426 /** 2427 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer 2428 * @skb: send buffer 2429 * @netdev: network interface device structure 2430 * 2431 * Returns NETDEV_TX_OK if sent, else an error code 2432 */ 2433 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) 2434 { 2435 struct ice_netdev_priv *np = netdev_priv(netdev); 2436 struct ice_vsi *vsi = np->vsi; 2437 struct ice_ring *tx_ring; 2438 2439 tx_ring = vsi->tx_rings[skb->queue_mapping]; 2440 2441 /* hardware can't handle really short frames, hardware padding works 2442 * beyond this point 2443 */ 2444 if (skb_put_padto(skb, ICE_MIN_TX_LEN)) 2445 return NETDEV_TX_OK; 2446 2447 return ice_xmit_frame_ring(skb, tx_ring); 2448 } 2449 2450 /** 2451 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue 2452 * @tx_ring: tx_ring to clean 2453 */ 2454 void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring) 2455 { 2456 struct ice_vsi *vsi = tx_ring->vsi; 2457 s16 i = tx_ring->next_to_clean; 2458 int budget = ICE_DFLT_IRQ_WORK; 2459 struct ice_tx_desc *tx_desc; 2460 struct ice_tx_buf *tx_buf; 2461 2462 tx_buf = &tx_ring->tx_buf[i]; 2463 tx_desc = ICE_TX_DESC(tx_ring, i); 2464 i -= tx_ring->count; 2465 2466 do { 2467 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch; 2468 2469 /* if next_to_watch is not set then there is no pending work */ 2470 if (!eop_desc) 2471 break; 2472 2473 /* prevent any other reads prior to eop_desc */ 2474 smp_rmb(); 2475 2476 /* if the descriptor isn't done, no work to do */ 2477 if (!(eop_desc->cmd_type_offset_bsz & 2478 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 2479 break; 2480 2481 /* clear next_to_watch to prevent false hangs */ 2482 tx_buf->next_to_watch = NULL; 2483 tx_desc->buf_addr = 0; 2484 tx_desc->cmd_type_offset_bsz = 0; 2485 2486 /* move past filter desc */ 2487 tx_buf++; 2488 tx_desc++; 2489 i++; 2490 if (unlikely(!i)) { 2491 i -= tx_ring->count; 2492 tx_buf = tx_ring->tx_buf; 2493 tx_desc = ICE_TX_DESC(tx_ring, 0); 2494 } 2495 2496 /* unmap the data header */ 2497 if (dma_unmap_len(tx_buf, len)) 2498 dma_unmap_single(tx_ring->dev, 2499 dma_unmap_addr(tx_buf, dma), 2500 dma_unmap_len(tx_buf, len), 2501 DMA_TO_DEVICE); 2502 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT) 2503 devm_kfree(tx_ring->dev, tx_buf->raw_buf); 2504 2505 /* clear next_to_watch to prevent false hangs */ 2506 tx_buf->raw_buf = NULL; 2507 tx_buf->tx_flags = 0; 2508 tx_buf->next_to_watch = NULL; 2509 dma_unmap_len_set(tx_buf, len, 0); 2510 tx_desc->buf_addr = 0; 2511 tx_desc->cmd_type_offset_bsz = 0; 2512 2513 /* move past eop_desc for start of next FD desc */ 2514 tx_buf++; 2515 tx_desc++; 2516 i++; 2517 if (unlikely(!i)) { 2518 i -= tx_ring->count; 2519 tx_buf = tx_ring->tx_buf; 2520 tx_desc = ICE_TX_DESC(tx_ring, 0); 2521 } 2522 
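/* one filter programming request (the filter descriptor plus its
 * dummy-packet data descriptor) has now been cleaned
 */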
2523 budget--; 2524 } while (likely(budget)); 2525 2526 i += tx_ring->count; 2527 tx_ring->next_to_clean = i; 2528 2529 /* re-enable interrupt if needed */ 2530 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]); 2531 } 2532