// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
#include "ice_dcb_lib.h"

#define ICE_RX_HDR_SIZE		256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

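/* Note on the index math used by ice_clean_tx_irq() below: the loop index
 * runs as a negative offset so the wrap check is a cheap compare against
 * zero. For example, with a ring of count = 512 and next_to_clean = 510,
 * i starts at 510 - 512 = -2; after two descriptors are cleaned i reaches
 * 0, the buffer/descriptor pointers are reset to the ring base and i is
 * rewound to -512, and the final "i += tx_ring->count" converts it back
 * to a valid 0..511 index before it is stored in next_to_clean.
 */
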
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool
ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = vsi->work_lmt;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.pkts += total_pkts;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_pkts += total_pkts;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048, DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	u16 prev_ntu = rx_ring->next_to_use;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ICE_RXBUF_2048,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

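/* Example of the reuse scheme implemented by the helpers below: on systems
 * with 4K pages each page is split into two ICE_RXBUF_2048 halves, so
 * ice_rx_buf_adjust_pg_offset() XORs page_offset with the 2048 byte
 * truesize and the buffer alternates between offset 0 and offset 2048.
 * Combined with the pagecnt_bias bookkeeping this lets one half be owned
 * by the stack while the other half is handed back to hardware.
 */
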
/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by the @size bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
#if (PAGE_SIZE >= 8192)
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
#endif
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
		unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size);
#else
	unsigned int truesize = ICE_RXBUF_2048;
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @size: the length of the packet
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  unsigned int size)
{
	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ICE_RXBUF_2048;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	/* hand second half of page back to the ring */
	if (ice_can_reuse_rx_page(rx_buf)) {
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool
ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	default:
		break;
	}
	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via. skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	bool failure;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
		/* allocate (if needed) and populate skb */
		if (skb)
			ice_add_rx_frag(rx_buf, skb, size);
		else
			skb = ice_construct_skb(rx_ring, rx_buf, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* correct empty headers and pad skb if needed (to make valid
		 * ethernet frame
		 */
		if (ice_cleanup_headers(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	/* update queue and vector specific stats */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.pkts += total_rx_pkts;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: ITR value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 *   wmem_default / (size + overhead) = desired_pkts_per_int
 *   rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
 *   (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
 * formula down to:
 *
 *       wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
 * ITR = -------------------------------------------- * --------------
 *                          rate                        pkt_size + 640
 */
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		/* fall through */
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}

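/* Worked example for the table above: on a 10GB link the constant is
 * 212992 * 8 * 1000000 / 10000000000 ~= 170, so with an average packet
 * size of 1500 bytes the increment evaluates to
 * DIV_ROUND_UP(170 * (1500 + 24), 1500 + 640) = DIV_ROUND_UP(259080, 2140)
 * = 122 usecs, before the ICE_ITR_ADAPTIVE_MAX_USECS clamp is applied.
 */
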
/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @vsi: the VSI associated with the q_vector
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	u32 itr_val;

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, vsi->state))
		wr32(&vsi->back->hw,
		     GLINT_DYN_CTL(q_vector->reg_idx),
		     itr_val);
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	struct ice_vsi *vsi = q_vector->vsi;
	bool clean_complete = true;
	int budget_per_ring = 0;
	struct ice_ring *ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx)
		if (!ice_clean_tx_irq(vsi, ring, budget))
			clean_complete = false;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	if (q_vector->num_ring_rx)
		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(vsi, q_vector);

	return min_t(int, work_done, budget - 1);
}

/* helper function for building cmd/type/offset */
static __le64
build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	skb_frag_t *frag;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			  ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset, max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	if (skb->encapsulation)
		return -1;

	/* Enable IP checksum offloads */
	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IP)) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (protocol == htons(ETH_P_IPV6)) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error;
 * otherwise returns 0 to indicate the flags have been set properly.
 */
static int
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;
	__be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* when HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		/* for SW VLAN, check the next protocol and store the tag */
		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
							     sizeof(_vhdr),
							     &_vhdr);
		if (!vhdr)
			return -EINVAL;

		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				   ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
	}

	return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen, l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* cppcheck-suppress unreadVariable */
	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of transport header */
	l4_start = l4.hdr - skb->data;

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* compute length of segmentation header */
	off->header_len = (l4.tcp->doff * 4) + l4_start;

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

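/* Worked example for the estimate implemented by ice_txd_use_count() below:
 * a 60000 byte buffer needs DIV_ROUND_UP(60000, 12288) = 5 data descriptors
 * at 12K per descriptor, and the shift/multiply approximation gives
 * ((60000 * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR = 4 + 1 = 5, matching
 * the exact value without a division.
 */
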
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}

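/* Worked example (illustrative only): for size = 24576 (24K),
 * (24576 * 85) >> 20 = 1, and adding ICE_DESCS_FOR_SKB_DATA_PTR (1) yields
 * two descriptors, matching the 12K-per-descriptor assumption above. For a
 * typical size = 1500, (1500 * 85) >> 20 = 0, so a single descriptor is
 * counted for the buffer.
 */
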
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}

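/* Worked example (illustrative only): assume gso_size = 2000 and eight
 * 256 byte fragments. sum starts at 1 - 2000 = -1999, fragments 0 through 4
 * add 1280 (sum = -719), and the first loop iteration adds fragment 5
 * (sum = -463). sum is still negative, so six fragments cannot carry one
 * full segment of payload, a single segment could span more than 8 buffers,
 * and the function returns true so the caller linearizes the skb.
 */
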
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_tx_buf *first;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	if (ice_tx_prepare_vlan_flags(tx_ring, first))
		goto out_drop;

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	if (tso || offload.cd_tunnel_params) {
		struct ice_tx_ctx_desc *cdesc;
		int i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}