// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
#include "ice_dcb_lib.h"

#define ICE_RX_HDR_SIZE	256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool
ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = vsi->work_lmt;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.pkts += total_pkts;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_pkts += total_pkts;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048, DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		if (!ice_alloc_mapped_page(rx_ring, bi))
			goto no_bufs;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ICE_RXBUF_2048,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return false;

no_bufs:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
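 * (Concrete illustration, assuming the 2048-byte buffers used in this file
 * via ICE_RXBUF_2048: on a 4 KB page the XOR flip described below simply
 * toggles page_offset between 0 and 2048.)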
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by the @size bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
#if (PAGE_SIZE >= 8192)
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
#endif
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
		unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size);
#else
	unsigned int truesize = ICE_RXBUF_2048;
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @size: the length of the packet
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  unsigned int size)
{
	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ICE_RXBUF_2048;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	/* hand second half of page back to the ring */
	if (ice_can_reuse_rx_page(rx_buf)) {
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool
ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
			  (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
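		/* HW verified the checksum; fall through to the shared break */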
	default:
		break;
	}
	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag)
 */
static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	bool failure = false;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= ICE_RX_BUF_WRITE) {
			failure = failure ||
				  ice_alloc_rx_bufs(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
		/* allocate (if needed) and populate skb */
		if (skb)
			ice_add_rx_frag(rx_buf, skb, size);
		else
			skb = ice_construct_skb(rx_ring, rx_buf, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* correct empty headers and pad skb if needed (to make a
		 * valid Ethernet frame)
		 */
		if (ice_cleanup_headers(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* update queue and vector specific stats */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.pkts += total_rx_pkts;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: itr value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 *  wmem_default / (size + overhead) = desired_pkts_per_int
 *  rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
 * formula down to:
 *
 *       wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
 * ITR = -------------------------------------------- * --------------
 *                           rate                       pkt_size + 640
 */
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		/* fall through */
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}

/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The itr value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @vsi: the VSI associated with the q_vector
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	u32 itr_val;

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, vsi->state))
		wr32(&vsi->back->hw,
		     GLINT_DYN_CTL(q_vector->reg_idx),
		     itr_val);
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	struct ice_vsi *vsi = q_vector->vsi;
	struct ice_pf *pf = vsi->back;
	bool clean_complete = true;
	int budget_per_ring = 0;
	struct ice_ring *ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx)
		if (!ice_clean_tx_irq(vsi, ring, budget))
			clean_complete = false;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	if (q_vector->num_ring_rx)
		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
			ice_update_ena_itr(vsi, q_vector);

	return min_t(int, work_done, budget - 1);
}

/* helper function for building cmd/type/offset */
static __le64
build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	struct skb_frag_struct *frag;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			 ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset, max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	/* clear dma mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	if (skb->encapsulation)
		return -1;

	/* Enable IP checksum offloads */
	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IP)) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (protocol == htons(ETH_P_IPV6)) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error;
 * otherwise returns 0 to indicate the flags have been set properly.
 */
static int
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;
	__be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* when HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		/* for SW VLAN, check the next protocol and store the tag */
		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
							     sizeof(_vhdr),
							     &_vhdr);
		if (!vhdr)
			return -EINVAL;

		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				   ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
	}

	return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen, l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* cppcheck-suppress unreadVariable */
	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of transport header */
	l4_start = l4.hdr - skb->data;

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* compute length of segmentation header */
	off->header_len = (l4.tcp->doff * 4) + l4_start;

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
		       (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
		       (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
		       (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
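 *
 * (Hypothetical worked check of the final formula below: size = 32768 gives
 * ((32768 * 85) >> 20) = 2, and the trailing + ICE_DESCS_FOR_SKB_DATA_PTR
 * rounds that up to 3, matching DIV_ROUND_UP(32768, 12288) = 3.)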

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
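
/* Worked example of the estimate above (assuming ICE_DESCS_FOR_SKB_DATA_PTR
 * is 1, as the "add one to round up" note implies): for a worst case
 * 60000 byte buffer,
 *
 *	(60000 * 85) >> 20 = 5100000 >> 20 = 4, plus 1 = 5 descriptors,
 *
 * which matches the exact value DIV_ROUND_UP(60000, 12288) = 5.
 */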

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}
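
/* Worked example of the sliding-window check above (numbers chosen purely
 * for illustration): a TSO skb with gso_size = 2000 whose fragments are each
 * only 100 bytes fails the test. sum starts at 1 - 2000 = -1999, frags 0-4
 * bring it up to -1499, and adding frag 5 still leaves it negative (-1399),
 * so __ice_chk_linearize() returns true and the skb is linearized before
 * transmit.
 */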

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_tx_buf *first;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	if (ice_tx_prepare_vlan_flags(tx_ring, first))
		goto out_drop;

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	if (tso || offload.cd_tunnel_params) {
		struct ice_tx_ctx_desc *cdesc;
		int i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}
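
/* Usage note (sketch only): ice_start_xmit() is the driver's transmit entry
 * point and is registered elsewhere in the driver (outside this file) as the
 * netdev's .ndo_start_xmit callback, roughly:
 *
 *	static const struct net_device_ops ice_netdev_ops = {
 *		...
 *		.ndo_start_xmit	= ice_start_xmit,
 *	};
 *
 * The exact ops-table name shown here is an assumption for illustration.
 */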