// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"

#define ICE_RX_HDR_SIZE		256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	size = sizeof(struct ice_tx_buf) * tx_ring->count;
	memset(tx_ring->tx_buf, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
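/* Note: ice_clean_tx_irq() below runs its ring index biased by -count
 * (see "i -= tx_ring->count"); that turns the wrap-around test in the
 * hot loop into a cheap zero check (!i) rather than a compare against
 * tx_ring->count, and i is rebiased by +count once the loop exits.
 */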
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
			     int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = vsi->work_lmt;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.pkts += total_pkts;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_pkts += total_pkts;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
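		/* This barrier pairs with the smp_mb() in
		 * __ice_maybe_stop_tx(); both sides of the stop/wake
		 * protocol must agree on the order of the queue state
		 * and next_to_clean updates or a wake could be missed.
		 */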
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Returns 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	bi_size = sizeof(struct ice_tx_buf) * tx_ring->count;
	tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_buf->page, 0);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	size = sizeof(struct ice_rx_buf) * rx_ring->count;
	memset(rx_ring->rx_buf, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Returns 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	bi_size = sizeof(struct ice_rx_buf) * rx_ring->count;
	rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;
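	/* As on the Tx side, the descriptor area below is padded out to a
	 * whole 4K multiple, so a ring of any supported count ends on a
	 * page boundary.
	 */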
	/* round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
				  struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		if (!ice_alloc_mapped_page(rx_ring, bi))
			goto no_bufs;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
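		/* pkt_addr points at dma + page_offset so that a recycled
		 * page can alternate between its two 2K halves; see the
		 * page-flip logic in ice_add_rx_frag().
		 */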
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return false;

no_bufs:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buf: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 */
static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
			    union ice_32b_rx_flex_desc *rx_desc,
			    struct sk_buff *skb)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ICE_RXBUF_2048;
#else
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
	unsigned int truesize;
#endif /* PAGE_SIZE < 8192 */

	struct page *page;
	unsigned int size;

	size = le16_to_cpu(rx_desc->wb.pkt_len) &
		ICE_RX_FLX_DESC_PKT_LEN_M;

	page = rx_buf->page;

#if (PAGE_SIZE >= 8192)
	truesize = ALIGN(size, L1_CACHE_BYTES);
#endif /* PAGE_SIZE >= 8192 */

	/* will the data fit in the skb we allocated? if so, just
	 * copy it as it is pretty small anyway
	 */
	if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buf->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!ice_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_pages(page, 0);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buf->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buf->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += truesize;

	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* Even if we own the page, we are not allowed to use atomic_set().
	 * This would break get_page_unless_zero() users.
	 */
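	/* Taking an extra reference means the stack's eventual put_page()
	 * only drops its own reference, while the driver keeps one so the
	 * other half of the page can be handed back to hardware.
	 */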
	get_page(rx_buf->page);

	return true;
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void ice_reuse_rx_page(struct ice_ring *rx_ring,
			      struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buf = *old_buf;
}

/**
 * ice_fetch_rx_buf - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_desc: descriptor containing info written by hardware
 *
 * This function allocates an skb on the fly, and populates it with the page
 * data from the current receive descriptor, taking care to set up the skb
 * correctly, as well as handling calling the page recycle function if
 * necessary.
 */
static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
					union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_rx_buf *rx_buf;
	struct sk_buff *skb;
	struct page *page;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	page = rx_buf->page;
	prefetchw(page);

	skb = rx_buf->skb;

	if (likely(!skb)) {
		u8 *page_addr = page_address(page) + rx_buf->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch((void *)(page_addr + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */

		/* allocate a skb to store the frags */
		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
				       ICE_RX_HDR_SIZE,
				       GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_buf_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		skb_record_rx_queue(skb, rx_ring->q_index);
	} else {
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048,
					      DMA_FROM_DEVICE);

		rx_buf->skb = NULL;
	}

	/* pull page into skb */
	if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;

	return skb;
}
/**
 * ice_pull_tail - ice specific version of skb_pull_tail
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ice specific version of __pskb_pull_tail. The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ice_pull_tail(struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int pull_len;
	unsigned char *va;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ice_pull_tail(skb);

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
			     const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool ice_is_non_eop(struct ice_ring *rx_ring,
			   union ice_32b_rx_flex_desc *rx_desc,
			   struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
			union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
			  (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
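		/* the recognized protocols above fall through to the
		 * shared break; anything else hits default and keeps
		 * the CHECKSUM_NONE set earlier
		 */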
	default:
		break;
	}
	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void ice_process_skb_fields(struct ice_ring *rx_ring,
				   union ice_32b_rx_flex_desc *rx_desc,
				   struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag)
 */
static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
			    u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	}
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	bool failure = false;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= ICE_RX_BUF_WRITE) {
			failure = failure ||
				  ice_alloc_rx_bufs(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused
		 * descriptors because it's cleared in cleanup, and overlaps
		 * with hdr_addr which is always zero because packet split
		 * isn't used; if the hardware wrote DD then it will be
		 * non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
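		/* dma_rmb() is a lighter-weight barrier than rmb(): it
		 * only orders this CPU's reads against the device's DMA
		 * writes to coherent memory, which is all that is needed
		 * before dereferencing the rest of the descriptor.
		 */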
		dma_rmb();

		/* allocate (if needed) and populate skb */
		skb = ice_fetch_rx_buf(rx_ring, rx_desc);
		if (!skb)
			break;

		cleaned_count++;

		/* skip if it is a non-EOP descriptor */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* correct empty headers and pad skb if needed (to make a
		 * valid Ethernet frame)
		 */
		if (ice_cleanup_headers(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* update queue and vector specific stats */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.pkts += total_rx_pkts;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	struct ice_vsi *vsi = q_vector->vsi;
	struct ice_pf *pf = vsi->back;
	bool clean_complete = true;
	int budget_per_ring = 0;
	struct ice_ring *ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx)
		if (!ice_clean_tx_irq(vsi, ring, budget))
			clean_complete = false;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling
	 * early.
	 */
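	/* e.g. a NAPI budget of 64 split across 4 Rx rings gives each
	 * ring a budget of 16; with more rings than budget each ring
	 * still gets 1.
	 */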
	if (q_vector->num_ring_rx)
		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
	return 0;
}

/* helper function for building cmd/type/offset */
static __le64
build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ice_maybe_stop_tx(tx_ring, size);
}
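/* The two-level stop check above keeps the transmit fast path cheap: the
 * common case is a single ring-space comparison in ice_maybe_stop_tx(),
 * and only when space looks short does __ice_maybe_stop_tx() stop the
 * queue and re-check under the memory barrier.
 */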
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	struct skb_frag_struct *frag;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			 ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset, max_data,
					   td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
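	/* This wmb() pairs with the smp_rmb() in ice_clean_tx_irq():
	 * the cleanup path must not observe next_to_watch before the
	 * descriptor contents it guards are visible.
	 */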
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	if (skb->encapsulation)
		return -1;

	/* Enable IP checksum offloads */
	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IP)) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (protocol == htons(ETH_P_IPV6)) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code on failure to indicate the frame should be dropped,
 * otherwise returns 0 to indicate the flags have been set properly.
 */
static int
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;
	__be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* when HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021Q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		/* for SW VLAN, check the next protocol and store the tag */
		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
							     sizeof(_vhdr),
							     &_vhdr);
		if (!vhdr)
			return -EINVAL;

		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				   ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
	}

out:
	return 0;
}
/**
 * ice_tso - computes MSS and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen, l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of transport header */
	l4_start = l4.hdr - skb->data;

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* compute length of segmentation header */
	off->header_len = (l4.tcp->doff * 4) + l4_start;

	/* update gso_segs and bytecount; the header is replicated in every
	 * segment on the wire, so it must be added on top of the bytecount
	 * recorded in the transmit path
	 */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
		       (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
		       (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
		       (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
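/* A quick sanity check of the approximation above (not exhaustive):
 *     size = 12288 (12K): (12288 * 85) >> 20 = 0, so 1 descriptor;
 *     size = 24576 (24K): (24576 * 85) >> 20 = 1, so 2 descriptors;
 * both match ceil(size / 12K).
 */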
/**
 * ice_xmit_desc_count - calculate the number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}
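/* Illustration of the sliding-window check above: with gso_size = 2000,
 * six consecutive frags of 333 bytes sum to 1998, giving
 * sum = 1 - 2000 + 1998 = -1, so the skb must be linearized; at 334
 * bytes each (sum 2004) the window passes and the walk continues.
 */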
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_tx_buf *first;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	if (ice_tx_prepare_vlan_flags(tx_ring, first))
		goto out_drop;

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	if (tso || offload.cd_tunnel_params) {
		struct ice_tx_ctx_desc *cdesc;
		int i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}