// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;
	struct netdev_queue *netdev_txq;
	struct net_device *netdev;

	netdev = q->lif->netdev;
	netdev_txq = netdev_get_tx_queue(netdev, q->index);

	HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());

	if (q->tail_idx == q->head_idx) {
		HARD_TX_UNLOCK(netdev, netdev_txq);
		return false;
	}

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;
	}

	HARD_TX_UNLOCK(netdev, netdev_txq);

	return true;
}

bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;

	/* no lock, called from rx napi or txrx napi, nothing else can fill */

	if (q->tail_idx == q->head_idx)
		return false;

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;

		dif = 2 * q->dbell_deadline;
		if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
			dif = IONIC_RX_MAX_DOORBELL_DEADLINE;

		q->dbell_deadline = dif;
	}

	return true;
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;
	struct page *page;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	buf_info->dma_addr = dma_map_page(dev, page, 0,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}

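/* Try to reuse the current receive page: advance page_offset past the bytes
 * just consumed and take an extra page reference. Give up (so the page is
 * unmapped and replaced) if the page came from pfmemalloc reserves, sits on
 * a remote NUMA node, or has no room left for another buffer.
 */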
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

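/* Process one received packet described by the completion entry: build the
 * skb (copybreak copy for small packets, page frags otherwise), then apply
 * RSS hash, checksum, VLAN tag and hardware timestamp metadata before
 * handing it to GRO.
 */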
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

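/* Refill the receive ring once enough descriptors are free: allocate pages
 * where needed, write the main and SG descriptors, then ring the doorbell
 * and re-arm the doorbell deadline and napi deadline timer.
 */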
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	unsigned int fill_threshold;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int n_fill;
	unsigned int i, j;
	unsigned int len;

	n_fill = ionic_q_space_avail(q);

	fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
			       q->num_descs / IONIC_RX_FILL_DIV);
	if (n_fill < fill_threshold)
		return;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = n_fill; i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
		  jiffies + IONIC_NAPI_DEADLINE);
}

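/* Release every posted receive buffer and reset the queue indices; the ring
 * is left empty, ready to be refilled by ionic_rx_fill().
 */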
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

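/* Combined napi handler used when a TX queue and an RX queue share one
 * interrupt: service the TX completions within a fixed budget, then the RX
 * completions within the napi budget, and report only the RX work done to
 * the napi core.
 */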
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *rxqcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_qcq *txqcq;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	bool resched = false;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txqcq = lif->txqcqs[qi];
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(rxqcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
		resched = true;
	if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
		resched = true;
	if (resched)
		mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

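/* DMA-map the skb head and each page fragment, recording the mappings in
 * desc_info->bufs[]; on a mapping failure, unwind whatever was mapped so far.
 */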
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
	return -EIO;
}

static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	unsigned int i;

	if (!desc_info->nbufs)
		return;

	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	buf_info++;
	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);

	desc_info->nbufs = 0;
}

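/* Completion handling for one transmitted skb: unmap its buffers, report a
 * hardware TX timestamp on the timestamp queue, wake the subqueue if it was
 * stopped, and free the skb.
 */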
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	u16 qi;

	ionic_tx_desc_unmap_bufs(q, desc_info);

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (start) {
		skb_tx_timestamp(skb);
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

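/* Transmit a TSO skb: map the buffers, prime the TCP pseudo checksum, then
 * walk the mapped data emitting one TSO descriptor (plus SG elements) per
 * MSS-sized segment, with the headers counted only in the first segment.
 */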
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
						   SKB_GSO_GRE_CSUM |
						   SKB_GSO_IPXIP4 |
						   SKB_GSO_IPXIP6 |
						   SKB_GSO_UDP_TUNNEL |
						   SKB_GSO_UDP_TUNNEL_CSUM));
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

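/* Fill a CSUM_PARTIAL descriptor: the device finishes the L4 checksum using
 * the csum_start/csum_offset copied from the skb.
 */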
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;
}

static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				  struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->csum_none++;
}

static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;
}

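/* Post a single non-TSO packet: map the skb, write the checksum or no-csum
 * descriptor and its SG elements, then hand it to the queue, ringing the
 * doorbell unless more packets are queued behind it.
 */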
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ionic_tx_calc_csum(q, skb, desc_info);
	else
		ionic_tx_calc_no_csum(q, skb, desc_info);

	/* add frags */
	ionic_tx_skb_frags(q, skb, desc_info);

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

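/* Estimate how many descriptors the skb will need (one per GSO segment for
 * TSO, otherwise one); linearize the skb if it carries more fragments than
 * the queue's SG descriptor can hold.
 */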
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
		return ndescs;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}