// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg);

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = &q->lif->rxqstats[q->index];

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}

static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min(len, (u16)PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}

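/* For payloads at or below the copybreak threshold it is cheaper to
 * copy the data into a small linear skb than to hand the whole page up
 * the stack: the DMA mapping is only synced, not torn down, and the
 * still-mapped page is recycled on the next ring fill.
 */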
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

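/* Service any rx completions still outstanding on the cq; credits are
 * returned to the interrupt block without unmasking the interrupt.
 */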
void ionic_rx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_page_info *page_info)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!page_info)) {
		net_err_ratelimited("%s: %s invalid page_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page_info->page = dev_alloc_page();
	if (unlikely(!page_info->page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
					   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
		put_page(page_info->page);
		page_info->dma_addr = 0;
		page_info->page = NULL;
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_page_info *page_info)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page_info)) {
		net_err_ratelimited("%s: %s invalid page_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (unlikely(!page_info->page)) {
		net_err_ratelimited("%s: %s invalid page in free\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	put_page(page_info->page);
	page_info->dma_addr = 0;
	page_info->page = NULL;
}

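/* Post fresh rx descriptors for every slot the queue has available.
 * Each descriptor covers an MTU-sized buffer built from one page in
 * the main descriptor plus additional pages in sg elements; buffers
 * whose pages are still mapped (copybreak hits) are simply recycled.
 */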
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int seg_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) { /* recycle the buffer */
			ionic_rxq_post(q, false, ionic_rx_clean, NULL);
			continue;
		}

		/* fill main descriptor - pages[0] */
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		if (unlikely(ionic_rx_page_alloc(q, page_info))) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		seg_len = min_t(unsigned int, PAGE_SIZE, len);
		desc->len = cpu_to_le16(seg_len);
		remain_len -= seg_len;
		page_info++;

		/* fill sg descriptors - pages[1..n] */
		for (j = 0; j < nfrags - 1; j++) {
			if (page_info->page) /* recycle the sg buffer */
				continue;

			sg_elem = &sg_desc->elems[j];
			if (unlikely(ionic_rx_page_alloc(q, page_info))) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
			sg_elem->len = cpu_to_le16(seg_len);
			remain_len -= seg_len;
			page_info++;
		}

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_desc *desc;
	unsigned int i;
	u16 idx;

	idx = q->tail_idx;
	while (idx != q->head_idx) {
		desc_info = &q->info[idx];
		desc = desc_info->desc;
		desc->addr = 0;
		desc->len = 0;

		for (i = 0; i < desc_info->npages; i++)
			ionic_rx_page_free(q, &desc_info->pages[i]);

		desc_info->cb_arg = NULL;
		idx = (idx + 1) & (q->num_descs - 1);
	}
}

static void ionic_dim_update(struct ionic_qcq *qcq)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
			     lif->rxqcqs[qi]->intr.index,
			     qcq->intr.dim_coal_hw);

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  lif->txqstats[qi].pkts,
			  lif->txqstats[qi].bytes,
			  &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

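/* rx napi: service completions up to the budget, refill the rx ring
 * with fresh buffers, then return credits and rearm the interrupt if
 * the budget wasn't exhausted.
 */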
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);
	if (rx_work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

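/* Unmap and complete a single tx descriptor.  The descriptor command
 * word tells us whether the buffer was mapped with dma_map_single()
 * (non-TSO, or the first descriptor of a TSO) or with dma_map_page().
 */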
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if either this is not TSO,
	 * or this is first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_txq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	u16 index;

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

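/* Write and post one descriptor of a TSO.  Only the final (EOT)
 * descriptor carries the skb as its completion argument, so the skb
 * is freed exactly once, and only it may ring the doorbell.
 */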
static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;

	*elem = sg_desc->elems;
	return desc;
}

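/* Carve the skb into mss-sized segments: first the linear data, then
 * the page frags.  Each segment gets its own descriptor, with sg
 * elements used when a frag continues a partially-filled segment; on
 * any DMA mapping failure everything built so far is unmapped and the
 * queue head is rewound.
 */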
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *rewind_desc_info;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	u16 abort = q->head_idx;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	u16 rewind = abort;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */

	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop skb frags into desc segments */

	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				elem->addr =
				    cpu_to_le64(ionic_tx_map_frag(q, frag,
								  offset, len));
				if (dma_mapping_error(dev, elem->addr))
					goto err_out_abort;
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;
	stats->tso_bytes += total_bytes;

	return 0;

err_out_abort:
	while (rewind != q->head_idx) {
		rewind_desc_info = &q->info[rewind];
		ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
		rewind = (rewind + 1) & (q->num_descs - 1);
	}
	q->head_idx = abort;

	return -ENOMEM;
}

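/* Build the first descriptor for a packet using checksum offload:
 * the device computes the checksum from the start/offset the stack
 * supplied (the CHECKSUM_PARTIAL path).
 */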
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	stats->csum_none++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

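/* Estimate how many descriptors the skb needs; a non-TSO skb whose
 * frags don't fit the sg list is linearized so it fits in one.
 */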
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* If TSO, need roundup(skb->len/mss) descs */
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= sg_elems)
		return 1;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	/* Need 1 desc and zero sg elems */
	return 1;
}

static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}