// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;
	struct page *page;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	buf_info->dma_addr = dma_map_page(dev, page, 0,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}

static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

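/* Build a frag-based skb from the buffers posted for this descriptor;
 * buffer pages that cannot be recycled are unmapped and handed off to the skb.
 */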
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

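/* Handle one received packet: build the skb, then fill in the RSS hash,
 * checksum, VLAN and hardware-timestamp metadata before handing it to GRO.
 */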
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

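/* Refill the rx ring with page buffers, using sg elements for frames that
 * span more than one buffer, and ring the doorbell for the new descriptors.
 */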
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = ionic_q_space_avail(q); i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	return work_done;
}

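/* NAPI poll handler for an rx-only interrupt: service rx completions,
 * refill the ring when enough descriptors have been used, and update
 * the interrupt credits when polling is complete.
 */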
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u16 rx_fill_threshold;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  cq->num_descs / IONIC_RX_FILL_DIV);
	if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	return work_done;
}

int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u16 rx_fill_threshold;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  rxcq->num_descs / IONIC_RX_FILL_DIV);
	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

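/* DMA-map the skb head and each of its fragments into the descriptor's
 * buffer info array, unwinding any mappings already made on failure.
 */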
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (!dma_addr) {
		/* ionic_tx_map_single() returns 0 on a mapping error */
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
			 desc_info->bufs[0].len, DMA_TO_DEVICE);
	return -EIO;
}

static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	unsigned int i;

	if (!desc_info->nbufs)
		return;

	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	buf_info++;
	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);

	desc_info->nbufs = 0;
}

static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	u16 qi;

	ionic_tx_desc_unmap_bufs(q, desc_info);

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

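/* Service one tx completion, which may cover several posted descriptors. */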
bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

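/* Fill and post one TSO descriptor; only the first descriptor of the
 * series carries the skb for completion handling and BQL accounting.
 */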
static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (start) {
		skb_tx_timestamp(skb);
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

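/* Segment an skb for TSO: walk the DMA-mapped buffers and post a
 * descriptor (plus sg elements as needed) for each MSS-sized segment.
 */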
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

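/* Set up a checksum-offload descriptor using the csum start/offset
 * already recorded in the skb.
 */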
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;
}

static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				  struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->csum_none++;
}

static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ionic_tx_calc_csum(q, skb, desc_info);
	else
		ionic_tx_calc_no_csum(q, skb, desc_info);

	/* add frags */
	ionic_tx_skb_frags(q, skb, desc_info);

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

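/* Estimate how many descriptors the skb will need, linearizing it if
 * it carries more fragments than one sg descriptor can hold.
 */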
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
		return ndescs;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}