Lines matching refs:q — references to the Tx queue pointer q in funeth_tx.c. Each entry shows the source line number, the matching code, the enclosing function, and whether q is declared there as an argument or a local.

56 static void *txq_end(const struct funeth_txq *q)  in txq_end()  argument
58 return (void *)q->hw_wb; in txq_end()
64 static unsigned int txq_to_end(const struct funeth_txq *q, void *p) in txq_to_end() argument
66 return txq_end(q) - p; in txq_to_end()
78 static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q, in fun_write_gl() argument
90 i < ngle && txq_to_end(q, gle); i++, gle++) in fun_write_gl()
93 if (txq_to_end(q, gle) == 0) { in fun_write_gl()
94 gle = (struct fun_dataop_gl *)q->desc; in fun_write_gl()
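
The gather-list writer at lines 90-94 treats the descriptor area as a ring: entries are written until the end of the ring is reached (txq_to_end() returning 0), then the write pointer wraps back to q->desc. A minimal standalone sketch of that wrap, with simplified types rather than the driver's fun_dataop_gl layout:

#include <stdio.h>

#define RING_ENTRIES 8                  /* illustrative ring size */

struct entry { unsigned long data; };

static struct entry ring[RING_ENTRIES];

/* Write n entries starting at 'start', wrapping to ring[0] when the end
 * of the ring is reached, analogous to fun_write_gl() wrapping to q->desc. */
static struct entry *write_entries(unsigned int start, unsigned int n,
				   const unsigned long *vals)
{
	struct entry *e = &ring[start];
	unsigned int i;

	for (i = 0; i < n && e < &ring[RING_ENTRIES]; i++, e++)
		e->data = vals[i];

	if (e == &ring[RING_ENTRIES])   /* hit the end of the ring: wrap */
		e = ring;

	for (; i < n; i++, e++)
		e->data = vals[i];

	return e;                       /* next free slot */
}

int main(void)
{
	unsigned long vals[] = { 1, 2, 3, 4 };

	write_entries(6, 4, vals);      /* fills slots 6, 7, then 0, 1 */
	printf("ring[0]=%lu ring[7]=%lu\n", ring[0].data, ring[7].data);
	return 0;
}
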
107 static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q, in fun_tls_tx() argument
132 FUN_QSTAT_INC(q, tx_tls_fallback); in fun_tls_tx()
135 FUN_QSTAT_INC(q, tx_tls_drops); in fun_tls_tx()
149 static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q, in write_pkt_desc() argument
153 unsigned int idx = q->prod_cnt & q->mask; in write_pkt_desc()
165 if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data, in write_pkt_desc()
167 FUN_QSTAT_INC(q, tx_map_err); in write_pkt_desc()
171 req = fun_tx_desc_addr(q, idx); in write_pkt_desc()
218 FUN_QSTAT_INC(q, tx_encap_tso); in write_pkt_desc()
236 FUN_QSTAT_INC(q, tx_uso); in write_pkt_desc()
253 FUN_QSTAT_INC(q, tx_tso); in write_pkt_desc()
256 u64_stats_update_begin(&q->syncp); in write_pkt_desc()
257 q->stats.tx_cso += shinfo->gso_segs; in write_pkt_desc()
258 u64_stats_update_end(&q->syncp); in write_pkt_desc()
269 FUN_QSTAT_INC(q, tx_cso); in write_pkt_desc()
277 gle = fun_write_gl(q, req, addrs, lens, ngle); in write_pkt_desc()
290 u64_stats_update_begin(&q->syncp); in write_pkt_desc()
291 q->stats.tx_tls_bytes += tls_len; in write_pkt_desc()
292 q->stats.tx_tls_pkts += 1 + extra_pkts; in write_pkt_desc()
293 u64_stats_update_end(&q->syncp); in write_pkt_desc()
296 u64_stats_update_begin(&q->syncp); in write_pkt_desc()
297 q->stats.tx_bytes += skb->len + extra_bytes; in write_pkt_desc()
298 q->stats.tx_pkts += 1 + extra_pkts; in write_pkt_desc()
299 u64_stats_update_end(&q->syncp); in write_pkt_desc()
301 q->info[idx].skb = skb; in write_pkt_desc()
303 trace_funeth_tx(q, skb->len, idx, req->dataop.ngather); in write_pkt_desc()
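
The slot index at line 153 comes from a free-running producer counter masked with ring_size - 1; because the ring size is a power of two, the index stays valid even when the 32-bit counter wraps, and fun_txq_avail() at line 313 relies on the same unsigned arithmetic. A tiny standalone illustration of that invariant (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 512;           /* must be a power of two */
	unsigned int mask = ring_size - 1;
	unsigned int prod_cnt = 0xfffffffe;     /* about to wrap */

	/* Same slot arithmetic as 'q->prod_cnt & q->mask' at line 153:
	 * masking the free-running counter keeps yielding a valid slot
	 * index straight through the 32-bit wrap. */
	for (int i = 0; i < 4; i++, prod_cnt++)
		printf("prod_cnt=%#x -> slot %u\n", prod_cnt, prod_cnt & mask);

	return 0;
}
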
311 static unsigned int fun_txq_avail(const struct funeth_txq *q) in fun_txq_avail() argument
313 return q->mask - q->prod_cnt + q->cons_cnt; in fun_txq_avail()
317 static void fun_tx_check_stop(struct funeth_txq *q) in fun_tx_check_stop() argument
319 if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC)) in fun_tx_check_stop()
322 netif_tx_stop_queue(q->ndq); in fun_tx_check_stop()
329 if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC)) in fun_tx_check_stop()
330 FUN_QSTAT_INC(q, tx_nstops); in fun_tx_check_stop()
332 netif_tx_start_queue(q->ndq); in fun_tx_check_stop()
338 static bool fun_txq_may_restart(struct funeth_txq *q) in fun_txq_may_restart() argument
340 return fun_txq_avail(q) >= q->mask / 4; in fun_txq_may_restart()
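
Lines 311-340 implement the usual Tx flow-control hysteresis: the queue is stopped once a maximally fragmented packet might no longer fit (FUNETH_MAX_PKT_DESC descriptors) and is only restarted after at least a quarter of the ring has drained, so it does not flap between states. A simplified standalone model follows; the threshold is a stand-in for FUNETH_MAX_PKT_DESC, and the smp_mb() re-check at lines 329-332 that closes the race with the reclaim path is omitted for brevity.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PKT_DESC 8                  /* stand-in for FUNETH_MAX_PKT_DESC */

struct txq {
	unsigned int prod_cnt, cons_cnt, mask;
	bool stopped;
};

static unsigned int txq_avail(const struct txq *q)
{
	return q->mask - q->prod_cnt + q->cons_cnt;     /* as fun_txq_avail() */
}

/* Stop once a worst-case packet may not fit (cf. fun_tx_check_stop()). */
static void tx_check_stop(struct txq *q)
{
	if (txq_avail(q) < MAX_PKT_DESC)
		q->stopped = true;
}

/* Restart only after a quarter of the ring is free (cf. fun_txq_may_restart()). */
static void tx_check_restart(struct txq *q)
{
	if (q->stopped && txq_avail(q) >= q->mask / 4)
		q->stopped = false;
}

int main(void)
{
	struct txq q = { .mask = 511 };

	q.prod_cnt = 508;               /* ring nearly full */
	tx_check_stop(&q);
	printf("after fill: stopped=%d\n", q.stopped);

	q.cons_cnt = 200;               /* reclaim drains the ring */
	tx_check_restart(&q);
	printf("after reclaim: stopped=%d\n", q.stopped);
	return 0;
}
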
347 struct funeth_txq *q = fp->txqs[qid]; in fun_start_xmit() local
352 skb = fun_tls_tx(skb, q, &tls_len); in fun_start_xmit()
357 ndesc = write_pkt_desc(skb, q, tls_len); in fun_start_xmit()
363 q->prod_cnt += ndesc; in fun_start_xmit()
364 fun_tx_check_stop(q); in fun_start_xmit()
368 if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more())) in fun_start_xmit()
369 fun_txq_wr_db(q); in fun_start_xmit()
371 FUN_QSTAT_INC(q, tx_more); in fun_start_xmit()
380 fun_txq_wr_db(q); in fun_start_xmit()
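
Lines 368-371 defer the doorbell write while the stack indicates further packets are on the way (netdev_xmit_more()), so a burst costs one MMIO write instead of one per packet. A rough standalone model of that decision, where 'more' and kick() stand in for netdev_xmit_more() and fun_txq_wr_db(); the byte-queue-limits check inside __netdev_tx_sent_queue(), which can force an earlier flush, is ignored here.

#include <stdbool.h>
#include <stdio.h>

static unsigned int doorbell_writes;

static void kick(void)                  /* stand-in for fun_txq_wr_db() */
{
	doorbell_writes++;
}

/* Queue one packet; ring the doorbell only when no further packets are
 * pending, mirroring the pattern at lines 368-371. */
static void xmit_one(bool more)
{
	if (!more)
		kick();
}

int main(void)
{
	/* A burst of four packets: only the last one rings the doorbell. */
	xmit_one(true);
	xmit_one(true);
	xmit_one(true);
	xmit_one(false);

	printf("doorbell writes for 4 packets: %u\n", doorbell_writes);
	return 0;
}
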
385 static u16 txq_hw_head(const struct funeth_txq *q) in txq_hw_head() argument
387 return (u16)be64_to_cpu(*q->hw_wb); in txq_hw_head()
393 static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx) in fun_unmap_pkt() argument
395 const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx); in fun_unmap_pkt()
401 dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data), in fun_unmap_pkt()
404 for (gle++; --ngle && txq_to_end(q, gle); gle++) in fun_unmap_pkt()
405 dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data), in fun_unmap_pkt()
409 for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++) in fun_unmap_pkt()
410 dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data), in fun_unmap_pkt()
423 static bool fun_txq_reclaim(struct funeth_txq *q, int budget) in fun_txq_reclaim() argument
431 for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; in fun_txq_reclaim()
432 head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) { in fun_txq_reclaim()
441 unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx); in fun_txq_reclaim()
442 struct sk_buff *skb = q->info[reclaim_idx].skb; in fun_txq_reclaim()
444 trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head); in fun_txq_reclaim()
449 reclaim_idx = (reclaim_idx + pkt_desc) & q->mask; in fun_txq_reclaim()
454 q->cons_cnt += ndesc; in fun_txq_reclaim()
455 netdev_tx_completed_queue(q->ndq, npkts, nbytes); in fun_txq_reclaim()
458 if (unlikely(netif_tx_queue_stopped(q->ndq) && in fun_txq_reclaim()
459 fun_txq_may_restart(q))) { in fun_txq_reclaim()
460 netif_tx_wake_queue(q->ndq); in fun_txq_reclaim()
461 FUN_QSTAT_INC(q, tx_nrestarts); in fun_txq_reclaim()
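
fun_txq_reclaim() (lines 423-461) walks completed descriptors between the software consumer index and the head index the hardware writes back to host memory (txq_hw_head(), line 387). A simplified standalone model of that walk, assuming one descriptor per packet and skipping the DMA unmap, skb free, and BQL accounting the real loop performs:

#include <stdio.h>

struct txq {
	unsigned int cons_cnt, mask;
	unsigned int hw_head;           /* head index written back by hardware */
};

/* Reclaim up to 'budget' completed packets, as fun_txq_reclaim() does. */
static unsigned int txq_reclaim(struct txq *q, unsigned int budget)
{
	unsigned int npkts = 0;
	unsigned int idx = q->cons_cnt & q->mask;

	while (idx != q->hw_head && npkts < budget) {
		/* real driver: unmap DMA, free the skb, feed BQL here */
		idx = (idx + 1) & q->mask;
		npkts++;
	}

	q->cons_cnt += npkts;
	return npkts;
}

int main(void)
{
	struct txq q = { .cons_cnt = 0, .mask = 511, .hw_head = 7 };

	printf("reclaimed %u packets\n", txq_reclaim(&q, 64));
	return 0;
}
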
471 struct funeth_txq *q = irq->txq; in fun_txq_napi_poll() local
474 if (fun_txq_reclaim(q, budget)) in fun_txq_napi_poll()
478 db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask); in fun_txq_napi_poll()
479 writel(db_val, q->db); in fun_txq_napi_poll()
484 static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget) in fun_xdpq_clean() argument
488 for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; in fun_xdpq_clean()
489 head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) { in fun_xdpq_clean()
498 unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx); in fun_xdpq_clean()
500 xdp_return_frame(q->info[reclaim_idx].xdpf); in fun_xdpq_clean()
502 trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head); in fun_xdpq_clean()
504 reclaim_idx = (reclaim_idx + pkt_desc) & q->mask; in fun_xdpq_clean()
510 q->cons_cnt += ndesc; in fun_xdpq_clean()
514 bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf) in fun_xdp_tx() argument
522 if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES) in fun_xdp_tx()
523 fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH); in fun_xdp_tx()
534 if (unlikely(fun_txq_avail(q) < ndesc)) { in fun_xdp_tx()
535 FUN_QSTAT_INC(q, tx_xdp_full); in fun_xdp_tx()
539 if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma, in fun_xdp_tx()
541 FUN_QSTAT_INC(q, tx_map_err); in fun_xdp_tx()
545 idx = q->prod_cnt & q->mask; in fun_xdp_tx()
546 req = fun_tx_desc_addr(q, idx); in fun_xdp_tx()
556 fun_write_gl(q, req, dma, lens, nfrags); in fun_xdp_tx()
558 q->info[idx].xdpf = xdpf; in fun_xdp_tx()
560 u64_stats_update_begin(&q->syncp); in fun_xdp_tx()
561 q->stats.tx_bytes += tot_len; in fun_xdp_tx()
562 q->stats.tx_pkts++; in fun_xdp_tx()
563 u64_stats_update_end(&q->syncp); in fun_xdp_tx()
565 trace_funeth_tx(q, tot_len, idx, nfrags); in fun_xdp_tx()
566 q->prod_cnt += ndesc; in fun_xdp_tx()
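
fun_xdp_tx() (lines 514-566) has no dedicated completion path: when free space drops below FUN_XDP_CLEAN_THRES (line 522) it reclaims a batch of completed XDP descriptors inline before writing the new one. A simplified standalone model of that lazy-clean policy; the threshold and batch values are stand-ins, and mapping/descriptor writing is elided.

#include <stdbool.h>
#include <stdio.h>

#define CLEAN_THRES 32                  /* stand-in for FUN_XDP_CLEAN_THRES */
#define CLEAN_BATCH 16                  /* stand-in for FUN_XDP_CLEAN_BATCH */

struct xdpq {
	unsigned int prod_cnt, cons_cnt, mask;
	unsigned int completed;         /* descriptors hardware has finished */
};

static unsigned int xdpq_avail(const struct xdpq *q)
{
	return q->mask - q->prod_cnt + q->cons_cnt;
}

/* Reclaim up to 'budget' completed descriptors (cf. fun_xdpq_clean()). */
static void xdpq_clean(struct xdpq *q, unsigned int budget)
{
	while (budget-- && q->completed) {
		q->completed--;
		q->cons_cnt++;
	}
}

/* Queue one XDP frame, cleaning lazily first (cf. fun_xdp_tx()). */
static bool xdp_tx(struct xdpq *q)
{
	if (xdpq_avail(q) < CLEAN_THRES)
		xdpq_clean(q, CLEAN_BATCH);

	if (xdpq_avail(q) < 1)
		return false;           /* ring still full: drop the frame */

	q->prod_cnt++;
	return true;
}

int main(void)
{
	struct xdpq q = { .prod_cnt = 100, .cons_cnt = 0, .mask = 127,
			  .completed = 90 };

	printf("tx ok: %d, avail now %u\n", xdp_tx(&q), xdpq_avail(&q));
	return 0;
}
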
575 struct funeth_txq *q, **xdpqs; in fun_xdp_xmit_frames() local
589 for (q = xdpqs[q_idx], i = 0; i < n; i++) in fun_xdp_xmit_frames()
590 if (!fun_xdp_tx(q, frames[i])) in fun_xdp_xmit_frames()
594 fun_txq_wr_db(q); in fun_xdp_xmit_frames()
601 static void fun_txq_purge(struct funeth_txq *q) in fun_txq_purge() argument
603 while (q->cons_cnt != q->prod_cnt) { in fun_txq_purge()
604 unsigned int idx = q->cons_cnt & q->mask; in fun_txq_purge()
606 q->cons_cnt += fun_unmap_pkt(q, idx); in fun_txq_purge()
607 dev_kfree_skb_any(q->info[idx].skb); in fun_txq_purge()
609 netdev_tx_reset_queue(q->ndq); in fun_txq_purge()
612 static void fun_xdpq_purge(struct funeth_txq *q) in fun_xdpq_purge() argument
614 while (q->cons_cnt != q->prod_cnt) { in fun_xdpq_purge()
615 unsigned int idx = q->cons_cnt & q->mask; in fun_xdpq_purge()
617 q->cons_cnt += fun_unmap_pkt(q, idx); in fun_xdpq_purge()
618 xdp_return_frame(q->info[idx].xdpf); in fun_xdpq_purge()
629 struct funeth_txq *q; in fun_txq_create_sw() local
637 q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node); in fun_txq_create_sw()
638 if (!q) in fun_txq_create_sw()
641 q->dma_dev = &fp->pdev->dev; in fun_txq_create_sw()
642 q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE, in fun_txq_create_sw()
643 sizeof(*q->info), true, numa_node, in fun_txq_create_sw()
644 &q->dma_addr, (void **)&q->info, in fun_txq_create_sw()
645 &q->hw_wb); in fun_txq_create_sw()
646 if (!q->desc) in fun_txq_create_sw()
649 q->netdev = dev; in fun_txq_create_sw()
650 q->mask = ndesc - 1; in fun_txq_create_sw()
651 q->qidx = qidx; in fun_txq_create_sw()
652 q->numa_node = numa_node; in fun_txq_create_sw()
653 u64_stats_init(&q->syncp); in fun_txq_create_sw()
654 q->init_state = FUN_QSTATE_INIT_SW; in fun_txq_create_sw()
655 return q; in fun_txq_create_sw()
658 kfree(q); in fun_txq_create_sw()
665 static void fun_txq_free_sw(struct funeth_txq *q) in fun_txq_free_sw() argument
667 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_txq_free_sw()
669 fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true, in fun_txq_free_sw()
670 q->desc, q->dma_addr, q->info); in fun_txq_free_sw()
672 fp->tx_packets += q->stats.tx_pkts; in fun_txq_free_sw()
673 fp->tx_bytes += q->stats.tx_bytes; in fun_txq_free_sw()
674 fp->tx_dropped += q->stats.tx_map_err; in fun_txq_free_sw()
676 kfree(q); in fun_txq_free_sw()
680 int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq) in fun_txq_create_dev() argument
682 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_txq_create_dev()
683 unsigned int irq_idx, ndesc = q->mask + 1; in fun_txq_create_dev()
686 q->irq = irq; in fun_txq_create_dev()
687 *q->hw_wb = 0; in fun_txq_create_dev()
688 q->prod_cnt = 0; in fun_txq_create_dev()
689 q->cons_cnt = 0; in fun_txq_create_dev()
696 q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec, in fun_txq_create_dev()
698 &q->hw_qid, &q->db); in fun_txq_create_dev()
702 err = fun_create_and_bind_tx(fp, q->hw_qid); in fun_txq_create_dev()
705 q->ethid = err; in fun_txq_create_dev()
708 irq->txq = q; in fun_txq_create_dev()
709 q->ndq = netdev_get_tx_queue(q->netdev, q->qidx); in fun_txq_create_dev()
710 q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, in fun_txq_create_dev()
712 writel(q->irq_db_val, q->db); in fun_txq_create_dev()
715 q->init_state = FUN_QSTATE_INIT_FULL; in fun_txq_create_dev()
716 netif_info(fp, ifup, q->netdev, in fun_txq_create_dev()
718 irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx, in fun_txq_create_dev()
719 q->ethid, q->numa_node); in fun_txq_create_dev()
723 fun_destroy_sq(fp->fdev, q->hw_qid); in fun_txq_create_dev()
725 netdev_err(q->netdev, in fun_txq_create_dev()
727 irq ? "Tx" : "XDP", q->qidx, err); in fun_txq_create_dev()
731 static void fun_txq_free_dev(struct funeth_txq *q) in fun_txq_free_dev() argument
733 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_txq_free_dev()
735 if (q->init_state < FUN_QSTATE_INIT_FULL) in fun_txq_free_dev()
738 netif_info(fp, ifdown, q->netdev, in fun_txq_free_dev()
740 q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid, in fun_txq_free_dev()
741 q->irq ? q->irq->irq_idx : 0, q->ethid); in fun_txq_free_dev()
743 fun_destroy_sq(fp->fdev, q->hw_qid); in fun_txq_free_dev()
744 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid); in fun_txq_free_dev()
746 if (q->irq) { in fun_txq_free_dev()
747 q->irq->txq = NULL; in fun_txq_free_dev()
748 fun_txq_purge(q); in fun_txq_free_dev()
750 fun_xdpq_purge(q); in fun_txq_free_dev()
753 q->init_state = FUN_QSTATE_INIT_SW; in fun_txq_free_dev()
763 struct funeth_txq *q = *qp; in funeth_txq_create() local
766 if (!q) in funeth_txq_create()
767 q = fun_txq_create_sw(dev, qidx, ndesc, irq); in funeth_txq_create()
768 if (!q) in funeth_txq_create()
771 if (q->init_state >= state) in funeth_txq_create()
774 err = fun_txq_create_dev(q, irq); in funeth_txq_create()
777 fun_txq_free_sw(q); in funeth_txq_create()
782 *qp = q; in funeth_txq_create()
789 struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state) in funeth_txq_free() argument
792 fun_txq_free_dev(q); in funeth_txq_free()
795 fun_txq_free_sw(q); in funeth_txq_free()
796 q = NULL; in funeth_txq_free()
799 return q; in funeth_txq_free()
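
funeth_txq_create() and funeth_txq_free() (lines 763-799) move a queue between two init levels, FUN_QSTATE_INIT_SW (host memory allocated) and FUN_QSTATE_INIT_FULL (also instantiated on the device), so the device side can be torn down and rebuilt without releasing host resources. A schematic standalone model of that two-level teardown, using hypothetical names rather than the driver's helpers:

#include <stdio.h>
#include <stdlib.h>

enum qstate { QSTATE_DESTROYED, QSTATE_INIT_SW, QSTATE_INIT_FULL };

struct txq { enum qstate state; };

/* Tear a queue down to 'target': the device-side queue goes away whenever
 * the target is below INIT_FULL (cf. fun_txq_free_dev()); host memory is
 * released only when destroying the queue completely (cf. fun_txq_free_sw()). */
static struct txq *txq_free(struct txq *q, enum qstate target)
{
	if (target < QSTATE_INIT_FULL && q->state == QSTATE_INIT_FULL)
		q->state = QSTATE_INIT_SW;

	if (target == QSTATE_DESTROYED) {
		free(q);
		q = NULL;
	}
	return q;
}

int main(void)
{
	struct txq *q = malloc(sizeof(*q));

	q->state = QSTATE_INIT_FULL;
	q = txq_free(q, QSTATE_INIT_SW);        /* keeps host-side state */
	printf("after partial free: %s\n", q ? "kept" : "gone");
	q = txq_free(q, QSTATE_DESTROYED);      /* releases everything */
	printf("after full free: %s\n", q ? "kept" : "gone");
	return 0;
}
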