Lines matching refs: q. These are uses of the receive-queue pointer (struct funeth_rxq *q) in the funeth RX path; each entry shows the source line number, the matching code, and the enclosing function, with "argument" or "local" marking where q is declared.

50 static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)  in cache_offer()  argument
52 struct funeth_rx_cache *c = &q->cache; in cache_offer()
58 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_offer()
67 static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb) in cache_get() argument
69 struct funeth_rx_cache *c = &q->cache; in cache_get()
77 dma_sync_single_for_device(q->dma_dev, buf->dma_addr, in cache_get()
88 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_get()
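The two cache helpers above implement a small per-queue page-recycling ring: cache_offer() parks a still-mapped page for later reuse and falls back to unmapping and releasing it when the cache is full, while cache_get() hands a recycled page back to the RX path once it is idle again. A rough sketch of that shape, inferred from the matched lines; the prod_cnt/cons_cnt bookkeeping and the page_ref_count() idle test are assumptions beyond what the listing shows, and the second unmap at line 88 (dropping a still-busy entry from a full cache) is omitted:

static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
{
        struct funeth_rx_cache *c = &q->cache;

        if (c->prod_cnt - c->cons_cnt <= c->mask) {
                /* room in the cache: remember the mapped page for reuse */
                c->bufs[c->prod_cnt & c->mask] = *buf;
                c->prod_cnt++;
        } else {
                /* cache full: release the DMA mapping and the page */
                dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
                                     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
                put_page(buf->page);
        }
}

static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{
        struct funeth_rx_cache *c = &q->cache;
        struct funeth_rxbuf *buf;

        if (c->prod_cnt == c->cons_cnt)
                return false;                   /* cache empty */

        buf = &c->bufs[c->cons_cnt & c->mask];
        if (page_ref_count(buf->page) == buf->pg_refs) {
                /* page is idle again: hand it back to the device */
                dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
                                           PAGE_SIZE, DMA_FROM_DEVICE);
                *rb = *buf;
                buf->page = NULL;
                c->cons_cnt++;
                return true;
        }
        return false;
}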
98 static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb, in funeth_alloc_page() argument
103 if (cache_get(q, rb)) in funeth_alloc_page()
110 rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE, in funeth_alloc_page()
112 if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) { in funeth_alloc_page()
113 FUN_QSTAT_INC(q, rx_map_err); in funeth_alloc_page()
118 FUN_QSTAT_INC(q, rx_page_alloc); in funeth_alloc_page()
127 static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb) in funeth_free_page() argument
130 dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE, in funeth_free_page()
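funeth_alloc_page() above tries the recycle cache first and only allocates and DMA-maps a fresh page when that fails, counting rx_map_err on mapping failures and rx_page_alloc on successful allocations; funeth_free_page() is its inverse (unmap plus page release). A minimal sketch of the allocation path, inferred from the matched lines; the __alloc_pages_node() call and the final field initialization are assumptions:

static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
                             int node, gfp_t gfp)
{
        struct page *p;

        if (cache_get(q, rb))
                return 0;                       /* recycled a cached page */

        p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
        if (unlikely(!p))
                return -ENOMEM;

        rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
                                    DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
                FUN_QSTAT_INC(q, rx_map_err);
                __free_page(p);
                return -ENOMEM;
        }

        FUN_QSTAT_INC(q, rx_page_alloc);
        rb->page = p;
        rb->pg_refs = 1;                /* assumption: initial reference bookkeeping */
        rb->node = page_to_nid(p);
        return 0;
}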
141 static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va, in fun_run_xdp() argument
151 &q->xdp_rxq); in fun_run_xdp()
155 xdp_prog = READ_ONCE(q->xdp_prog); in fun_run_xdp()
171 FUN_QSTAT_INC(q, xdp_tx); in fun_run_xdp()
172 q->xdp_flush |= FUN_XDP_FLUSH_TX; in fun_run_xdp()
177 if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog))) in fun_run_xdp()
179 FUN_QSTAT_INC(q, xdp_redir); in fun_run_xdp()
180 q->xdp_flush |= FUN_XDP_FLUSH_REDIR; in fun_run_xdp()
183 bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act); in fun_run_xdp()
186 trace_xdp_exception(q->netdev, xdp_prog, act); in fun_run_xdp()
188 q->cur_buf->pg_refs++; /* return frags' page reference */ in fun_run_xdp()
189 FUN_QSTAT_INC(q, xdp_err); in fun_run_xdp()
192 q->cur_buf->pg_refs++; in fun_run_xdp()
193 FUN_QSTAT_INC(q, xdp_drops); in fun_run_xdp()
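fun_run_xdp() above is the per-packet XDP hook: it builds an xdp_buff tied to q->xdp_rxq, runs the program fetched with READ_ONCE(q->xdp_prog), and dispatches on the verdict; XDP_TX and XDP_REDIRECT only mark a deferred flush in q->xdp_flush, and every failure or drop path returns the fragment's page reference through q->cur_buf->pg_refs. A condensed sketch of the dispatch, inferred from the matched lines; the xdp_buff setup and the actual XDP_TX enqueue onto xdp_q are driver specific and omitted:

        u32 act = bpf_prog_run_xdp(xdp_prog, &xdp);

        switch (act) {
        case XDP_PASS:
                break;                          /* continue on the normal skb path */
        case XDP_TX:
                /* enqueue onto xdp_q omitted from this sketch */
                FUN_QSTAT_INC(q, xdp_tx);
                q->xdp_flush |= FUN_XDP_FLUSH_TX;       /* kick the ring once per poll */
                break;
        case XDP_REDIRECT:
                if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
                        goto xdp_err;
                FUN_QSTAT_INC(q, xdp_redir);
                q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(q->netdev, xdp_prog, act);
xdp_err:
                q->cur_buf->pg_refs++;          /* return the frag's page reference */
                FUN_QSTAT_INC(q, xdp_err);
                break;
        case XDP_DROP:
                q->cur_buf->pg_refs++;
                FUN_QSTAT_INC(q, xdp_drops);
                break;
        }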
256 get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len) in get_buf() argument
258 if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset) in get_buf()
268 buf->node == numa_mem_id()) || !q->spare_buf.page) { in get_buf()
269 dma_sync_single_for_device(q->dma_dev, buf->dma_addr, in get_buf()
273 cache_offer(q, buf); in get_buf()
274 *buf = q->spare_buf; in get_buf()
275 q->spare_buf.page = NULL; in get_buf()
276 q->rqes[q->rq_cons & q->rq_mask] = in get_buf()
279 q->buf_offset = 0; in get_buf()
280 q->rq_cons++; in get_buf()
281 return &q->bufs[q->rq_cons & q->rq_mask]; in get_buf()
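get_buf() above decides whether the current page can still hold the next len bytes; when it cannot, the exhausted buffer is either re-posted directly (if its page is idle and on the local NUMA node, or no spare buffer is available) or recycled through cache_offer() and replaced by the pre-allocated spare, after which rq_cons advances to the next ring slot. A sketch inferred from the matched lines; the page_ref_count() half of the condition at line 268, the reference reset in the reuse branch, and the right-hand side at line 276 are assumptions:

static struct funeth_rxbuf *
get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
{
        if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
                return buf;                     /* still fits in the current page */

        /* Page exhausted: reuse it if nobody else holds it and it is local,
         * otherwise recycle it and switch to the pre-allocated spare.
         */
        if ((page_ref_count(buf->page) == buf->pg_refs &&
             buf->node == numa_mem_id()) || !q->spare_buf.page) {
                dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
                                           PAGE_SIZE, DMA_FROM_DEVICE);
                buf->pg_refs = 1;               /* assumption: reset local refs */
        } else {
                cache_offer(q, buf);
                *buf = q->spare_buf;
                q->spare_buf.page = NULL;
                q->rqes[q->rq_cons & q->rq_mask] =
                        FUN_EPRQ_RQBUF_INIT(buf->dma_addr);     /* as at line 559 */
        }
        q->buf_offset = 0;
        q->rq_cons++;
        return &q->bufs[q->rq_cons & q->rq_mask];
}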
296 static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len, in fun_gather_pkt() argument
299 struct funeth_rxbuf *buf = q->cur_buf; in fun_gather_pkt()
304 buf = get_buf(q, buf, tot_len); in fun_gather_pkt()
312 if (!q->spare_buf.page && in fun_gather_pkt()
313 funeth_alloc_page(q, &q->spare_buf, numa_mem_id(), in fun_gather_pkt()
318 PAGE_SIZE - q->buf_offset); in fun_gather_pkt()
319 dma_sync_single_for_cpu(q->dma_dev, in fun_gather_pkt()
320 buf->dma_addr + q->buf_offset, in fun_gather_pkt()
326 skb_frag_fill_page_desc(frags++, buf->page, q->buf_offset, in fun_gather_pkt()
333 q->buf_offset = PAGE_SIZE; in fun_gather_pkt()
335 q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN); in fun_gather_pkt()
336 q->cur_buf = buf; in fun_gather_pkt()
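fun_gather_pkt() above walks a packet across one or more pages: each round asks get_buf() for a buffer with room, syncs the DMA range for the CPU, and fills the next skb fragment at q->buf_offset, and the final offset is rounded up to FUN_EPRQ_PKT_ALIGN for the next packet. A simplified sketch of the gathering loop, inferred from the matched lines; the spare-buffer top-up at lines 312-313 and the ref_ok return value are omitted, and the loop structure is an assumption:

        for (;;) {
                buf = get_buf(q, buf, tot_len);
                frag_len = min_t(unsigned int, tot_len,
                                 PAGE_SIZE - q->buf_offset);
                dma_sync_single_for_cpu(q->dma_dev,
                                        buf->dma_addr + q->buf_offset,
                                        frag_len, DMA_FROM_DEVICE);
                skb_frag_fill_page_desc(frags++, buf->page, q->buf_offset,
                                        frag_len);
                tot_len -= frag_len;
                if (!tot_len)
                        break;
                q->buf_offset = PAGE_SIZE;      /* exhausted: force a page switch */
        }
        q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
        q->cur_buf = buf;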
348 static void advance_cq(struct funeth_rxq *q) in advance_cq() argument
350 if (unlikely(q->cq_head == q->cq_mask)) { in advance_cq()
351 q->cq_head = 0; in advance_cq()
352 q->phase ^= 1; in advance_cq()
353 q->next_cqe_info = cqe_to_info(q->cqes); in advance_cq()
355 q->cq_head++; in advance_cq()
356 q->next_cqe_info += FUNETH_CQE_SIZE; in advance_cq()
358 prefetch(q->next_cqe_info); in advance_cq()
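advance_cq() above shows the driver's phase-bit completion queue: there is no producer index to read, instead q->phase flips every time cq_head wraps, and a CQE whose phase bit does not match q->phase has not been written by the device yet (see the cqe_phase_mismatch() test at line 488). The shape is essentially complete in the matched lines; only the if/else pairing is inferred:

static void advance_cq(struct funeth_rxq *q)
{
        if (unlikely(q->cq_head == q->cq_mask)) {
                /* wrap: restart at the first CQE and flip the expected phase */
                q->cq_head = 0;
                q->phase ^= 1;
                q->next_cqe_info = cqe_to_info(q->cqes);
        } else {
                q->cq_head++;
                q->next_cqe_info += FUNETH_CQE_SIZE;
        }
        prefetch(q->next_cqe_info);
}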
365 static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q) in fun_handle_cqe_pkt() argument
367 const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info); in fun_handle_cqe_pkt()
369 struct net_device *ndev = q->netdev; in fun_handle_cqe_pkt()
379 u64_stats_update_begin(&q->syncp); in fun_handle_cqe_pkt()
380 q->stats.rx_pkts++; in fun_handle_cqe_pkt()
381 q->stats.rx_bytes += pkt_len; in fun_handle_cqe_pkt()
382 u64_stats_update_end(&q->syncp); in fun_handle_cqe_pkt()
384 advance_cq(q); in fun_handle_cqe_pkt()
392 ref_ok = fun_gather_pkt(q, tot_len, frags); in fun_handle_cqe_pkt()
395 va = fun_run_xdp(q, frags, va, ref_ok, xdp_q); in fun_handle_cqe_pkt()
415 skb = napi_get_frags(q->napi); in fun_handle_cqe_pkt()
432 skb_record_rx_queue(skb, q->qidx); in fun_handle_cqe_pkt()
434 if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash)) in fun_handle_cqe_pkt()
437 if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) { in fun_handle_cqe_pkt()
438 FUN_QSTAT_INC(q, rx_cso); in fun_handle_cqe_pkt()
442 if (unlikely(rx_hwtstamp_enabled(q->netdev))) in fun_handle_cqe_pkt()
445 trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv); in fun_handle_cqe_pkt()
447 gro_res = skb->data_len ? napi_gro_frags(q->napi) : in fun_handle_cqe_pkt()
448 napi_gro_receive(q->napi, skb); in fun_handle_cqe_pkt()
450 FUN_QSTAT_INC(q, gro_merged); in fun_handle_cqe_pkt()
452 FUN_QSTAT_INC(q, gro_pkts); in fun_handle_cqe_pkt()
456 FUN_QSTAT_INC(q, rx_mem_drops); in fun_handle_cqe_pkt()
461 q->cur_buf->pg_refs++; in fun_handle_cqe_pkt()
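fun_handle_cqe_pkt() above is the per-CQE packet path: it bumps rx_pkts/rx_bytes inside the u64_stats section, advances the CQ early, gathers the fragments, optionally runs XDP, then builds an skb with napi_get_frags() and hands it to GRO, with rx_mem_drops covering allocation failures and the final pg_refs++ returning the buffer reference on drops. A sketch of the delivery tail only, inferred from the matched lines; the hash type, the checksum model, and the mapping of gro_result values onto the gro_merged/gro_pkts counters are assumptions:

        skb_record_rx_queue(skb, q->qidx);

        if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
                skb_set_hash(skb, be32_to_cpu(rxreq->hash),
                             PKT_HASH_TYPE_L4);         /* assumed hash type */

        if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
                FUN_QSTAT_INC(q, rx_cso);
                skb->ip_summed = CHECKSUM_UNNECESSARY;  /* assumed csum model */
        }

        gro_res = skb->data_len ? napi_gro_frags(q->napi) :
                                  napi_gro_receive(q->napi, skb);
        if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE)
                FUN_QSTAT_INC(q, gro_merged);
        else if (gro_res == GRO_HELD || gro_res == GRO_NORMAL)
                FUN_QSTAT_INC(q, gro_pkts);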
479 static int fun_process_cqes(struct funeth_rxq *q, int budget) in fun_process_cqes() argument
481 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_process_cqes()
488 while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) { in fun_process_cqes()
492 fun_handle_cqe_pkt(q, xdp_q); in fun_process_cqes()
496 if (unlikely(q->xdp_flush)) { in fun_process_cqes()
497 if (q->xdp_flush & FUN_XDP_FLUSH_TX) in fun_process_cqes()
499 if (q->xdp_flush & FUN_XDP_FLUSH_REDIR) in fun_process_cqes()
501 q->xdp_flush = 0; in fun_process_cqes()
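fun_process_cqes() above drains CQEs until the budget is exhausted or the next CQE's phase bit mismatches (the queue is empty), then performs the deferred XDP flushes once per batch rather than once per packet. A sketch inferred from the matched lines; the lookup of the paired XDP TX queue and its doorbell helper are assumptions:

        struct funeth_txq *xdp_q = NULL;        /* assumption: per-CPU XDP TX queue */

        while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
                /* a matching phase bit means the device has written this CQE */
                fun_handle_cqe_pkt(q, xdp_q);
                budget--;
        }

        if (unlikely(q->xdp_flush)) {
                if (q->xdp_flush & FUN_XDP_FLUSH_TX)
                        fun_txq_wr_db(xdp_q);   /* assumed XDP TX doorbell helper */
                if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
                        xdp_do_flush();
                q->xdp_flush = 0;
        }
        return budget;                          /* leftover budget, see line 514 */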
513 struct funeth_rxq *q = irq->rxq; in fun_rxq_napi_poll() local
514 int work_done = budget - fun_process_cqes(q, budget); in fun_rxq_napi_poll()
515 u32 cq_db_val = q->cq_head; in fun_rxq_napi_poll()
518 FUN_QSTAT_INC(q, rx_budget); in fun_rxq_napi_poll()
520 cq_db_val |= q->irq_db_val; in fun_rxq_napi_poll()
523 if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) { in fun_rxq_napi_poll()
524 u64_stats_update_begin(&q->syncp); in fun_rxq_napi_poll()
525 q->stats.rx_bufs += q->rq_cons - q->rq_cons_db; in fun_rxq_napi_poll()
526 u64_stats_update_end(&q->syncp); in fun_rxq_napi_poll()
527 q->rq_cons_db = q->rq_cons; in fun_rxq_napi_poll()
528 writel((q->rq_cons - 1) & q->rq_mask, q->rq_db); in fun_rxq_napi_poll()
531 writel(cq_db_val, q->cq_db); in fun_rxq_napi_poll()
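The NAPI handler above turns the leftover budget into work_done, re-arms the interrupt through the CQ doorbell only when the poll completes, and batches RX buffer replenishment: the RQ doorbell is written only after rq_db_thres buffers have been consumed since the last write. A sketch inferred from the matched lines; the napi_complete_done() placement follows the standard NAPI pattern and is an assumption here:

static int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
{
        struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
        struct funeth_rxq *q = irq->rxq;
        int work_done = budget - fun_process_cqes(q, budget);
        u32 cq_db_val = q->cq_head;

        if (unlikely(work_done >= budget))
                FUN_QSTAT_INC(q, rx_budget);    /* budget exhausted, keep polling */
        else if (napi_complete_done(napi, work_done))
                cq_db_val |= q->irq_db_val;     /* re-arm the interrupt */

        /* lazily tell the device about consumed RX buffers */
        if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
                u64_stats_update_begin(&q->syncp);
                q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
                u64_stats_update_end(&q->syncp);
                q->rq_cons_db = q->rq_cons;
                writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
        }

        writel(cq_db_val, q->cq_db);
        return work_done;
}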
536 static void fun_rxq_free_bufs(struct funeth_rxq *q) in fun_rxq_free_bufs() argument
538 struct funeth_rxbuf *b = q->bufs; in fun_rxq_free_bufs()
541 for (i = 0; i <= q->rq_mask; i++, b++) in fun_rxq_free_bufs()
542 funeth_free_page(q, b); in fun_rxq_free_bufs()
544 funeth_free_page(q, &q->spare_buf); in fun_rxq_free_bufs()
545 q->cur_buf = NULL; in fun_rxq_free_bufs()
549 static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node) in fun_rxq_alloc_bufs() argument
551 struct funeth_rxbuf *b = q->bufs; in fun_rxq_alloc_bufs()
554 for (i = 0; i <= q->rq_mask; i++, b++) { in fun_rxq_alloc_bufs()
555 if (funeth_alloc_page(q, b, node, GFP_KERNEL)) { in fun_rxq_alloc_bufs()
556 fun_rxq_free_bufs(q); in fun_rxq_alloc_bufs()
559 q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr); in fun_rxq_alloc_bufs()
561 q->cur_buf = q->bufs; in fun_rxq_alloc_bufs()
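fun_rxq_free_bufs() and fun_rxq_alloc_bufs() above walk all rq_mask + 1 ring slots; the allocator also writes the matching RQ descriptor for each buffer and points cur_buf at the first slot, unwinding through the free helper if any allocation fails. A sketch of the allocator, inferred from the matched lines:

static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
{
        struct funeth_rxbuf *b = q->bufs;
        unsigned int i;

        for (i = 0; i <= q->rq_mask; i++, b++) {
                if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
                        fun_rxq_free_bufs(q);   /* unwind the partial allocation */
                        return -ENOMEM;
                }
                q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
        }
        q->cur_buf = q->bufs;
        return 0;
}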
575 static void fun_rxq_free_cache(struct funeth_rxq *q) in fun_rxq_free_cache() argument
577 struct funeth_rxbuf *b = q->cache.bufs; in fun_rxq_free_cache()
580 for (i = 0; i <= q->cache.mask; i++, b++) in fun_rxq_free_cache()
581 funeth_free_page(q, b); in fun_rxq_free_cache()
583 kvfree(q->cache.bufs); in fun_rxq_free_cache()
584 q->cache.bufs = NULL; in fun_rxq_free_cache()
587 int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog) in fun_rxq_set_bpf() argument
589 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_rxq_set_bpf()
595 if (headroom != q->headroom) { in fun_rxq_set_bpf()
600 0, q->hw_cqid, headroom); in fun_rxq_set_bpf()
605 q->headroom = headroom; in fun_rxq_set_bpf()
608 WRITE_ONCE(q->xdp_prog, prog); in fun_rxq_set_bpf()
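fun_rxq_set_bpf() above attaches or detaches an XDP program at runtime: when the required headroom changes (FUN_XDP_HEADROOM with a program, FUN_RX_HEADROOM without), the device-side CQ is updated first, and only then is the new program published with WRITE_ONCE(), pairing with the READ_ONCE() in fun_run_xdp(). A reduced sketch, inferred from the matched lines; fun_update_cq_headroom() is a hypothetical placeholder for the admin command hinted at by line 600:

int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
{
        struct funeth_priv *fp = netdev_priv(q->netdev);
        unsigned int headroom;
        int err;

        headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
        if (headroom != q->headroom) {
                /* hypothetical helper standing in for the CQ admin command */
                err = fun_update_cq_headroom(fp, q->hw_cqid, headroom);
                if (err)
                        return err;
                q->headroom = headroom;
        }

        WRITE_ONCE(q->xdp_prog, prog);  /* pairs with READ_ONCE() in fun_run_xdp() */
        return 0;
}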
620 struct funeth_rxq *q; in fun_rxq_create_sw() local
625 q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node); in fun_rxq_create_sw()
626 if (!q) in fun_rxq_create_sw()
629 q->qidx = qidx; in fun_rxq_create_sw()
630 q->netdev = dev; in fun_rxq_create_sw()
631 q->cq_mask = ncqe - 1; in fun_rxq_create_sw()
632 q->rq_mask = nrqe - 1; in fun_rxq_create_sw()
633 q->numa_node = numa_node; in fun_rxq_create_sw()
634 q->rq_db_thres = nrqe / 4; in fun_rxq_create_sw()
635 u64_stats_init(&q->syncp); in fun_rxq_create_sw()
636 q->dma_dev = &fp->pdev->dev; in fun_rxq_create_sw()
638 q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), in fun_rxq_create_sw()
639 sizeof(*q->bufs), false, numa_node, in fun_rxq_create_sw()
640 &q->rq_dma_addr, (void **)&q->bufs, NULL); in fun_rxq_create_sw()
641 if (!q->rqes) in fun_rxq_create_sw()
644 q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0, in fun_rxq_create_sw()
645 false, numa_node, &q->cq_dma_addr, NULL, in fun_rxq_create_sw()
647 if (!q->cqes) in fun_rxq_create_sw()
650 err = fun_rxq_init_cache(&q->cache, nrqe, numa_node); in fun_rxq_create_sw()
654 err = fun_rxq_alloc_bufs(q, numa_node); in fun_rxq_create_sw()
658 q->stats.rx_bufs = q->rq_mask; in fun_rxq_create_sw()
659 q->init_state = FUN_QSTATE_INIT_SW; in fun_rxq_create_sw()
660 return q; in fun_rxq_create_sw()
663 fun_rxq_free_cache(q); in fun_rxq_create_sw()
665 dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes, in fun_rxq_create_sw()
666 q->cq_dma_addr); in fun_rxq_create_sw()
668 fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes, in fun_rxq_create_sw()
669 q->rq_dma_addr, q->bufs); in fun_rxq_create_sw()
671 kfree(q); in fun_rxq_create_sw()
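fun_rxq_create_sw() above allocates the host-side queue state on the queue's NUMA node: the rxq struct itself, the RQ descriptor ring plus its shadow buffer array, the CQE ring, the recycle cache, and finally the initial page buffers. The error unwind visible at lines 663-671 releases these in reverse order of allocation; reconstructed below with assumed label names:

free_cache:
        fun_rxq_free_cache(q);
free_cqes:
        dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
                          q->cq_dma_addr);
free_rqes:
        fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
                          q->rq_dma_addr, q->bufs);
free_q:
        kfree(q);
        return ERR_PTR(err);            /* callers test the result with IS_ERR() */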
677 static void fun_rxq_free_sw(struct funeth_rxq *q) in fun_rxq_free_sw() argument
679 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_rxq_free_sw()
681 fun_rxq_free_cache(q); in fun_rxq_free_sw()
682 fun_rxq_free_bufs(q); in fun_rxq_free_sw()
683 fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false, in fun_rxq_free_sw()
684 q->rqes, q->rq_dma_addr, q->bufs); in fun_rxq_free_sw()
685 dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE, in fun_rxq_free_sw()
686 q->cqes, q->cq_dma_addr); in fun_rxq_free_sw()
689 fp->rx_packets += q->stats.rx_pkts; in fun_rxq_free_sw()
690 fp->rx_bytes += q->stats.rx_bytes; in fun_rxq_free_sw()
691 fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops; in fun_rxq_free_sw()
693 kfree(q); in fun_rxq_free_sw()
697 int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq) in fun_rxq_create_dev() argument
699 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_rxq_create_dev()
700 unsigned int ncqe = q->cq_mask + 1; in fun_rxq_create_dev()
701 unsigned int nrqe = q->rq_mask + 1; in fun_rxq_create_dev()
704 err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx, in fun_rxq_create_dev()
709 err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED, in fun_rxq_create_dev()
714 q->phase = 1; in fun_rxq_create_dev()
715 q->irq_cnt = 0; in fun_rxq_create_dev()
716 q->cq_head = 0; in fun_rxq_create_dev()
717 q->rq_cons = 0; in fun_rxq_create_dev()
718 q->rq_cons_db = 0; in fun_rxq_create_dev()
719 q->buf_offset = 0; in fun_rxq_create_dev()
720 q->napi = &irq->napi; in fun_rxq_create_dev()
721 q->irq_db_val = fp->cq_irq_db; in fun_rxq_create_dev()
722 q->next_cqe_info = cqe_to_info(q->cqes); in fun_rxq_create_dev()
724 q->xdp_prog = fp->xdp_prog; in fun_rxq_create_dev()
725 q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM; in fun_rxq_create_dev()
729 FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0, in fun_rxq_create_dev()
731 &q->hw_sqid, &q->rq_db); in fun_rxq_create_dev()
737 q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe, in fun_rxq_create_dev()
738 q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0, in fun_rxq_create_dev()
740 &q->hw_cqid, &q->cq_db); in fun_rxq_create_dev()
744 irq->rxq = q; in fun_rxq_create_dev()
745 writel(q->rq_mask, q->rq_db); in fun_rxq_create_dev()
746 q->init_state = FUN_QSTATE_INIT_FULL; in fun_rxq_create_dev()
748 netif_info(fp, ifup, q->netdev, in fun_rxq_create_dev()
750 q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx, in fun_rxq_create_dev()
751 q->numa_node, q->headroom); in fun_rxq_create_dev()
755 fun_destroy_sq(fp->fdev, q->hw_sqid); in fun_rxq_create_dev()
757 xdp_rxq_info_unreg(&q->xdp_rxq); in fun_rxq_create_dev()
759 netdev_err(q->netdev, in fun_rxq_create_dev()
761 q->qidx, err); in fun_rxq_create_dev()
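fun_rxq_create_dev() above takes a software-initialized queue to FUN_QSTATE_INIT_FULL: it registers the queue with the XDP core, resets the ring indices and the phase bit, creates the device-side buffer ring and CQ via admin commands, and finally posts the whole buffer ring through the RQ doorbell. A sketch of the XDP registration and reset portion, inferred from the matched lines; the error label names and the napi_id argument are assumptions, and the fun_sq_create()/fun_cq_create() admin calls are omitted because most of their arguments are not shown:

        err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
                               irq->napi.napi_id);
        if (err)
                goto out_err;                   /* assumed label */

        err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
                                         NULL);
        if (err)
                goto out_xdp_unreg;             /* assumed label */

        q->phase = 1;                           /* first pass expects phase bit 1 */
        q->irq_cnt = 0;
        q->cq_head = 0;
        q->rq_cons = 0;
        q->rq_cons_db = 0;
        q->buf_offset = 0;
        q->napi = &irq->napi;
        q->irq_db_val = fp->cq_irq_db;
        q->next_cqe_info = cqe_to_info(q->cqes);

        q->xdp_prog = fp->xdp_prog;
        q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;

        /* device-side SQ/CQ creation omitted from this sketch */

        irq->rxq = q;
        writel(q->rq_mask, q->rq_db);           /* post the whole buffer ring */
        q->init_state = FUN_QSTATE_INIT_FULL;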
765 static void fun_rxq_free_dev(struct funeth_rxq *q) in fun_rxq_free_dev() argument
767 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_rxq_free_dev()
770 if (q->init_state < FUN_QSTATE_INIT_FULL) in fun_rxq_free_dev()
773 irq = container_of(q->napi, struct fun_irq, napi); in fun_rxq_free_dev()
774 netif_info(fp, ifdown, q->netdev, in fun_rxq_free_dev()
776 q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx); in fun_rxq_free_dev()
779 xdp_rxq_info_unreg(&q->xdp_rxq); in fun_rxq_free_dev()
780 fun_destroy_sq(fp->fdev, q->hw_sqid); in fun_rxq_free_dev()
781 fun_destroy_cq(fp->fdev, q->hw_cqid); in fun_rxq_free_dev()
782 q->init_state = FUN_QSTATE_INIT_SW; in fun_rxq_free_dev()
792 struct funeth_rxq *q = *qp; in funeth_rxq_create() local
795 if (!q) { in funeth_rxq_create()
796 q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq); in funeth_rxq_create()
797 if (IS_ERR(q)) in funeth_rxq_create()
798 return PTR_ERR(q); in funeth_rxq_create()
801 if (q->init_state >= state) in funeth_rxq_create()
804 err = fun_rxq_create_dev(q, irq); in funeth_rxq_create()
807 fun_rxq_free_sw(q); in funeth_rxq_create()
812 *qp = q; in funeth_rxq_create()
817 struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state) in funeth_rxq_free() argument
820 fun_rxq_free_dev(q); in funeth_rxq_free()
823 fun_rxq_free_sw(q); in funeth_rxq_free()
824 q = NULL; in funeth_rxq_free()
827 return q; in funeth_rxq_free()
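The two entry points above implement the queue state machine used at ifup/ifdown and reconfiguration time: a queue is first brought to FUN_QSTATE_INIT_SW (host memory only) and then to FUN_QSTATE_INIT_FULL (device queues live), and funeth_rxq_free() walks it back down, returning NULL once the software state is gone. A sketch of the create path, inferred from the matched lines; the prototype and the condition for undoing the software allocation on error are assumptions:

int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
                      unsigned int ncqe, unsigned int nrqe,
                      struct fun_irq *irq, int state, struct funeth_rxq **qp)
{
        struct funeth_rxq *q = *qp;
        int err;

        if (!q) {
                q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
                if (IS_ERR(q))
                        return PTR_ERR(q);
        }

        if (q->init_state >= state)
                goto out;                       /* already at the target state */

        err = fun_rxq_create_dev(q, irq);
        if (err) {
                if (!*qp)                       /* assumption: only undo what we created here */
                        fun_rxq_free_sw(q);
                return err;
        }
out:
        *qp = q;
        return 0;
}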