Lines matching references to tx

26 struct gve_tx_ring *tx = &priv->tx[tx_qid]; in gve_xdp_tx_flush() local
28 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_tx_flush()
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_xdp_done() argument
168 idx = tx->done & tx->mask; in gve_clean_xdp_done()
169 info = &tx->info[idx]; in gve_clean_xdp_done()
170 tx->done++; in gve_clean_xdp_done()
187 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_xdp_done()
188 if (xsk_complete > 0 && tx->xsk_pool) in gve_clean_xdp_done()
189 xsk_tx_completed(tx->xsk_pool, xsk_complete); in gve_clean_xdp_done()
190 u64_stats_update_begin(&tx->statss); in gve_clean_xdp_done()
191 tx->bytes_done += bytes; in gve_clean_xdp_done()
192 tx->pkt_done += pkts; in gve_clean_xdp_done()
193 u64_stats_update_end(&tx->statss); in gve_clean_xdp_done()
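
The gve_clean_xdp_done() fragments above show the GQI completion pattern: tx->done is a free-running counter, the slot for each completion is found by masking it, and the byte/packet totals are published once per pass under u64_stats_update_begin()/end(). Below is a hedged user-space model of that walk; the ring and slot_info types, the clean_done() helper and the sample sizes are invented for illustration and are not the driver's code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for gve_tx_ring and its per-slot info array. */
struct slot_info { uint32_t bytes; };

struct ring {
        uint32_t done;           /* free-running completion counter */
        uint32_t mask;           /* slot count - 1 (power of two)   */
        struct slot_info *info;  /* one entry per slot              */
        uint64_t bytes_done;
        uint64_t pkt_done;
};

/* Consume 'to_do' completions, mirroring the gve_clean_xdp_done() walk:
 * mask the counter to find the slot, then advance the counter. */
static void clean_done(struct ring *r, uint32_t to_do)
{
        uint64_t bytes = 0, pkts = 0;

        for (uint32_t i = 0; i < to_do; i++) {
                uint32_t idx = r->done & r->mask;

                bytes += r->info[idx].bytes;
                pkts++;
                r->done++;
        }
        /* The driver publishes these under u64_stats_update_begin()/end(). */
        r->bytes_done += bytes;
        r->pkt_done += pkts;
}

int main(void)
{
        struct slot_info info[4] = { {60}, {1500}, {900}, {64} };
        struct ring r = { .mask = 3, .info = info };

        clean_done(&r, 3);
        printf("done=%u bytes=%llu pkts=%llu\n", r.done,
               (unsigned long long)r.bytes_done,
               (unsigned long long)r.pkt_done);
        return 0;
}

Keeping done free-running (it is masked only when indexing, never wrapped in place) is what lets gve_tx_avail() and the poll paths below compare it directly against tx->req and the NIC's event counter.
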
197 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
202 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_free_ring() local
208 slots = tx->mask + 1; in gve_tx_free_ring()
209 if (tx->q_num < priv->tx_cfg.num_queues) { in gve_tx_free_ring()
210 gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); in gve_tx_free_ring()
211 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_free_ring()
213 gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt); in gve_tx_free_ring()
216 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring()
217 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring()
218 tx->q_resources = NULL; in gve_tx_free_ring()
220 if (!tx->raw_addressing) { in gve_tx_free_ring()
221 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_free_ring()
222 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_free_ring()
223 tx->tx_fifo.qpl = NULL; in gve_tx_free_ring()
226 bytes = sizeof(*tx->desc) * slots; in gve_tx_free_ring()
227 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_free_ring()
228 tx->desc = NULL; in gve_tx_free_ring()
230 vfree(tx->info); in gve_tx_free_ring()
231 tx->info = NULL; in gve_tx_free_ring()
238 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_alloc_ring() local
244 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring()
245 spin_lock_init(&tx->clean_lock); in gve_tx_alloc_ring()
246 spin_lock_init(&tx->xdp_lock); in gve_tx_alloc_ring()
247 tx->q_num = idx; in gve_tx_alloc_ring()
249 tx->mask = slots - 1; in gve_tx_alloc_ring()
252 tx->info = vcalloc(slots, sizeof(*tx->info)); in gve_tx_alloc_ring()
253 if (!tx->info) in gve_tx_alloc_ring()
257 bytes = sizeof(*tx->desc) * slots; in gve_tx_alloc_ring()
258 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring()
259 if (!tx->desc) in gve_tx_alloc_ring()
262 tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; in gve_tx_alloc_ring()
263 tx->dev = &priv->pdev->dev; in gve_tx_alloc_ring()
264 if (!tx->raw_addressing) { in gve_tx_alloc_ring()
265 tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx); in gve_tx_alloc_ring()
266 if (!tx->tx_fifo.qpl) in gve_tx_alloc_ring()
269 if (gve_tx_fifo_init(priv, &tx->tx_fifo)) in gve_tx_alloc_ring()
273 tx->q_resources = in gve_tx_alloc_ring()
275 sizeof(*tx->q_resources), in gve_tx_alloc_ring()
276 &tx->q_resources_bus, in gve_tx_alloc_ring()
278 if (!tx->q_resources) in gve_tx_alloc_ring()
282 (unsigned long)tx->bus); in gve_tx_alloc_ring()
284 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_alloc_ring()
290 if (!tx->raw_addressing) in gve_tx_alloc_ring()
291 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_alloc_ring()
293 if (!tx->raw_addressing) in gve_tx_alloc_ring()
294 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_alloc_ring()
296 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_alloc_ring()
297 tx->desc = NULL; in gve_tx_alloc_ring()
299 vfree(tx->info); in gve_tx_alloc_ring()
300 tx->info = NULL; in gve_tx_alloc_ring()
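
Taken together with the gve_tx_free_ring() entries earlier, the gve_tx_alloc_ring() entries show the allocate-forward / unwind-backward idiom: the info array, descriptor area and q_resources are acquired in order, each failure label releases only what was already acquired, and the free path tears everything down in the same reverse order. A hedged user-space sketch of that shape, with malloc()/calloc() standing in for vcalloc() and dma_alloc_coherent(), and ring_model/ring_alloc() invented for the example:

#include <stdlib.h>

/* Model of gve_tx_alloc_ring()'s error unwinding: three allocations
 * acquired in order and released in reverse order on failure. */
struct ring_model {
        void *info;
        void *desc;
        void *q_resources;
};

static int ring_alloc(struct ring_model *r, size_t slots)
{
        r->info = calloc(slots, 64);    /* vcalloc(slots, sizeof(*tx->info)) */
        if (!r->info)
                goto err;

        r->desc = malloc(slots * 16);   /* dma_alloc_coherent() for descriptors */
        if (!r->desc)
                goto err_free_info;

        r->q_resources = malloc(32);    /* dma_alloc_coherent() for q_resources */
        if (!r->q_resources)
                goto err_free_desc;

        return 0;

err_free_desc:
        free(r->desc);
        r->desc = NULL;
err_free_info:
        free(r->info);
        r->info = NULL;
err:
        return -1;
}

int main(void)
{
        struct ring_model r = {0};

        /* 0 on success; on failure everything acquired so far was undone. */
        return ring_alloc(&r, 256) ? 1 : 0;
}

In the real ring the QPL and FIFO taken for the non-raw-addressing case sit between these steps and are unwound the same way, which is why gve_tx_fifo_release() and gve_unassign_qpl() appear in both the error path above and the free path.
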
343 static inline u32 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
345 return tx->mask + 1 - (tx->req - tx->done); in gve_tx_avail()
348 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, in gve_skb_fifo_bytes_required() argument
358 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, in gve_skb_fifo_bytes_required()
393 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
397 if (!tx->raw_addressing) in gve_can_tx()
398 can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); in gve_can_tx()
400 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); in gve_can_tx()
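
gve_tx_avail() works on two free-running u32 counters: req counts descriptors posted, done counts descriptors completed, and mask + 1 is the ring size (tx->mask = slots - 1 works as a mask only because the slot count is a power of two); gve_can_tx() adds a FIFO-space check for the QPL case. Because the subtraction is unsigned, the in-flight count req - done stays correct even after either counter wraps past 2^32. A small, hedged demonstration of just that arithmetic; tx_avail() below is a stand-alone copy for illustration, not the driver function:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the gve_tx_avail() arithmetic on free-running u32 counters. */
static uint32_t tx_avail(uint32_t mask, uint32_t req, uint32_t done)
{
        return mask + 1 - (req - done);
}

int main(void)
{
        uint32_t mask = 1023;                            /* 1024-slot ring */

        /* Normal case: 10 descriptors in flight. */
        printf("%u\n", tx_avail(mask, 110, 100));        /* 1014 */

        /* req has wrapped past 2^32, done has not, yet unsigned
         * subtraction still yields 10 in flight. */
        printf("%u\n", tx_avail(mask, 5, 0xFFFFFFFBu));  /* 1014 */
        return 0;
}
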
406 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_maybe_stop_tx() argument
414 if (!tx->raw_addressing) in gve_maybe_stop_tx()
415 bytes_required = gve_skb_fifo_bytes_required(tx, skb); in gve_maybe_stop_tx()
417 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
421 spin_lock(&tx->clean_lock); in gve_maybe_stop_tx()
422 nic_done = gve_tx_load_event_counter(priv, tx); in gve_maybe_stop_tx()
423 to_do = nic_done - tx->done; in gve_maybe_stop_tx()
426 if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { in gve_maybe_stop_tx()
429 gve_clean_tx_done(priv, tx, to_do, false); in gve_maybe_stop_tx()
431 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
436 tx->stop_queue++; in gve_maybe_stop_tx()
437 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx()
439 spin_unlock(&tx->clean_lock); in gve_maybe_stop_tx()
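
gve_maybe_stop_tx() is the usual "optimistic check, clean under the lock and recheck, only then stop the queue" shape: the hot path never takes clean_lock, and the queue is stopped only if reclaiming the completions the NIC has already reported (nic_done - tx->done) still leaves too little room. Below is a hedged user-space model of that control flow; mock_ring, can_tx(), clean_tx_done() and the pthread mutex are stand-ins invented for the model, not the driver's spinlock-based code.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mock ring: 'avail' free descriptors plus completions the NIC has
 * reported but the driver has not yet cleaned. */
struct mock_ring {
        uint32_t avail;
        uint32_t uncleaned;
        bool stopped;
        pthread_mutex_t clean_lock;
};

static bool can_tx(struct mock_ring *r, uint32_t needed)
{
        return r->avail >= needed;
}

/* Stand-in for gve_clean_tx_done(): reclaim completed descriptors. */
static void clean_tx_done(struct mock_ring *r)
{
        r->avail += r->uncleaned;
        r->uncleaned = 0;
}

/* 0: caller may transmit; -1: queue stopped (NETDEV_TX_BUSY in the driver). */
static int maybe_stop_tx(struct mock_ring *r, uint32_t needed)
{
        int ret = -1;

        /* Fast path: no lock taken when there is clearly enough room. */
        if (can_tx(r, needed))
                return 0;

        pthread_mutex_lock(&r->clean_lock);
        clean_tx_done(r);                /* clean, then recheck under the lock */
        if (can_tx(r, needed))
                ret = 0;
        else
                r->stopped = true;       /* netif_tx_stop_queue() in the driver */
        pthread_mutex_unlock(&r->clean_lock);

        return ret;
}

int main(void)
{
        static struct mock_ring r = {
                .avail = 2, .uncleaned = 16,
                .clean_lock = PTHREAD_MUTEX_INITIALIZER,
        };
        int ret;

        ret = maybe_stop_tx(&r, 8);      /* cleaning frees enough room */
        printf("ret=%d stopped=%d\n", ret, r.stopped);   /* ret=0 stopped=0 */

        ret = maybe_stop_tx(&r, 64);     /* even a full clean is not enough */
        printf("ret=%d stopped=%d\n", ret, r.stopped);   /* ret=-1 stopped=1 */
        return 0;
}

Stopping only after a locked clean-and-recheck avoids turning the queue off and back on when completions are merely lagging behind the last doorbell.
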
509 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) in gve_tx_add_skb_copy() argument
516 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_copy()
522 info = &tx->info[idx]; in gve_tx_add_skb_copy()
523 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_copy()
536 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); in gve_tx_add_skb_copy()
537 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, in gve_tx_add_skb_copy()
540 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, in gve_tx_add_skb_copy()
549 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb_copy()
551 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
557 next_idx = (tx->req + 1) & tx->mask; in gve_tx_add_skb_copy()
558 gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb); in gve_tx_add_skb_copy()
562 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; in gve_tx_add_skb_copy()
563 seg_desc = &tx->desc[next_idx]; in gve_tx_add_skb_copy()
572 tx->tx_fifo.base + info->iov[i].iov_offset, in gve_tx_add_skb_copy()
574 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
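
In the copy (QPL) path the descriptors for one skb land in consecutive ring slots: the packet descriptor at req & mask, an optional metadata descriptor at req + 1, then one segment descriptor per payload FIFO fragment, exactly as the next_idx computations above show (the i - payload_iov term just rebases the loop to zero). A hedged model that reproduces only the slot arithmetic; print_slots() and the sample numbers are invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Model of the descriptor layout built by gve_tx_add_skb_copy(): packet
 * descriptor, optional metadata (mtd) descriptor, then one segment
 * descriptor per payload fragment, at consecutive slots modulo ring size. */
static void print_slots(uint32_t req, uint32_t mask,
                        uint32_t mtd_desc_nr, uint32_t payload_nfrags)
{
        printf("pkt  desc -> slot %u\n", req & mask);

        if (mtd_desc_nr)
                printf("mtd  desc -> slot %u\n", (req + 1) & mask);

        for (uint32_t i = 0; i < payload_nfrags; i++)
                printf("seg%u desc -> slot %u\n", i,
                       (req + 1 + mtd_desc_nr + i) & mask);
}

int main(void)
{
        /* 8-slot ring with the posted counter near a wrap point, one mtd
         * descriptor and two payload fragments: slots 6, 7, 0, 1. */
        print_slots(/*req=*/14, /*mask=*/7, /*mtd_desc_nr=*/1,
                    /*payload_nfrags=*/2);
        return 0;
}
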
583 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy() argument
592 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_no_copy()
597 info = &tx->info[idx]; in gve_tx_add_skb_no_copy()
598 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
611 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
612 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
613 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
630 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
631 mtd_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
641 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
642 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
651 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
652 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
654 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
655 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
656 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
659 tx->info[idx].skb = NULL; in gve_tx_add_skb_no_copy()
660 dma_unmap_len_set(&tx->info[idx], len, len); in gve_tx_add_skb_no_copy()
661 dma_unmap_addr_set(&tx->info[idx], dma, addr); in gve_tx_add_skb_no_copy()
677 gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); in gve_tx_add_skb_no_copy()
680 tx->dropped_pkt++; in gve_tx_add_skb_no_copy()
687 struct gve_tx_ring *tx; in gve_tx() local
692 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx()
693 if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { in gve_tx()
699 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
702 if (tx->raw_addressing) in gve_tx()
703 nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); in gve_tx()
705 nsegs = gve_tx_add_skb_copy(priv, tx, skb); in gve_tx()
709 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_tx()
711 tx->req += nsegs; in gve_tx()
716 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx()
722 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
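
gve_tx() batches the doorbell: the write is skipped while the queue is not stopped and netdev_xmit_more() reports more skbs queued behind this one, so a burst ends with a single write of the final req value; the early doorbell in the busy path exists only to flush descriptors that earlier calls posted without ringing. A hedged model of the batching; txq_model, put_doorbell() and xmit_one() are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of gve_tx() doorbell batching: the expensive doorbell write is
 * deferred while the stack promises more packets are on the way. */
struct txq_model {
        uint32_t req;        /* descriptors posted                 */
        uint32_t doorbell;   /* last value written to the doorbell */
        uint32_t writes;     /* number of doorbell writes          */
};

static void put_doorbell(struct txq_model *t)
{
        t->doorbell = t->req;
        t->writes++;
}

static void xmit_one(struct txq_model *t, uint32_t nsegs, bool xmit_more)
{
        t->req += nsegs;        /* descriptors for this skb are posted */
        if (xmit_more)          /* more skbs queued: defer the doorbell */
                return;
        put_doorbell(t);
}

int main(void)
{
        struct txq_model t = {0};

        /* A burst of four skbs: only the last one rings the doorbell. */
        xmit_one(&t, 2, true);
        xmit_one(&t, 3, true);
        xmit_one(&t, 1, true);
        xmit_one(&t, 4, false);

        printf("req=%u doorbell=%u writes=%u\n", t.req, t.doorbell, t.writes);
        return 0;               /* req=10 doorbell=10 writes=1 */
}
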
726 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_fill_xdp() argument
731 u32 reqi = tx->req; in gve_tx_fill_xdp()
733 pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len); in gve_tx_fill_xdp()
736 info = &tx->info[reqi & tx->mask]; in gve_tx_fill_xdp()
741 nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len, in gve_tx_fill_xdp()
749 gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0, in gve_tx_fill_xdp()
754 gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask], in gve_tx_fill_xdp()
759 memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset, in gve_tx_fill_xdp()
762 tx->tx_fifo.qpl->page_buses, in gve_tx_fill_xdp()
777 struct gve_tx_ring *tx; in gve_xdp_xmit() local
786 tx = &priv->tx[qid]; in gve_xdp_xmit()
788 spin_lock(&tx->xdp_lock); in gve_xdp_xmit()
790 err = gve_xdp_xmit_one(priv, tx, frames[i]->data, in gve_xdp_xmit()
797 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_xmit()
799 spin_unlock(&tx->xdp_lock); in gve_xdp_xmit()
801 u64_stats_update_begin(&tx->statss); in gve_xdp_xmit()
802 tx->xdp_xmit += n; in gve_xdp_xmit()
803 tx->xdp_xmit_errors += n - i; in gve_xdp_xmit()
804 u64_stats_update_end(&tx->statss); in gve_xdp_xmit()
809 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xdp_xmit_one() argument
814 if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1)) in gve_xdp_xmit_one()
817 nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false); in gve_xdp_xmit_one()
818 tx->req += nsegs; in gve_xdp_xmit_one()
825 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done() argument
836 idx = tx->done & tx->mask; in gve_clean_tx_done()
839 tx->q_num, __func__, idx, tx->req, tx->done); in gve_clean_tx_done()
840 info = &tx->info[idx]; in gve_clean_tx_done()
844 if (tx->raw_addressing) in gve_clean_tx_done()
845 gve_tx_unmap_buf(tx->dev, info); in gve_clean_tx_done()
846 tx->done++; in gve_clean_tx_done()
853 if (tx->raw_addressing) in gve_clean_tx_done()
859 if (!tx->raw_addressing) in gve_clean_tx_done()
860 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_tx_done()
861 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done()
862 tx->bytes_done += bytes; in gve_clean_tx_done()
863 tx->pkt_done += pkts; in gve_clean_tx_done()
864 u64_stats_update_end(&tx->statss); in gve_clean_tx_done()
865 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); in gve_clean_tx_done()
872 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && in gve_clean_tx_done()
873 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { in gve_clean_tx_done()
874 tx->wake_queue++; in gve_clean_tx_done()
875 netif_tx_wake_queue(tx->netdev_txq); in gve_clean_tx_done()
882 struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
884 u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); in gve_tx_load_event_counter()
890 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xsk_tx() argument
897 spin_lock(&tx->xdp_lock); in gve_xsk_tx()
899 if (!gve_can_tx(tx, GVE_TX_START_THRESH)) in gve_xsk_tx()
902 if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) { in gve_xsk_tx()
903 tx->xdp_xsk_done = tx->xdp_xsk_wakeup; in gve_xsk_tx()
907 data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); in gve_xsk_tx()
908 nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true); in gve_xsk_tx()
909 tx->req += nsegs; in gve_xsk_tx()
914 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xsk_tx()
915 xsk_tx_release(tx->xsk_pool); in gve_xsk_tx()
917 spin_unlock(&tx->xdp_lock); in gve_xsk_tx()
924 struct gve_tx_ring *tx = block->tx; in gve_xdp_poll() local
930 nic_done = gve_tx_load_event_counter(priv, tx); in gve_xdp_poll()
931 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_xdp_poll()
932 gve_clean_xdp_done(priv, tx, to_do); in gve_xdp_poll()
933 repoll = nic_done != tx->done; in gve_xdp_poll()
935 if (tx->xsk_pool) { in gve_xdp_poll()
936 int sent = gve_xsk_tx(priv, tx, budget); in gve_xdp_poll()
938 u64_stats_update_begin(&tx->statss); in gve_xdp_poll()
939 tx->xdp_xsk_sent += sent; in gve_xdp_poll()
940 u64_stats_update_end(&tx->statss); in gve_xdp_poll()
942 if (xsk_uses_need_wakeup(tx->xsk_pool)) in gve_xdp_poll()
943 xsk_set_tx_need_wakeup(tx->xsk_pool); in gve_xdp_poll()
953 struct gve_tx_ring *tx = block->tx; in gve_tx_poll() local
965 spin_lock(&tx->clean_lock); in gve_tx_poll()
967 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_poll()
968 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_tx_poll()
969 gve_clean_tx_done(priv, tx, to_do, true); in gve_tx_poll()
970 spin_unlock(&tx->clean_lock); in gve_tx_poll()
972 return nic_done != tx->done; in gve_tx_poll()
975 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_tx_clean_pending() argument
977 u32 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_clean_pending()
979 return nic_done != tx->done; in gve_tx_clean_pending()
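
The poll and pending checks above all reduce to the same comparison: to_do = min(nic_done - tx->done, budget) bounds one cleaning pass, and nic_done != tx->done afterwards means completions remain and the ring must be repolled, using the same wrap-safe unsigned subtraction as gve_tx_avail(). A hedged sketch of one budgeted pass; poll_once() and the sample numbers are invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b)
{
        return a < b ? a : b;
}

/* One budgeted cleaning pass, mirroring gve_tx_poll()/gve_xdp_poll():
 * returns true when completions remain and the ring must be repolled. */
static bool poll_once(uint32_t nic_done, uint32_t *done, uint32_t budget)
{
        uint32_t to_do = min_u32(nic_done - *done, budget);

        *done += to_do;              /* gve_clean_tx_done(priv, tx, to_do, ...) */
        return nic_done != *done;    /* work left for the next pass */
}

int main(void)
{
        uint32_t done = 100, nic_done = 1000, budget = 256;

        while (poll_once(nic_done, &done, budget))
                printf("repoll, done=%u\n", done);   /* 356, 612, 868 */
        printf("idle,   done=%u\n", done);           /* 1000 */
        return 0;
}
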