Lines matching refs: tx

17 static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)  in gve_has_free_tx_qpl_bufs()  argument
21 if (!tx->dqo.qpl) in gve_has_free_tx_qpl_bufs()
24-26 num_avail = tx->dqo.num_tx_qpl_bufs - (tx->dqo_tx.alloc_tx_qpl_buf_cnt - tx->dqo_tx.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
32-33 tx->dqo_tx.free_tx_qpl_buf_cnt = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
35-37 num_avail = tx->dqo.num_tx_qpl_bufs - (tx->dqo_tx.alloc_tx_qpl_buf_cnt - tx->dqo_tx.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
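Note: the matches above show gve_has_free_tx_qpl_bufs() computing availability twice: first from a TX-path-private cached counter and, only if that falls short, again after refreshing the cache from the completion path's atomic counter with acquire semantics. Below is a minimal userspace sketch of that pattern, assuming C11 atomics stand in for the kernel's atomic_read_acquire(); the struct and names are illustrative, not taken from the driver.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct buf_pool {
    uint32_t num_bufs;          /* total buffers owned by the ring */
    uint32_t alloc_cnt;         /* TX path only: buffers handed out */
    uint32_t cached_free_cnt;   /* TX path's possibly stale copy of free_cnt */
    _Atomic uint32_t free_cnt;  /* incremented by the completion path */
};

static bool has_free_bufs(struct buf_pool *p, uint32_t count)
{
    uint32_t avail = p->num_bufs - (p->alloc_cnt - p->cached_free_cnt);

    if (avail >= count)
        return true;            /* the cached view already has room */

    /* Slow path: refresh the cached count from the completion path. */
    p->cached_free_cnt = atomic_load_explicit(&p->free_cnt,
                                              memory_order_acquire);
    avail = p->num_bufs - (p->alloc_cnt - p->cached_free_cnt);
    return avail >= count;
}

The point of the split is that the hot path stays free of atomic operations; the shared counter is only consulted when the cached view cannot satisfy the request.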
43 gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx) in gve_alloc_tx_qpl_buf() argument
47 index = tx->dqo_tx.free_tx_qpl_buf_head; in gve_alloc_tx_qpl_buf()
53-54 tx->dqo_tx.free_tx_qpl_buf_head = atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1); in gve_alloc_tx_qpl_buf()
55 index = tx->dqo_tx.free_tx_qpl_buf_head; in gve_alloc_tx_qpl_buf()
62 tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index]; in gve_alloc_tx_qpl_buf()
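The allocation side (here and in gve_alloc_pending_packet() below) pops from a head that only the TX path touches; when that private list runs dry it steals the entire shared list with a single atomic_xchg(..., -1), where -1 doubles as the empty marker. A hedged userspace sketch of that consumer side, again using C11 atomics and illustrative names:

#include <stdatomic.h>
#include <stdint.h>

#define NUM_BUFS 128

static int16_t next[NUM_BUFS];           /* index-linked free list */
static int16_t private_head = -1;        /* TX path only, no atomics needed */
static _Atomic int16_t shared_head = -1; /* refilled by the completion path */

static int16_t alloc_buf(void)
{
    int16_t index = private_head;

    if (index == -1) {
        /* Private list empty: take the whole shared list in one exchange. */
        private_head = atomic_exchange_explicit(&shared_head, (int16_t)-1,
                                                memory_order_acq_rel);
        index = private_head;
        if (index == -1)
            return -1;                   /* nothing free anywhere */
    }

    private_head = next[index];          /* unlink the popped entry */
    return index;
}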
68 gve_free_tx_qpl_bufs(struct gve_tx_ring *tx, in gve_free_tx_qpl_bufs() argument
80 tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i]; in gve_free_tx_qpl_bufs()
85 s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head); in gve_free_tx_qpl_bufs()
87 tx->dqo.tx_qpl_buf_next[index] = old_head; in gve_free_tx_qpl_bufs()
88 if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head, in gve_free_tx_qpl_bufs()
95 atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt); in gve_free_tx_qpl_bufs()
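gve_free_tx_qpl_bufs() is the producer side of the same scheme: the freed indices are first chained to each other through tx_qpl_buf_next[], and only then is the chain published by cmpxchg-ing the shared head, retrying if another CPU got there first; the count is bumped afterwards. A minimal userspace sketch of that push, assuming C11 atomics in place of atomic_cmpxchg()/atomic_read_acquire():

#include <stdatomic.h>
#include <stdint.h>

#define NUM_BUFS 128

static int16_t next[NUM_BUFS];          /* shared with the consumer sketch */
static _Atomic int16_t free_head = -1;  /* -1 means the list is empty */
static _Atomic uint32_t free_cnt;       /* read with acquire by the TX path */

static void free_buf_chain(const int16_t *ids, int n)
{
    int16_t first = ids[0];
    int16_t last = ids[0];
    int i;

    /* Link the freed buffers to each other first; nothing is visible yet. */
    for (i = 1; i < n; i++) {
        next[last] = ids[i];
        last = ids[i];
    }

    /* Publish: point the chain's tail at the current head, then swing the
     * head to the chain's first entry.  Retry if the CAS loses a race (or
     * fails spuriously, since the weak form is used here).
     */
    for (;;) {
        int16_t old_head = atomic_load_explicit(&free_head,
                                                memory_order_acquire);
        next[last] = old_head;
        if (atomic_compare_exchange_weak(&free_head, &old_head, first))
            break;
    }

    atomic_fetch_add(&free_cnt, (uint32_t)n);
}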
100 static bool gve_has_pending_packet(struct gve_tx_ring *tx) in gve_has_pending_packet() argument
103 if (tx->dqo_tx.free_pending_packets != -1) in gve_has_pending_packet()
107 if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1) in gve_has_pending_packet()
114 gve_alloc_pending_packet(struct gve_tx_ring *tx) in gve_alloc_pending_packet() argument
119 index = tx->dqo_tx.free_pending_packets; in gve_alloc_pending_packet()
125-126 tx->dqo_tx.free_pending_packets = atomic_xchg(&tx->dqo_compl.free_pending_packets, -1); in gve_alloc_pending_packet()
127 index = tx->dqo_tx.free_pending_packets; in gve_alloc_pending_packet()
133 pending_packet = &tx->dqo.pending_packets[index]; in gve_alloc_pending_packet()
136 tx->dqo_tx.free_pending_packets = pending_packet->next; in gve_alloc_pending_packet()
143 gve_free_pending_packet(struct gve_tx_ring *tx, in gve_free_pending_packet() argument
146 s16 index = pending_packet - tx->dqo.pending_packets; in gve_free_pending_packet()
150 s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets); in gve_free_pending_packet()
153 if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets, in gve_free_pending_packet()
162 static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) in gve_tx_clean_pending_packets() argument
166 for (i = 0; i < tx->dqo.num_pending_packets; i++) { in gve_tx_clean_pending_packets()
168 &tx->dqo.pending_packets[i]; in gve_tx_clean_pending_packets()
173 dma_unmap_single(tx->dev, in gve_tx_clean_pending_packets()
178 dma_unmap_page(tx->dev, in gve_tx_clean_pending_packets()
193 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_free_ring_dqo() local
199 if (tx->q_resources) { in gve_tx_free_ring_dqo()
200-201 dma_free_coherent(hdev, sizeof(*tx->q_resources), tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring_dqo()
202 tx->q_resources = NULL; in gve_tx_free_ring_dqo()
205 if (tx->dqo.compl_ring) { in gve_tx_free_ring_dqo()
206-207 bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1); in gve_tx_free_ring_dqo()
208-209 dma_free_coherent(hdev, bytes, tx->dqo.compl_ring, tx->complq_bus_dqo); in gve_tx_free_ring_dqo()
210 tx->dqo.compl_ring = NULL; in gve_tx_free_ring_dqo()
213 if (tx->dqo.tx_ring) { in gve_tx_free_ring_dqo()
214 bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1); in gve_tx_free_ring_dqo()
215 dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus); in gve_tx_free_ring_dqo()
216 tx->dqo.tx_ring = NULL; in gve_tx_free_ring_dqo()
219 kvfree(tx->dqo.pending_packets); in gve_tx_free_ring_dqo()
220 tx->dqo.pending_packets = NULL; in gve_tx_free_ring_dqo()
222 kvfree(tx->dqo.tx_qpl_buf_next); in gve_tx_free_ring_dqo()
223 tx->dqo.tx_qpl_buf_next = NULL; in gve_tx_free_ring_dqo()
225 if (tx->dqo.qpl) { in gve_tx_free_ring_dqo()
226 gve_unassign_qpl(priv, tx->dqo.qpl->id); in gve_tx_free_ring_dqo()
227 tx->dqo.qpl = NULL; in gve_tx_free_ring_dqo()
233 static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx) in gve_tx_qpl_buf_init() argument
236 tx->dqo.qpl->num_entries; in gve_tx_qpl_buf_init()
239 tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs, in gve_tx_qpl_buf_init()
240 sizeof(tx->dqo.tx_qpl_buf_next[0]), in gve_tx_qpl_buf_init()
242 if (!tx->dqo.tx_qpl_buf_next) in gve_tx_qpl_buf_init()
245 tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs; in gve_tx_qpl_buf_init()
249 tx->dqo.tx_qpl_buf_next[i] = i + 1; in gve_tx_qpl_buf_init()
250 tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1; in gve_tx_qpl_buf_init()
252 atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1); in gve_tx_qpl_buf_init()
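For reference, gve_tx_qpl_buf_init() appears to lay the free list out as one chain, next[i] = i + 1 terminated by -1, and publishes the shared head as -1 (empty); since the ring struct was zeroed earlier in gve_tx_alloc_ring_dqo(), the TX-path head presumably starts at index 0, so the whole chain begins on the private list. A small sketch of that setup under those assumptions (illustrative names, plain calloc instead of kvcalloc):

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct qpl_buf_pool {
    int16_t *next;               /* index-linked free list */
    int num_bufs;
    int16_t private_head;        /* consumed by the TX path */
    _Atomic int16_t shared_head; /* refilled by the completion path */
};

static int qpl_buf_init(struct qpl_buf_pool *p, int num_bufs)
{
    int i;

    p->next = calloc(num_bufs, sizeof(p->next[0]));
    if (!p->next)
        return -1;               /* the driver returns -ENOMEM here */

    p->num_bufs = num_bufs;
    for (i = 0; i < num_bufs - 1; i++)
        p->next[i] = (int16_t)(i + 1);
    p->next[num_bufs - 1] = -1;  /* end of the chain */

    p->private_head = 0;         /* whole chain starts on the TX side */
    atomic_store_explicit(&p->shared_head, -1, memory_order_release);
    return 0;
}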
258 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_alloc_ring_dqo() local
264 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring_dqo()
265 tx->q_num = idx; in gve_tx_alloc_ring_dqo()
266 tx->dev = &priv->pdev->dev; in gve_tx_alloc_ring_dqo()
267 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_alloc_ring_dqo()
268 atomic_set_release(&tx->dqo_compl.hw_tx_head, 0); in gve_tx_alloc_ring_dqo()
271 tx->mask = priv->tx_desc_cnt - 1; in gve_tx_alloc_ring_dqo()
272 tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ? in gve_tx_alloc_ring_dqo()
274 tx->mask; in gve_tx_alloc_ring_dqo()
282 num_pending_packets = tx->dqo.complq_mask + 1; in gve_tx_alloc_ring_dqo()
288 (tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL; in gve_tx_alloc_ring_dqo()
295 tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX); in gve_tx_alloc_ring_dqo()
296 tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets, in gve_tx_alloc_ring_dqo()
297 sizeof(tx->dqo.pending_packets[0]), in gve_tx_alloc_ring_dqo()
299 if (!tx->dqo.pending_packets) in gve_tx_alloc_ring_dqo()
303 for (i = 0; i < tx->dqo.num_pending_packets - 1; i++) in gve_tx_alloc_ring_dqo()
304 tx->dqo.pending_packets[i].next = i + 1; in gve_tx_alloc_ring_dqo()
306 tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1; in gve_tx_alloc_ring_dqo()
307 atomic_set_release(&tx->dqo_compl.free_pending_packets, -1); in gve_tx_alloc_ring_dqo()
308 tx->dqo_compl.miss_completions.head = -1; in gve_tx_alloc_ring_dqo()
309 tx->dqo_compl.miss_completions.tail = -1; in gve_tx_alloc_ring_dqo()
310 tx->dqo_compl.timed_out_completions.head = -1; in gve_tx_alloc_ring_dqo()
311 tx->dqo_compl.timed_out_completions.tail = -1; in gve_tx_alloc_ring_dqo()
313 bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1); in gve_tx_alloc_ring_dqo()
314 tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring_dqo()
315 if (!tx->dqo.tx_ring) in gve_tx_alloc_ring_dqo()
318 bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1); in gve_tx_alloc_ring_dqo()
319 tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes, in gve_tx_alloc_ring_dqo()
320 &tx->complq_bus_dqo, in gve_tx_alloc_ring_dqo()
322 if (!tx->dqo.compl_ring) in gve_tx_alloc_ring_dqo()
325-326 tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources), &tx->q_resources_bus, GFP_KERNEL); in gve_tx_alloc_ring_dqo()
327 if (!tx->q_resources) in gve_tx_alloc_ring_dqo()
331 tx->dqo.qpl = gve_assign_tx_qpl(priv, idx); in gve_tx_alloc_ring_dqo()
332 if (!tx->dqo.qpl) in gve_tx_alloc_ring_dqo()
335 if (gve_tx_qpl_buf_init(tx)) in gve_tx_alloc_ring_dqo()
377 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_free_rings_dqo() local
379 gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL); in gve_tx_free_rings_dqo()
380 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_free_rings_dqo()
381 gve_tx_clean_pending_packets(tx); in gve_tx_free_rings_dqo()
388 static u32 num_avail_tx_slots(const struct gve_tx_ring *tx) in num_avail_tx_slots() argument
390 u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask; in num_avail_tx_slots()
392 return tx->mask - num_used; in num_avail_tx_slots()
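num_avail_tx_slots() relies on the descriptor ring being a power of two: occupancy is simply (tail - head) & mask even across wraparound, and returning mask - used rather than (mask + 1) - used effectively holds one slot back, a common way to keep a full ring distinguishable from an empty one. A standalone arithmetic check of that identity (the ring size of 1024 is only an example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t mask = 1024 - 1;        /* ring of 1024 descriptors */
    uint32_t head = 1000;
    uint32_t tail = 40;                    /* tail has wrapped past 0 */
    uint32_t used = (tail - head) & mask;  /* in-flight descriptors */

    assert(used == 64);
    assert(mask - used == 959);            /* free slots, one held back */
    return 0;
}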
395 static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx, in gve_has_avail_slots_tx_dqo() argument
398-400 return gve_has_pending_packet(tx) && num_avail_tx_slots(tx) >= desc_count && gve_has_free_tx_qpl_bufs(tx, buf_count); in gve_has_avail_slots_tx_dqo()
406 static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, in gve_maybe_stop_tx_dqo() argument
409 if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) in gve_maybe_stop_tx_dqo()
413 tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); in gve_maybe_stop_tx_dqo()
415 if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) in gve_maybe_stop_tx_dqo()
419 tx->stop_queue++; in gve_maybe_stop_tx_dqo()
420 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx_dqo()
428 tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); in gve_maybe_stop_tx_dqo()
430 if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) in gve_maybe_stop_tx_dqo()
433 netif_tx_start_queue(tx->netdev_txq); in gve_maybe_stop_tx_dqo()
434 tx->wake_queue++; in gve_maybe_stop_tx_dqo()
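gve_maybe_stop_tx_dqo() shows the usual lost-wakeup defence: check with the cached head, refresh the head from the completion path's atomic and check again, and if the queue does get stopped, re-read the head once more so a completion that raced with the stop can immediately restart the queue. A userspace sketch of that control flow, with a plain flag standing in for the netif_tx_*_queue() calls and a C11 atomic for dqo_compl.hw_tx_head (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct fake_txq {
    uint32_t mask;                /* ring size - 1 */
    uint32_t tail;                /* TX path's producer index */
    uint32_t cached_head;         /* TX path's stale copy of the HW head */
    _Atomic uint32_t hw_head;     /* advanced by the completion path */
    bool stopped;                 /* stands in for the netif queue state */
};

static bool has_room(const struct fake_txq *q, uint32_t descs)
{
    uint32_t used = (q->tail - q->cached_head) & q->mask;

    return (q->mask - used) >= descs;
}

/* Returns 0 when the caller may post `descs` descriptors, -1 otherwise. */
static int maybe_stop(struct fake_txq *q, uint32_t descs)
{
    if (has_room(q, descs))
        return 0;

    /* Refresh the cached head before giving up. */
    q->cached_head = atomic_load_explicit(&q->hw_head, memory_order_acquire);
    if (has_room(q, descs))
        return 0;

    q->stopped = true;            /* netif_tx_stop_queue() in the driver */

    /* Re-check after stopping: a completion may have advanced hw_head
     * between the check above and the stop; without this the queue could
     * stay stopped with nobody left to wake it.
     */
    q->cached_head = atomic_load_explicit(&q->hw_head, memory_order_acquire);
    if (!has_room(q, descs))
        return -1;

    q->stopped = false;           /* netif_tx_start_queue() */
    return 0;
}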
455 static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx, in gve_tx_fill_pkt_desc_dqo() argument
463 &tx->dqo.tx_ring[*desc_idx].pkt; in gve_tx_fill_pkt_desc_dqo()
478 *desc_idx = (*desc_idx + 1) & tx->mask; in gve_tx_fill_pkt_desc_dqo()
568 static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy_dqo() argument
592 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy_dqo()
593 if (unlikely(dma_mapping_error(tx->dev, addr))) in gve_tx_add_skb_no_copy_dqo()
600 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, in gve_tx_add_skb_no_copy_dqo()
611 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy_dqo()
612 if (unlikely(dma_mapping_error(tx->dev, addr))) in gve_tx_add_skb_no_copy_dqo()
619 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, in gve_tx_add_skb_no_copy_dqo()
627 dma_unmap_single(tx->dev, in gve_tx_add_skb_no_copy_dqo()
632 dma_unmap_page(tx->dev, in gve_tx_add_skb_no_copy_dqo()
646 static void gve_tx_buf_get_addr(struct gve_tx_ring *tx, in gve_tx_buf_get_addr() argument
653 *va = page_address(tx->dqo.qpl->pages[page_id]) + offset; in gve_tx_buf_get_addr()
654 *dma_addr = tx->dqo.qpl->page_buses[page_id] + offset; in gve_tx_buf_get_addr()
657 static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_copy_dqo() argument
673 index = gve_alloc_tx_qpl_buf(tx); in gve_tx_add_skb_copy_dqo()
677 gve_tx_buf_get_addr(tx, index, &va, &dma_addr); in gve_tx_add_skb_copy_dqo()
683 dma_sync_single_for_device(tx->dev, dma_addr, in gve_tx_add_skb_copy_dqo()
685 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, in gve_tx_add_skb_copy_dqo()
693 ++tx->dqo_tx.alloc_tx_qpl_buf_cnt; in gve_tx_add_skb_copy_dqo()
700 gve_free_tx_qpl_bufs(tx, pkt); in gve_tx_add_skb_copy_dqo()
709 static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_dqo() argument
713 u32 desc_idx = tx->dqo_tx.tail; in gve_tx_add_skb_dqo()
718 pkt = gve_alloc_pending_packet(tx); in gve_tx_add_skb_dqo()
720 completion_tag = pkt - tx->dqo.pending_packets; in gve_tx_add_skb_dqo()
729 gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx, in gve_tx_add_skb_dqo()
731 desc_idx = (desc_idx + 1) & tx->mask; in gve_tx_add_skb_dqo()
734 gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx, in gve_tx_add_skb_dqo()
736 desc_idx = (desc_idx + 1) & tx->mask; in gve_tx_add_skb_dqo()
738 if (tx->dqo.qpl) { in gve_tx_add_skb_dqo()
739 if (gve_tx_add_skb_copy_dqo(tx, skb, pkt, in gve_tx_add_skb_dqo()
744 if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt, in gve_tx_add_skb_dqo()
750 tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs; in gve_tx_add_skb_dqo()
753 tx->dqo_tx.tail = desc_idx; in gve_tx_add_skb_dqo()
759 u32 last_desc_idx = (desc_idx - 1) & tx->mask; in gve_tx_add_skb_dqo()
761 (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask; in gve_tx_add_skb_dqo()
765 tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true; in gve_tx_add_skb_dqo()
766 tx->dqo_tx.last_re_idx = last_desc_idx; in gve_tx_add_skb_dqo()
774 gve_free_pending_packet(tx, pkt); in gve_tx_add_skb_dqo()
841 static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_try_tx_skb() argument
847 if (tx->dqo.qpl) { in gve_try_tx_skb()
890 if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs + in gve_try_tx_skb()
896 if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0)) in gve_try_tx_skb()
899 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_try_tx_skb()
904 tx->dropped_pkt++; in gve_try_tx_skb()
913 struct gve_tx_ring *tx; in gve_tx_dqo() local
915 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx_dqo()
916 if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) { in gve_tx_dqo()
921 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_tx_dqo()
925 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx_dqo()
928 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_tx_dqo()
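The tail of gve_tx_dqo() batches doorbells: while the queue is still running and the stack reports more packets on the way (netdev_xmit_more()), the MMIO doorbell write is skipped and a later packet rings it for the whole burst. A trivial stubbed sketch of that decision, not the driver's code:

#include <stdbool.h>
#include <stdint.h>

static unsigned int doorbell_writes;      /* counts MMIO writes in the sketch */

static void ring_doorbell(uint32_t tail)  /* gve_tx_put_doorbell_dqo() role */
{
    (void)tail;
    doorbell_writes++;
}

/* queue_stopped / xmit_more are plain parameters standing in for
 * netif_xmit_stopped() and netdev_xmit_more().
 */
static void tx_submit(uint32_t new_tail, bool queue_stopped, bool xmit_more)
{
    if (!queue_stopped && xmit_more)
        return;                   /* defer: a later packet will ring for us */

    ring_doorbell(new_tail);      /* one MMIO write covers the whole burst */
}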
932 static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list, in add_to_list() argument
937 index = pending_packet - tx->dqo.pending_packets; in add_to_list()
943 tx->dqo.pending_packets[old_tail].next = index; in add_to_list()
949 static void remove_from_list(struct gve_tx_ring *tx, in remove_from_list() argument
962 tx->dqo.pending_packets[prev_index].next = next_index; in remove_from_list()
968 tx->dqo.pending_packets[next_index].prev = prev_index; in remove_from_list()
994 struct gve_tx_ring *tx, bool is_napi, in gve_handle_packet_completion() argument
1000 if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) { in gve_handle_packet_completion()
1006 pending_packet = &tx->dqo.pending_packets[compl_tag]; in gve_handle_packet_completion()
1016 remove_from_list(tx, in gve_handle_packet_completion()
1017 &tx->dqo_compl.timed_out_completions, in gve_handle_packet_completion()
1019 gve_free_pending_packet(tx, pending_packet); in gve_handle_packet_completion()
1033 remove_from_list(tx, &tx->dqo_compl.miss_completions, in gve_handle_packet_completion()
1044 tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs; in gve_handle_packet_completion()
1045 if (tx->dqo.qpl) in gve_handle_packet_completion()
1046 gve_free_tx_qpl_bufs(tx, pending_packet); in gve_handle_packet_completion()
1048 gve_unmap_packet(tx->dev, pending_packet); in gve_handle_packet_completion()
1054 gve_free_pending_packet(tx, pending_packet); in gve_handle_packet_completion()
1058 struct gve_tx_ring *tx, u16 compl_tag, in gve_handle_miss_completion() argument
1063 if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) { in gve_handle_miss_completion()
1069 pending_packet = &tx->dqo.pending_packets[compl_tag]; in gve_handle_miss_completion()
1084 add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet); in gve_handle_miss_completion()
1091 struct gve_tx_ring *tx) in remove_miss_completions() argument
1096 next_index = tx->dqo_compl.miss_completions.head; in remove_miss_completions()
1098 pending_packet = &tx->dqo.pending_packets[next_index]; in remove_miss_completions()
1104 remove_from_list(tx, &tx->dqo_compl.miss_completions, in remove_miss_completions()
1111 if (tx->dqo.qpl) in remove_miss_completions()
1112 gve_free_tx_qpl_bufs(tx, pending_packet); in remove_miss_completions()
1114 gve_unmap_packet(tx->dev, pending_packet); in remove_miss_completions()
1119 tx->dropped_pkt++; in remove_miss_completions()
1122 (int)(pending_packet - tx->dqo.pending_packets)); in remove_miss_completions()
1132 add_to_list(tx, &tx->dqo_compl.timed_out_completions, in remove_miss_completions()
1138 struct gve_tx_ring *tx) in remove_timed_out_completions() argument
1143 next_index = tx->dqo_compl.timed_out_completions.head; in remove_timed_out_completions()
1145 pending_packet = &tx->dqo.pending_packets[next_index]; in remove_timed_out_completions()
1151 remove_from_list(tx, &tx->dqo_compl.timed_out_completions, in remove_timed_out_completions()
1153 gve_free_pending_packet(tx, pending_packet); in remove_timed_out_completions()
1157 int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done_dqo() argument
1171 &tx->dqo.compl_ring[tx->dqo_compl.head]; in gve_clean_tx_done_dqo()
1174 if (compl_desc->generation == tx->dqo_compl.cur_gen_bit) in gve_clean_tx_done_dqo()
1178-1179 prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) & tx->dqo.complq_mask]); in gve_clean_tx_done_dqo()
1189 atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head); in gve_clean_tx_done_dqo()
1194 gve_handle_miss_completion(priv, tx, compl_tag, in gve_clean_tx_done_dqo()
1198 gve_handle_packet_completion(priv, tx, !!napi, in gve_clean_tx_done_dqo()
1207 gve_handle_miss_completion(priv, tx, compl_tag, in gve_clean_tx_done_dqo()
1213 gve_handle_packet_completion(priv, tx, !!napi, in gve_clean_tx_done_dqo()
1220-1221 tx->dqo_compl.head = (tx->dqo_compl.head + 1) & tx->dqo.complq_mask; in gve_clean_tx_done_dqo()
1223 tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0; in gve_clean_tx_done_dqo()
1227 netdev_tx_completed_queue(tx->netdev_txq, in gve_clean_tx_done_dqo()
1231 remove_miss_completions(priv, tx); in gve_clean_tx_done_dqo()
1232 remove_timed_out_completions(priv, tx); in gve_clean_tx_done_dqo()
1234 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done_dqo()
1235 tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes; in gve_clean_tx_done_dqo()
1236 tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts; in gve_clean_tx_done_dqo()
1237 u64_stats_update_end(&tx->statss); in gve_clean_tx_done_dqo()
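gve_clean_tx_done_dqo() never zeroes consumed completion descriptors; instead it tracks a generation bit that flips every time the completion head wraps to 0, and a descriptor whose generation still equals the current expectation is treated as not yet written by the device. A userspace sketch of that convention (descriptor layout reduced to the generation field, names illustrative):

#include <stdbool.h>
#include <stdint.h>

struct compl_desc {
    uint8_t generation;            /* written by the device */
    /* ... payload fields elided ... */
};

struct compl_ring {
    struct compl_desc *desc;
    uint32_t mask;                 /* ring size - 1 (power of two) */
    uint32_t head;                 /* next descriptor to examine */
    uint8_t cur_gen_bit;           /* value that means "not written yet" */
};

/* Returns true if a new completion was consumed. */
static bool poll_one(struct compl_ring *cq)
{
    struct compl_desc *d = &cq->desc[cq->head];

    if (d->generation == cq->cur_gen_bit)
        return false;              /* device has not written this slot yet */

    /* ... handle the completion here ... */

    cq->head = (cq->head + 1) & cq->mask;
    /* Flip the expected bit each time the ring wraps back to index 0. */
    cq->cur_gen_bit ^= (cq->head == 0);
    return true;
}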
1244 struct gve_tx_ring *tx = block->tx; in gve_tx_poll_dqo() local
1248 int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx, in gve_tx_poll_dqo()
1254 if (netif_tx_queue_stopped(tx->netdev_txq) && in gve_tx_poll_dqo()
1256 tx->wake_queue++; in gve_tx_poll_dqo()
1257 netif_tx_wake_queue(tx->netdev_txq); in gve_tx_poll_dqo()
1262 compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head]; in gve_tx_poll_dqo()
1263 return compl_desc->generation != tx->dqo_compl.cur_gen_bit; in gve_tx_poll_dqo()