Lines matching full:rx (identifier search over the gve Ethernet driver's DQO RX path). Each entry shows the source line number, the matching line, and the enclosing function; the trailing "argument" / "local" notes, where present, mark whether rx is declared as a parameter or as a local variable in that function.
35 static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx) in gve_alloc_buf_state() argument
40 buffer_id = rx->dqo.free_buf_states; in gve_alloc_buf_state()
44 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_alloc_buf_state()
47 rx->dqo.free_buf_states = buf_state->next; in gve_alloc_buf_state()
55 static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx, in gve_buf_state_is_allocated() argument
58 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_buf_state_is_allocated()
63 static void gve_free_buf_state(struct gve_rx_ring *rx, in gve_free_buf_state() argument
66 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_free_buf_state()
68 buf_state->next = rx->dqo.free_buf_states; in gve_free_buf_state()
69 rx->dqo.free_buf_states = buffer_id; in gve_free_buf_state()
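
The fragments from gve_alloc_buf_state() and gve_free_buf_state() above show the core bookkeeping of the DQO RX path: buffer states live in a flat rx->dqo.buf_states array, and the free ones are chained into a singly linked list of s16 indices (rx->dqo.free_buf_states is the head, each state's ->next holds the following index, -1 terminates). The standalone model below, with the structs reduced to just those fields, shows how that alloc/free pair behaves; the names mirror the driver only for readability.

#include <stdio.h>

#define NUM_BUF_STATES 4

struct buf_state {
	short next;             /* index of the next free state; -1 terminates */
	/* page / DMA-address bookkeeping elided */
};

struct rx_ring {
	struct buf_state buf_states[NUM_BUF_STATES];
	short free_buf_states;  /* head index of the free list; -1 when empty */
};

/* Pop the head of the free list, as gve_alloc_buf_state() does. */
static struct buf_state *alloc_buf_state(struct rx_ring *rx)
{
	short id = rx->free_buf_states;

	if (id == -1)
		return NULL;    /* no free states left */
	rx->free_buf_states = rx->buf_states[id].next;
	return &rx->buf_states[id];
}

/* Push a state back, as gve_free_buf_state() does: its index is recovered
 * from pointer arithmetic against the array base, so nothing extra is stored. */
static void free_buf_state(struct rx_ring *rx, struct buf_state *bs)
{
	short id = (short)(bs - rx->buf_states);

	bs->next = rx->free_buf_states;
	rx->free_buf_states = id;
}

int main(void)
{
	struct rx_ring rx = { .free_buf_states = 0 };
	struct buf_state *bs;
	int i;

	/* Chain 0 -> 1 -> 2 -> 3 -> -1, exactly as lines 287-290 below do. */
	for (i = 0; i < NUM_BUF_STATES - 1; i++)
		rx.buf_states[i].next = i + 1;
	rx.buf_states[NUM_BUF_STATES - 1].next = -1;

	bs = alloc_buf_state(&rx);
	printf("allocated index %ld\n", (long)(bs - rx.buf_states));
	free_buf_state(&rx, bs);
	printf("free list head is back to %d\n", rx.free_buf_states);
	return 0;
}
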
73 gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list) in gve_dequeue_buf_state() argument
82 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_dequeue_buf_state()
95 static void gve_enqueue_buf_state(struct gve_rx_ring *rx, in gve_enqueue_buf_state() argument
99 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_enqueue_buf_state()
109 rx->dqo.buf_states[tail].next = buffer_id; in gve_enqueue_buf_state()
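
gve_dequeue_buf_state() and gve_enqueue_buf_state() (lines 73-109) maintain FIFO lists of those same indices: a struct gve_index_list carries head and tail, dequeue unlinks the head, and enqueue chains a new entry behind the current tail through rx->dqo.buf_states[tail].next. The sketch below shows one way the visible fragments fit together, assuming the gve types from the driver's headers; the empty-list branches do not contain rx, so they do not appear above and are inferred rather than quoted.

/* Sketch, not verbatim driver code: lines that lack "rx" are inferred. */
static struct gve_rx_buf_state_dqo *
gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id = list->head;

	if (unlikely(buffer_id == -1))
		return NULL;                      /* list is empty */

	buf_state = &rx->dqo.buf_states[buffer_id];

	list->head = buf_state->next;             /* unlink the head entry */
	if (list->head == -1)
		list->tail = -1;                  /* list became empty */

	return buf_state;
}

static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
				  struct gve_index_list *list,
				  struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;
	s16 tail = list->tail;

	buf_state->next = -1;                     /* new tail terminates the list */
	if (tail == -1)
		list->head = buffer_id;           /* list was empty */
	else
		rx->dqo.buf_states[tail].next = buffer_id;  /* line 109 */
	list->tail = buffer_id;
}
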
115 gve_get_recycled_buf_state(struct gve_rx_ring *rx) in gve_get_recycled_buf_state() argument
121 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); in gve_get_recycled_buf_state()
125 if (unlikely(rx->dqo.used_buf_states.head == -1)) in gve_get_recycled_buf_state()
134 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
136 rx->dqo.used_buf_states_cnt--; in gve_get_recycled_buf_state()
140 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_get_recycled_buf_state()
146 if (rx->dqo.qpl) in gve_get_recycled_buf_state()
152 if (unlikely(rx->dqo.free_buf_states == -1)) { in gve_get_recycled_buf_state()
153 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
157 gve_free_page_dqo(rx->gve, buf_state, true); in gve_get_recycled_buf_state()
158 gve_free_buf_state(rx, buf_state); in gve_get_recycled_buf_state()
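
gve_get_recycled_buf_state() (lines 115-158) decides where the next postable buffer comes from, in priority order: the recycled list first, then a short scan of the used list for an entry whose page the stack has released, then, only in RDA mode, reclaiming a used entry outright when the free list is empty. The sketch below captures that ordering; buf_page_is_unused() is a hypothetical stand-in for the driver's page reference check, and the scan limit is illustrative.

static struct gve_rx_buf_state_dqo *
gve_get_recycled_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	int i;

	/* 1. Recycled states can be posted again as-is (line 121). */
	buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
	if (likely(buf_state))
		return buf_state;

	if (unlikely(rx->dqo.used_buf_states.head == -1))
		return NULL;                       /* nothing to scavenge */

	/* 2. Scan a few used states; one whose page is no longer referenced
	 *    by an skb can be reused, otherwise it goes back on the list
	 *    (lines 134-140). */
	for (i = 0; i < 5; i++) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		if (buf_page_is_unused(buf_state)) {   /* hypothetical helper */
			rx->dqo.used_buf_states_cnt--;
			return buf_state;
		}
		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	}

	/* 3. QPL mode cannot allocate new pages; wait for buffers to come
	 *    back from the stack (line 146). */
	if (rx->dqo.qpl)
		return NULL;

	/* 4. RDA mode with no free states left: give up one used entry,
	 *    releasing its page and its state, so the caller can allocate
	 *    a fresh pair (lines 152-158). */
	if (unlikely(rx->dqo.free_buf_states == -1)) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		gve_free_page_dqo(rx->gve, buf_state, true);
		gve_free_buf_state(rx, buf_state);
	}

	return NULL;
}
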
164 static int gve_alloc_page_dqo(struct gve_rx_ring *rx, in gve_alloc_page_dqo() argument
167 struct gve_priv *priv = rx->gve; in gve_alloc_page_dqo()
170 if (!rx->dqo.qpl) { in gve_alloc_page_dqo()
180 idx = rx->dqo.next_qpl_page_idx; in gve_alloc_page_dqo()
186 buf_state->page_info.page = rx->dqo.qpl->pages[idx]; in gve_alloc_page_dqo()
187 buf_state->addr = rx->dqo.qpl->page_buses[idx]; in gve_alloc_page_dqo()
188 rx->dqo.next_qpl_page_idx++; in gve_alloc_page_dqo()
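
gve_alloc_page_dqo() (lines 164-188) backs a buffer state with a page in one of two ways: in RDA mode it allocates and DMA-maps a fresh page, while in QPL mode it hands out the next page that was registered with the device up front. The sketch below assumes the driver's gve_alloc_page() helper, which is not part of this listing; the bounds check on next_qpl_page_idx and the page_info offset/bias initialisation are elided.

static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
			      struct gve_rx_buf_state_dqo *buf_state)
{
	struct gve_priv *priv = rx->gve;
	u32 idx;

	if (!rx->dqo.qpl) {
		/* RDA format: allocate and DMA-map a fresh page; atomic
		 * because this runs from the NAPI refill path. */
		int err = gve_alloc_page(priv, &priv->pdev->dev,
					 &buf_state->page_info.page,
					 &buf_state->addr,
					 DMA_FROM_DEVICE, GFP_ATOMIC);
		if (err)
			return err;
	} else {
		/* QPL format: pages are pre-registered with the device, so
		 * just take the next one from the queue page list. */
		idx = rx->dqo.next_qpl_page_idx;
		buf_state->page_info.page = rx->dqo.qpl->pages[idx];
		buf_state->addr = rx->dqo.qpl->page_buses[idx];
		rx->dqo.next_qpl_page_idx++;
	}

	/* page_offset / pagecnt bookkeeping elided */
	return 0;
}
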
204 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_free_ring_dqo() local
211 completion_queue_slots = rx->dqo.complq.mask + 1; in gve_rx_free_ring_dqo()
212 buffer_queue_slots = rx->dqo.bufq.mask + 1; in gve_rx_free_ring_dqo()
216 if (rx->q_resources) { in gve_rx_free_ring_dqo()
217 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_free_ring_dqo()
218 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring_dqo()
219 rx->q_resources = NULL; in gve_rx_free_ring_dqo()
222 for (i = 0; i < rx->dqo.num_buf_states; i++) { in gve_rx_free_ring_dqo()
223 struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; in gve_rx_free_ring_dqo()
226 gve_free_page_dqo(priv, bs, !rx->dqo.qpl); in gve_rx_free_ring_dqo()
228 if (rx->dqo.qpl) { in gve_rx_free_ring_dqo()
229 gve_unassign_qpl(priv, rx->dqo.qpl->id); in gve_rx_free_ring_dqo()
230 rx->dqo.qpl = NULL; in gve_rx_free_ring_dqo()
233 if (rx->dqo.bufq.desc_ring) { in gve_rx_free_ring_dqo()
234 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_free_ring_dqo()
235 dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring, in gve_rx_free_ring_dqo()
236 rx->dqo.bufq.bus); in gve_rx_free_ring_dqo()
237 rx->dqo.bufq.desc_ring = NULL; in gve_rx_free_ring_dqo()
240 if (rx->dqo.complq.desc_ring) { in gve_rx_free_ring_dqo()
241 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_free_ring_dqo()
243 dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring, in gve_rx_free_ring_dqo()
244 rx->dqo.complq.bus); in gve_rx_free_ring_dqo()
245 rx->dqo.complq.desc_ring = NULL; in gve_rx_free_ring_dqo()
248 kvfree(rx->dqo.buf_states); in gve_rx_free_ring_dqo()
249 rx->dqo.buf_states = NULL; in gve_rx_free_ring_dqo()
251 netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); in gve_rx_free_ring_dqo()
256 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_alloc_ring_dqo() local
266 netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); in gve_rx_alloc_ring_dqo()
268 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring_dqo()
269 rx->gve = priv; in gve_rx_alloc_ring_dqo()
270 rx->q_num = idx; in gve_rx_alloc_ring_dqo()
271 rx->dqo.bufq.mask = buffer_queue_slots - 1; in gve_rx_alloc_ring_dqo()
272 rx->dqo.complq.num_free_slots = completion_queue_slots; in gve_rx_alloc_ring_dqo()
273 rx->dqo.complq.mask = completion_queue_slots - 1; in gve_rx_alloc_ring_dqo()
274 rx->ctx.skb_head = NULL; in gve_rx_alloc_ring_dqo()
275 rx->ctx.skb_tail = NULL; in gve_rx_alloc_ring_dqo()
277 rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ? in gve_rx_alloc_ring_dqo()
280 rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, in gve_rx_alloc_ring_dqo()
281 sizeof(rx->dqo.buf_states[0]), in gve_rx_alloc_ring_dqo()
283 if (!rx->dqo.buf_states) in gve_rx_alloc_ring_dqo()
287 for (i = 0; i < rx->dqo.num_buf_states - 1; i++) in gve_rx_alloc_ring_dqo()
288 rx->dqo.buf_states[i].next = i + 1; in gve_rx_alloc_ring_dqo()
290 rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; in gve_rx_alloc_ring_dqo()
291 rx->dqo.recycled_buf_states.head = -1; in gve_rx_alloc_ring_dqo()
292 rx->dqo.recycled_buf_states.tail = -1; in gve_rx_alloc_ring_dqo()
293 rx->dqo.used_buf_states.head = -1; in gve_rx_alloc_ring_dqo()
294 rx->dqo.used_buf_states.tail = -1; in gve_rx_alloc_ring_dqo()
296 /* Allocate RX completion queue */ in gve_rx_alloc_ring_dqo()
297 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_alloc_ring_dqo()
299 rx->dqo.complq.desc_ring = in gve_rx_alloc_ring_dqo()
300 dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
301 if (!rx->dqo.complq.desc_ring) in gve_rx_alloc_ring_dqo()
304 /* Allocate RX buffer queue */ in gve_rx_alloc_ring_dqo()
305 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_alloc_ring_dqo()
306 rx->dqo.bufq.desc_ring = in gve_rx_alloc_ring_dqo()
307 dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
308 if (!rx->dqo.bufq.desc_ring) in gve_rx_alloc_ring_dqo()
312 rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num); in gve_rx_alloc_ring_dqo()
313 if (!rx->dqo.qpl) in gve_rx_alloc_ring_dqo()
315 rx->dqo.next_qpl_page_idx = 0; in gve_rx_alloc_ring_dqo()
318 rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring_dqo()
319 &rx->q_resources_bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
320 if (!rx->q_resources) in gve_rx_alloc_ring_dqo()
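
Both the teardown path (lines 211-212, mask + 1 recovers the slot count) and the setup path (lines 271-273, slots - 1 builds the mask) rely on the buffer and completion rings having power-of-two sizes, so an index wraps with a single AND against the mask; gve_rx_post_buffers_dqo() advances bufq->tail the same way. A tiny standalone illustration of that convention:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buffer_queue_slots = 1024;       /* example ring size */
	uint32_t mask = buffer_queue_slots - 1;   /* lines 271-273 */
	uint32_t tail = 1023;

	assert((buffer_queue_slots & mask) == 0); /* must be a power of two */

	tail = (tail + 1) & mask;                 /* advance and wrap to 0 */
	printf("next tail = %u, slots recovered = %u\n", tail, mask + 1);
	return 0;
}
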
334 const struct gve_rx_ring *rx = &priv->rx[queue_idx]; in gve_rx_write_doorbell_dqo() local
335 u64 index = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell_dqo()
337 iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]); in gve_rx_write_doorbell_dqo()
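
Lines 334-337 are essentially all of gve_rx_write_doorbell_dqo(); reassembled below (the signature is inferred from the call at line 413): the device tells the driver which doorbell slot this queue owns via q_resources->db_index (big-endian), and writing the buffer queue tail there tells the NIC how far the just-posted descriptors extend.

static void gve_rx_write_doorbell_dqo(struct gve_priv *priv, int queue_idx)
{
	const struct gve_rx_ring *rx = &priv->rx[queue_idx];
	u64 index = be32_to_cpu(rx->q_resources->db_index);

	/* Publish the current buffer queue tail to the NIC's doorbell BAR. */
	iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
}
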
349 "Failed to alloc rx ring=%d: err=%d\n", in gve_rx_alloc_rings_dqo()
372 void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) in gve_rx_post_buffers_dqo() argument
374 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_post_buffers_dqo()
375 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_post_buffers_dqo()
376 struct gve_priv *priv = rx->gve; in gve_rx_post_buffers_dqo()
389 buf_state = gve_get_recycled_buf_state(rx); in gve_rx_post_buffers_dqo()
391 buf_state = gve_alloc_buf_state(rx); in gve_rx_post_buffers_dqo()
395 if (unlikely(gve_alloc_page_dqo(rx, buf_state))) { in gve_rx_post_buffers_dqo()
396 u64_stats_update_begin(&rx->statss); in gve_rx_post_buffers_dqo()
397 rx->rx_buf_alloc_fail++; in gve_rx_post_buffers_dqo()
398 u64_stats_update_end(&rx->statss); in gve_rx_post_buffers_dqo()
399 gve_free_buf_state(rx, buf_state); in gve_rx_post_buffers_dqo()
404 desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); in gve_rx_post_buffers_dqo()
413 gve_rx_write_doorbell_dqo(priv, rx->q_num); in gve_rx_post_buffers_dqo()
416 rx->fill_cnt += num_posted; in gve_rx_post_buffers_dqo()
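
gve_rx_post_buffers_dqo() (lines 372-416) refills the hardware buffer queue: each posted descriptor carries the buffer state's array index (buf_id) and its DMA address, taken either from a recycled state or from a freshly allocated one. The sketch below simplifies the free-slot accounting and rings the doorbell once at the end, whereas the driver batches doorbell writes inside the loop (line 413).

void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
{
	struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
	struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
	struct gve_priv *priv = rx->gve;
	u32 num_posted = 0;

	while (complq->num_free_slots > 0) {
		struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
		struct gve_rx_buf_state_dqo *buf_state;

		/* Prefer a recycled buffer state, else take a fresh one and
		 * back it with a page (lines 389-399). */
		buf_state = gve_get_recycled_buf_state(rx);
		if (unlikely(!buf_state)) {
			buf_state = gve_alloc_buf_state(rx);
			if (unlikely(!buf_state))
				break;
			if (unlikely(gve_alloc_page_dqo(rx, buf_state))) {
				u64_stats_update_begin(&rx->statss);
				rx->rx_buf_alloc_fail++;
				u64_stats_update_end(&rx->statss);
				gve_free_buf_state(rx, buf_state);
				break;
			}
		}

		/* Tell the NIC which buffer state (by index) and which DMA
		 * address to fill (line 404). */
		desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
		desc->buf_addr = cpu_to_le64(buf_state->addr +
					     buf_state->page_info.page_offset);

		bufq->tail = (bufq->tail + 1) & bufq->mask;
		complq->num_free_slots--;
		num_posted++;
	}

	/* Publish the new tail; the driver actually batches these doorbell
	 * writes inside the loop (line 413). */
	if (num_posted)
		gve_rx_write_doorbell_dqo(priv, rx->q_num);

	rx->fill_cnt += num_posted;
}
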
419 static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_try_recycle_buf() argument
453 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_try_recycle_buf()
457 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_try_recycle_buf()
458 rx->dqo.used_buf_states_cnt++; in gve_try_recycle_buf()
509 static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) in gve_rx_free_skb() argument
511 if (!rx->ctx.skb_head) in gve_rx_free_skb()
514 if (rx->ctx.skb_head == napi->skb) in gve_rx_free_skb()
516 dev_kfree_skb_any(rx->ctx.skb_head); in gve_rx_free_skb()
517 rx->ctx.skb_head = NULL; in gve_rx_free_skb()
518 rx->ctx.skb_tail = NULL; in gve_rx_free_skb()
521 static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx) in gve_rx_should_trigger_copy_ondemand() argument
523 if (!rx->dqo.qpl) in gve_rx_should_trigger_copy_ondemand()
525 if (rx->dqo.used_buf_states_cnt < in gve_rx_should_trigger_copy_ondemand()
526 (rx->dqo.num_buf_states - in gve_rx_should_trigger_copy_ondemand()
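
gve_rx_should_trigger_copy_ondemand() (lines 521-526) gates the QPL copy-on-demand path: it only fires when the ring is QPL-backed and almost every buffer state is parked on the used list, i.e. the fixed page pool is about to run dry. A sketch with the headroom written as a plain number; the driver uses its own threshold constant.

static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx)
{
	/* RDA mode can always allocate fresh pages, so never copy. */
	if (!rx->dqo.qpl)
		return false;

	/* Plenty of states are still free or recycled; no need to copy. */
	if (rx->dqo.used_buf_states_cnt <
	    (rx->dqo.num_buf_states - 4 /* illustrative headroom */))
		return false;

	return true;
}
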
532 static int gve_rx_copy_ondemand(struct gve_rx_ring *rx, in gve_rx_copy_ondemand() argument
546 num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; in gve_rx_copy_ondemand()
547 skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, in gve_rx_copy_ondemand()
550 u64_stats_update_begin(&rx->statss); in gve_rx_copy_ondemand()
551 rx->rx_frag_alloc_cnt++; in gve_rx_copy_ondemand()
552 u64_stats_update_end(&rx->statss); in gve_rx_copy_ondemand()
554 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_rx_copy_ondemand()
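
gve_rx_copy_ondemand() (lines 532-554) is the escape hatch for that situation: copy the received data out of the QPL page into a newly allocated page, attach the new page to the skb under construction, and put the original buffer state straight back on the recycled list so the hardware can reuse it. A sketch under the assumption that page_info carries the kernel virtual address and offset of the QPL buffer; lengths and offsets are simplified.

static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state,
				u16 buf_len)
{
	struct page *page = alloc_page(GFP_ATOMIC);
	int num_frags;

	if (!page)
		return -ENOMEM;

	/* Copy out of the QPL page so the device can have it back. */
	memcpy(page_address(page),
	       buf_state->page_info.page_address +
			buf_state->page_info.page_offset,
	       buf_len);

	/* Hang the copy off the skb tail (lines 546-547). */
	num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
	skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, 0, buf_len,
			PAGE_SIZE);

	u64_stats_update_begin(&rx->statss);
	rx->rx_frag_alloc_cnt++;
	u64_stats_update_end(&rx->statss);

	/* The original QPL buffer is immediately reusable (line 554). */
	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return 0;
}
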
558 /* Chains multi skbs for single rx packet.
563 u16 buf_len, struct gve_rx_ring *rx, in gve_rx_append_frags() argument
566 int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; in gve_rx_append_frags()
575 if (rx->ctx.skb_tail == rx->ctx.skb_head) in gve_rx_append_frags()
576 skb_shinfo(rx->ctx.skb_head)->frag_list = skb; in gve_rx_append_frags()
578 rx->ctx.skb_tail->next = skb; in gve_rx_append_frags()
579 rx->ctx.skb_tail = skb; in gve_rx_append_frags()
582 if (rx->ctx.skb_tail != rx->ctx.skb_head) { in gve_rx_append_frags()
583 rx->ctx.skb_head->len += buf_len; in gve_rx_append_frags()
584 rx->ctx.skb_head->data_len += buf_len; in gve_rx_append_frags()
585 rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo; in gve_rx_append_frags()
589 if (gve_rx_should_trigger_copy_ondemand(rx)) in gve_rx_append_frags()
590 return gve_rx_copy_ondemand(rx, buf_state, buf_len); in gve_rx_append_frags()
592 skb_add_rx_frag(rx->ctx.skb_tail, num_frags, in gve_rx_append_frags()
601 gve_try_recycle_buf(priv, rx, buf_state); in gve_rx_append_frags()
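
gve_rx_append_frags() (lines 558-601) adds one more received buffer to the packet being assembled. When the current tail skb has no frag slots left, it chains a fresh skb (via frag_list on the head, or ->next further down the chain); byte counts on the head skb are then maintained by hand because the data lives in a chained skb. The sketch below follows the visible fragments; the page reference-count bookkeeping that accompanies skb_add_rx_frag() in the driver is elided.

static int gve_rx_append_frags(struct napi_struct *napi,
			       struct gve_rx_buf_state_dqo *buf_state,
			       u16 buf_len, struct gve_rx_ring *rx,
			       struct gve_priv *priv)
{
	int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;

	if (unlikely(num_frags == MAX_SKB_FRAGS)) {
		/* Tail skb is full: chain a new, empty skb after it. */
		struct sk_buff *skb = napi_alloc_skb(napi, 0);

		if (!skb)
			return -1;

		if (rx->ctx.skb_tail == rx->ctx.skb_head)
			skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
		else
			rx->ctx.skb_tail->next = skb;
		rx->ctx.skb_tail = skb;
		num_frags = 0;
	}

	/* Chained skbs do not update the head skb's counters automatically. */
	if (rx->ctx.skb_tail != rx->ctx.skb_head) {
		rx->ctx.skb_head->len += buf_len;
		rx->ctx.skb_head->data_len += buf_len;
		rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
	}

	/* Running low on QPL buffers: copy instead of attaching the page. */
	if (gve_rx_should_trigger_copy_ondemand(rx))
		return gve_rx_copy_ondemand(rx, buf_state, buf_len);

	skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
			buf_state->page_info.page,
			buf_state->page_info.page_offset,
			buf_len, priv->data_buffer_size_dqo);

	/* Either mark the buffer for reuse (recycled list) or park it on the
	 * used list until its page is released (lines 453-459). */
	gve_try_recycle_buf(priv, rx, buf_state);
	return 0;
}
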
609 static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, in gve_rx_dqo() argument
616 struct gve_priv *priv = rx->gve; in gve_rx_dqo()
619 if (unlikely(buffer_id >= rx->dqo.num_buf_states)) { in gve_rx_dqo()
620 net_err_ratelimited("%s: Invalid RX buffer_id=%u\n", in gve_rx_dqo()
624 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_rx_dqo()
625 if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) { in gve_rx_dqo()
626 net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n", in gve_rx_dqo()
632 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, in gve_rx_dqo()
650 if (rx->ctx.skb_head) { in gve_rx_dqo()
651 if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, in gve_rx_dqo()
659 rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, in gve_rx_dqo()
661 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
663 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
665 u64_stats_update_begin(&rx->statss); in gve_rx_dqo()
666 rx->rx_copied_pkt++; in gve_rx_dqo()
667 rx->rx_copybreak_pkt++; in gve_rx_dqo()
668 u64_stats_update_end(&rx->statss); in gve_rx_dqo()
670 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, in gve_rx_dqo()
675 rx->ctx.skb_head = napi_get_frags(napi); in gve_rx_dqo()
676 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
678 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
680 if (gve_rx_should_trigger_copy_ondemand(rx)) { in gve_rx_dqo()
681 if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0) in gve_rx_dqo()
686 skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, in gve_rx_dqo()
691 gve_try_recycle_buf(priv, rx, buf_state); in gve_rx_dqo()
695 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_rx_dqo()
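
gve_rx_dqo() (lines 609-695) handles one RX completion. After validating the buffer id it takes one of three paths: extend the skb already in progress (a multi-buffer packet), copy a small packet (copybreak) and recycle its buffer immediately, or build a frag-based skb around the buffer's page. The condensed sketch below elides descriptor error handling (which recycles the buffer and drops the packet, line 632), DMA syncs and prefetching; the length extraction from the completion descriptor and the trailing arguments of gve_rx_copy() are abbreviated and may differ between kernel versions, and the return codes are simplified.

static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
		      const struct gve_rx_compl_desc_dqo *compl_desc,
		      int queue_idx)
{
	const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
	const u16 buf_len = le16_to_cpu(compl_desc->packet_len); /* extraction simplified */
	struct gve_rx_buf_state_dqo *buf_state;
	struct gve_priv *priv = rx->gve;

	if (unlikely(buffer_id >= rx->dqo.num_buf_states))
		return -EINVAL;
	buf_state = &rx->dqo.buf_states[buffer_id];
	if (unlikely(!gve_buf_state_is_allocated(rx, buf_state)))
		return -EINVAL;

	/* Continuation of a multi-buffer packet: just add another frag. */
	if (rx->ctx.skb_head)
		return gve_rx_append_frags(napi, buf_state, buf_len, rx, priv);

	if (buf_len <= priv->rx_copybreak) {
		/* Small packet: copy into a fresh skb and hand the buffer
		 * straight back to the recycled list (lines 659-670). */
		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
					       &buf_state->page_info, buf_len);
		if (unlikely(!rx->ctx.skb_head))
			return -ENOMEM;
		rx->ctx.skb_tail = rx->ctx.skb_head;

		u64_stats_update_begin(&rx->statss);
		rx->rx_copied_pkt++;
		rx->rx_copybreak_pkt++;
		u64_stats_update_end(&rx->statss);

		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
				      buf_state);
		return 0;
	}

	/* Large packet: build a frag-based skb around the buffer's page. */
	rx->ctx.skb_head = napi_get_frags(napi);
	if (unlikely(!rx->ctx.skb_head))
		return -ENOMEM;
	rx->ctx.skb_tail = rx->ctx.skb_head;

	if (gve_rx_should_trigger_copy_ondemand(rx))
		return gve_rx_copy_ondemand(rx, buf_state, buf_len);

	skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
			buf_state->page_info.page_offset, buf_len,
			priv->data_buffer_size_dqo);
	gve_try_recycle_buf(priv, rx, buf_state);
	return 0;
}
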
725 static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi, in gve_rx_complete_skb() argument
730 rx->gve->ptype_lut_dqo->ptypes[desc->packet_type]; in gve_rx_complete_skb()
733 skb_record_rx_queue(rx->ctx.skb_head, rx->q_num); in gve_rx_complete_skb()
736 gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
739 gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
745 err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
750 if (skb_headlen(rx->ctx.skb_head) == 0) in gve_rx_complete_skb()
753 napi_gro_receive(napi, rx->ctx.skb_head); in gve_rx_complete_skb()
763 struct gve_rx_ring *rx = block->rx; in gve_rx_poll_dqo() local
764 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_poll_dqo()
786 err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num); in gve_rx_poll_dqo()
788 gve_rx_free_skb(napi, rx); in gve_rx_poll_dqo()
789 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
791 rx->rx_skb_alloc_fail++; in gve_rx_poll_dqo()
793 rx->rx_desc_err_dropped_pkt++; in gve_rx_poll_dqo()
794 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
807 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_poll_dqo()
813 rx->cnt++; in gve_rx_poll_dqo()
815 if (!rx->ctx.skb_head) in gve_rx_poll_dqo()
822 pkt_bytes = rx->ctx.skb_head->len; in gve_rx_poll_dqo()
826 if (skb_headlen(rx->ctx.skb_head)) in gve_rx_poll_dqo()
830 if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) { in gve_rx_poll_dqo()
831 gve_rx_free_skb(napi, rx); in gve_rx_poll_dqo()
832 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
833 rx->rx_desc_err_dropped_pkt++; in gve_rx_poll_dqo()
834 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
839 rx->ctx.skb_head = NULL; in gve_rx_poll_dqo()
840 rx->ctx.skb_tail = NULL; in gve_rx_poll_dqo()
843 gve_rx_post_buffers_dqo(rx); in gve_rx_poll_dqo()
845 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
846 rx->rpackets += work_done; in gve_rx_poll_dqo()
847 rx->rbytes += bytes; in gve_rx_poll_dqo()
848 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
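
gve_rx_poll_dqo() (lines 763-848) is the NAPI poll handler for the completion queue: drain up to budget completed descriptors, push finished packets into GRO via gve_rx_complete_skb(), repost buffers, and account packets and bytes under the u64 stats sequence. The sketch below is heavily condensed: compl_desc_is_ready() is a hypothetical stand-in for the driver's generation-bit check, the end-of-packet test for multi-descriptor packets is reduced to a comment, and the buffer-queue head accounting (line 807) plus the exact byte accounting (lines 822-828) are elided.

int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
	struct gve_rx_ring *rx = block->rx;
	struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
	struct napi_struct *napi = &block->napi;
	netdev_features_t feat = napi->dev->features;
	u64 bytes = 0;
	int work_done = 0;

	while (work_done < budget) {
		struct gve_rx_compl_desc_dqo *compl_desc =
			&complq->desc_ring[complq->head];
		int err;

		/* Hypothetical helper: stop once the NIC has not yet written
		 * this descriptor (generation-bit check in the driver). */
		if (!compl_desc_is_ready(complq, compl_desc))
			break;

		err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
		if (err < 0) {
			gve_rx_free_skb(napi, rx);
			u64_stats_update_begin(&rx->statss);
			if (err == -ENOMEM)
				rx->rx_skb_alloc_fail++;
			else
				rx->rx_desc_err_dropped_pkt++;
			u64_stats_update_end(&rx->statss);
		}

		complq->head = (complq->head + 1) & complq->mask;
		rx->cnt++;                 /* free-running completion count */

		if (!rx->ctx.skb_head)
			continue;          /* packet was dropped */

		/* (multi-descriptor packets: wait for end-of-packet elided) */

		bytes += rx->ctx.skb_head->len;
		work_done++;

		if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
			gve_rx_free_skb(napi, rx);
			u64_stats_update_begin(&rx->statss);
			rx->rx_desc_err_dropped_pkt++;
			u64_stats_update_end(&rx->statss);
		}

		rx->ctx.skb_head = NULL;
		rx->ctx.skb_tail = NULL;
	}

	gve_rx_post_buffers_dqo(rx);       /* refill the hardware buffer queue */

	u64_stats_update_begin(&rx->statss);
	rx->rpackets += work_done;
	rx->rbytes += bytes;
	u64_stats_update_end(&rx->statss);

	return work_done;
}
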